repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: "py")
---|---|---|---|---|---|---|
FATE | FATE-master/python/federatedml/nn/homo/trainer/__init__.py |  | 0 | 0 | 0 | py |

FATE | FATE-master/python/federatedml/nn/homo/trainer/fedavg_graph_trainer.py |
import torch
import torch as t
import numpy as np
from torch_geometric.loader import NeighborLoader
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient as SecureAggClient
from federatedml.nn.dataset.base import Dataset
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer
from federatedml.util import LOGGER
class FedAVGGraphTrainer(FedAVGTrainer):
"""
Parameters
----------
epochs: int > 0, number of training epochs
batch_size: int, -1 means full batch
secure_aggregate: bool, default True, whether to use secure aggregation. If enabled, random-number
masks are added to the local models; these masks cancel out to 0 during aggregation.
weighted_aggregation: bool, whether to weight each local model during aggregation.
If True, following the original paper, the weight of a client is n_local / n_global, where n_local
is the local sample number and n_global is the total sample number over all clients.
If False, the models are simply averaged.
early_stop: None, 'diff' or 'abs'. If None, early stopping is disabled; if 'diff', the loss difference
between two epochs is the stop condition (stop training when the difference < tol); if 'abs',
stop training when loss < tol.
tol: float, tolerance value for early stopping
aggregate_every_n_epoch: None or int. If None, aggregate the model at the end of every epoch; if int,
aggregate every n epochs.
cuda: bool, whether to use cuda
pin_memory: bool, for the pytorch DataLoader
shuffle: bool, for the pytorch DataLoader
data_loader_worker: int, number of workers for the pytorch DataLoader
validation_freqs: None or int. If int, validate the model and send validation results to fate-board every n epochs.
Binary classification tasks use the metrics 'auc', 'ks', 'gain', 'lift', 'precision';
multi-class classification tasks use 'precision', 'recall', 'accuracy';
regression tasks use 'mse', 'mae', 'rmse', 'explained_variance', 'r2_score'.
checkpoint_save_freqs: save the model every n epochs; if None, no checkpoint is saved.
task_type: str, 'auto', 'binary', 'multi' or 'regression'.
This option decides the return format of this trainer and the evaluation type when running validation.
If 'auto', the task type is inferred from the labels and prediction results.
"""
def __init__(self, epochs=10, batch_size=512, # training parameter
early_stop=None, tol=0.0001, # early stop parameters
secure_aggregate=True, weighted_aggregation=True, aggregate_every_n_epoch=None, # federation
cuda=None, pin_memory=True, shuffle=True, data_loader_worker=0, # GPU & dataloader
validation_freqs=None, # validation configuration
checkpoint_save_freqs=None, # checkpoint configuration
task_type='auto',
num_neighbors=[10, 10],
):
super(FedAVGGraphTrainer, self).__init__(
epochs=epochs, batch_size=batch_size, # training parameter
early_stop=early_stop, tol=tol, # early stop parameters
secure_aggregate=secure_aggregate, weighted_aggregation=weighted_aggregation, aggregate_every_n_epoch=aggregate_every_n_epoch, # federation
cuda=cuda, pin_memory=pin_memory, shuffle=shuffle, data_loader_worker=data_loader_worker, # GPU & dataloader
validation_freqs=validation_freqs, # validation configuration
checkpoint_save_freqs=checkpoint_save_freqs, # checkpoint configuration
task_type=task_type,
)
self.comm_suffix = 'fedavg_graph'
LOGGER.debug("num_neighbors={}".format(num_neighbors))
self.num_neighbors = num_neighbors
def train(
self,
train_set: Dataset,
validate_set: Dataset = None,
optimizer: t.optim.Optimizer = None,
loss=None,
extra_dict={}):
ds = train_set
if self.cuda:
self.model = self.model.cuda()
if optimizer is None:
raise ValueError(
'FedAVGGraphTrainer requires an optimizer, but got None, please specify optimizer in the '
'job configuration')
if loss is None:
raise ValueError(
'FedAVGGraphTrainer requires a loss function, but got None, please specify loss function in the'
' job configuration')
if self.batch_size > len(ds.input_nodes_train) or self.batch_size == -1:
self.batch_size = len(ds.input_nodes_train)
dl = NeighborLoader(
data=ds.data,
num_neighbors=self.num_neighbors,
input_nodes=ds.input_nodes_train,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
shuffle=self.shuffle,
num_workers=self.data_loader_worker)
# compute round to aggregate
cur_agg_round = 0
if self.aggregate_every_n_epoch is not None:
aggregate_round = self.epochs // self.aggregate_every_n_epoch
else:
aggregate_round = self.epochs
# initialize fed avg client
if self.fed_mode:
if self.weighted_aggregation:
sample_num = len(ds.input_nodes_train)
else:
sample_num = 1.0
client_agg = SecureAggClient(
True, aggregate_weight=sample_num, communicate_match_suffix=self.comm_suffix)
else:
client_agg = None
# running var
cur_epoch = 0
loss_history = []
need_stop = False
evaluation_summary = {}
LOGGER.debug(self.model)
# training process
for i in range(self.epochs):
cur_epoch = i
LOGGER.info('epoch is {}'.format(i))
epoch_loss = 0.0
batch_idx = 0
acc_num = 0
for _, batch in enumerate(dl):
label = batch.y[:self.batch_size]
optimizer.zero_grad()
pred = self.model(batch.x, batch.edge_index)[:self.batch_size]
batch_loss = loss(pred, label)
batch_loss.backward()
optimizer.step()
batch_loss_np = batch_loss.detach().numpy() if not self.cuda else batch_loss.cpu().detach().numpy()
if acc_num + self.batch_size > len(ds.input_nodes_train):
batch_len = len(ds.input_nodes_train) - acc_num
else:
batch_len = self.batch_size
epoch_loss += batch_loss_np * batch_len
batch_idx += 1
if self.fed_mode:
LOGGER.debug(
'epoch {} batch {} finished'.format(
i, batch_idx))
# loss compute
epoch_loss = epoch_loss / len(ds.input_nodes_train)
self.callback_loss(epoch_loss, i)
loss_history.append(float(epoch_loss))
LOGGER.info('epoch loss is {}'.format(epoch_loss))
# federation process, if running local mode, cancel federation
if client_agg is not None:
if not (self.aggregate_every_n_epoch is not None and (i + 1) % self.aggregate_every_n_epoch != 0):
# model averaging
self.model = client_agg.model_aggregation(self.model)
# agg loss and get converge status
converge_status = client_agg.loss_aggregation(epoch_loss)
cur_agg_round += 1
LOGGER.info(
'model averaging finished, aggregate round {}/{}'.format(
cur_agg_round, aggregate_round))
if converge_status:
LOGGER.info('early stop triggered, stop training')
need_stop = True
# validation process
if self.validation_freq and ((i + 1) % self.validation_freq == 0):
LOGGER.info('running validation')
ids_t, pred_t, label_t = self._predict(ds, 'train')
evaluation_summary = self.evaluation(
ids_t,
pred_t,
label_t,
dataset_type='train',
epoch_idx=i,
task_type=self.task_type)
if ds.input_nodes_vali is not None:
ids_v, pred_v, label_v = self._predict(ds, 'vali')
evaluation_summary = self.evaluation(
ids_v,
pred_v,
label_v,
dataset_type='validate',
epoch_idx=i,
task_type=self.task_type)
# save check point process
if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
self.checkpoint(
i, self.model, optimizer, converge_status=need_stop, loss_history=loss_history)
LOGGER.info('save checkpoint : epoch {}'.format(i))
# if meet stop condition then stop
if need_stop:
break
# post-process
best_epoch = int(np.array(loss_history).argmin())
self.save(model=self.model, optimizer=optimizer, epoch_idx=cur_epoch, loss_history=loss_history,
converge_status=need_stop, best_epoch=best_epoch)
self.summary({
'best_epoch': best_epoch,
'loss_history': loss_history,
'need_stop': need_stop,
'metrics_summary': evaluation_summary
})
def _predict(self, dataset: Dataset, which_ds='train'):
pred_result = []
# switch eval mode
dataset.eval()
self.model.eval()
if not dataset.has_sample_ids():
dataset.init_sid_and_getfunc(prefix=dataset.get_type())
if which_ds == 'train':
input_nodes = dataset.input_nodes_train
elif which_ds == 'vali':
input_nodes = dataset.input_nodes_vali
elif which_ds == 'test':
input_nodes = dataset.input_nodes_test
else:
raise ValueError("Nnknown dataset to predict!")
dl = NeighborLoader(
data=dataset.data,
num_neighbors=self.num_neighbors,
input_nodes=input_nodes,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
num_workers=self.data_loader_worker)
labels = []
with torch.no_grad():
for _, batch in enumerate(dl):
label = batch.y[:self.batch_size]
pred = self.model(batch.x, batch.edge_index)[:self.batch_size]
pred_result.append(pred)
labels.append(label)
ret_rs = torch.concat(pred_result, axis=0)
ret_label = torch.concat(labels, axis=0)
# switch back to train mode
dataset.train()
self.model.train()
LOGGER.debug(dataset.get_sample_ids())
LOGGER.debug(ret_rs)
LOGGER.debug(ret_label)
return dataset.get_sample_ids(), ret_rs, ret_label
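A minimal usage sketch for the trainer above. `gnn_model`, `graph_dataset`, and the `set_model` setter inherited from the trainer base are illustrative assumptions, not part of this file; the dataset is assumed to expose `data`, `input_nodes_train` and `input_nodes_vali` as the trainer expects.

```python
# Hypothetical wiring (all names below are assumptions for illustration):
#
# trainer = FedAVGGraphTrainer(epochs=20, batch_size=256, num_neighbors=[10, 10],
#                              validation_freqs=5, task_type='multi')
# trainer.set_model(gnn_model)  # a torch_geometric model taking (x, edge_index)
# trainer.train(train_set=graph_dataset,
#               optimizer=t.optim.Adam(gnn_model.parameters(), lr=0.01),
#               loss=t.nn.CrossEntropyLoss())
```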
| 11,560 | 41.977695 | 152 | py |

FATE | FATE-master/python/federatedml/nn/hetero/base.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing.non_distributed import LocalData
from federatedml.model_base import ModelBase
from federatedml.model_selection import start_cross_validation
from federatedml.nn.backend.utils.data import load_dataset
from federatedml.nn.dataset.base import Dataset, ShuffleWrapDataset
from federatedml.param.hetero_nn_param import HeteroNNParam
from federatedml.transfer_variable.transfer_class.hetero_nn_transfer_variable import HeteroNNTransferVariable
from federatedml.util import consts
class HeteroNNBase(ModelBase):
def __init__(self):
super(HeteroNNBase, self).__init__()
self.tol = None
self.early_stop = None
self.seed = 100
self.epochs = None
self.batch_size = None
self._header = []
self.predict_param = None
self.hetero_nn_param = None
self.batch_generator = None
self.model = None
self.partition = None
self.validation_freqs = None
self.early_stopping_rounds = None
self.metrics = []
self.use_first_metric_only = False
self.transfer_variable = HeteroNNTransferVariable()
self.model_param = HeteroNNParam()
self.mode = consts.HETERO
self.selector_param = None
self.floating_point_precision = None
self.history_iter_epoch = 0
self.iter_epoch = 0
self.data_x = []
self.data_y = []
self.dataset_cache_dict = {}
self.label_num = None
# nn related param
self.top_model_define = None
self.bottom_model_define = None
self.interactive_layer_define = None
self.dataset_shuffle = True
self.dataset = None
self.dataset_param = None
self.dataset_shuffle_seed = 100
def _init_model(self, hetero_nn_param: HeteroNNParam):
self.interactive_layer_lr = hetero_nn_param.interactive_layer_lr
self.epochs = hetero_nn_param.epochs
self.batch_size = hetero_nn_param.batch_size
self.seed = hetero_nn_param.seed
self.early_stop = hetero_nn_param.early_stop
self.validation_freqs = hetero_nn_param.validation_freqs
self.early_stopping_rounds = hetero_nn_param.early_stopping_rounds
self.metrics = hetero_nn_param.metrics
self.use_first_metric_only = hetero_nn_param.use_first_metric_only
self.tol = hetero_nn_param.tol
self.predict_param = hetero_nn_param.predict_param
self.hetero_nn_param = hetero_nn_param
self.selector_param = hetero_nn_param.selector_param
self.floating_point_precision = hetero_nn_param.floating_point_precision
# nn configs
self.bottom_model_define = hetero_nn_param.bottom_nn_define
self.top_model_define = hetero_nn_param.top_nn_define
self.interactive_layer_define = hetero_nn_param.interactive_layer_define
# dataset
dataset_param = hetero_nn_param.dataset.to_dict()
self.dataset = dataset_param['dataset_name']
self.dataset_param = dataset_param['param']
def reset_flowid(self):
new_flowid = ".".join([self.flowid, "evaluate"])
self.set_flowid(new_flowid)
def recovery_flowid(self):
new_flowid = ".".join(self.flowid.split(".", -1)[: -1])
self.set_flowid(new_flowid)
def _build_bottom_model(self):
pass
def _build_interactive_model(self):
pass
def _restore_model_meta(self, meta):
# self.hetero_nn_param.interactive_layer_lr = meta.interactive_layer_lr
self.hetero_nn_param.task_type = meta.task_type
if not self.component_properties.is_warm_start:
self.batch_size = meta.batch_size
self.epochs = meta.epochs
self.tol = meta.tol
self.early_stop = meta.early_stop
self.model.set_hetero_nn_model_meta(meta.hetero_nn_model_meta)
def _restore_model_param(self, param):
self.model.set_hetero_nn_model_param(param.hetero_nn_model_param)
self._header = list(param.header)
self.history_iter_epoch = param.iter_epoch
self.iter_epoch = param.iter_epoch
def set_partition(self, data_inst):
self.partition = data_inst.partitions
self.model.set_partition(self.partition)
def cross_validation(self, data_instances):
return start_cross_validation.run(self, data_instances)
def prepare_dataset(self, data, data_type='train', check_label=False):
# train input & validate input are DTables or path str
if isinstance(data, LocalData):
data = data.path
if isinstance(data, Dataset) or isinstance(data, ShuffleWrapDataset):
ds = data
else:
ds = load_dataset(
self.dataset,
data,
self.dataset_param,
self.dataset_cache_dict)
if not ds.has_sample_ids():
raise ValueError(
'Dataset has no sample id, this is not allowed in hetero-nn, please make sure'
' that you implement get_sample_ids()')
if self.dataset_shuffle:
ds = ShuffleWrapDataset(
ds, shuffle_seed=self.dataset_shuffle_seed)
if self.role == consts.GUEST:
self.transfer_variable.dataset_info.remote(
ds.idx_map, idx=-1, suffix=('idx_map', data_type))
if self.role == consts.HOST:
idx_map = self.transfer_variable.dataset_info.get(
idx=0, suffix=('idx_map', data_type))
assert len(idx_map) == len(ds), 'host dataset len != guest dataset len, please check your dataset, ' \
'guest len {}, host len {}'.format(len(idx_map), len(ds))
ds.set_shuffled_idx(idx_map)
if check_label:
try:
all_classes = ds.get_classes()
except NotImplementedError as e:
raise NotImplementedError(
'get_classes() is not implemented, please implement this function'
' when you are using hetero-nn. Let it return classes in a list.'
' Please see built-in dataset(table.py for example) for reference')
except BaseException as e:
raise e
from federatedml.util import LOGGER
LOGGER.debug('all classes is {}'.format(all_classes))
if self.label_num is None:
if self.task_type == consts.CLASSIFICATION:
self.label_num = len(all_classes)
elif self.task_type == consts.REGRESSION:
self.label_num = 1
return ds
# override function
@staticmethod
def set_predict_data_schema(predict_datas, schemas):
if predict_datas is None:
return predict_datas
if isinstance(predict_datas, list):
predict_data = predict_datas[0]
schema = schemas[0]
else:
predict_data = predict_datas
schema = schemas
if predict_data is not None:
predict_data.schema = {
"header": [
"label",
"predict_result",
"predict_score",
"predict_detail",
"type",
],
"sid": 'id',
"content_type": "predict_result"
}
if schema.get("match_id_name") is not None:
predict_data.schema["match_id_name"] = schema.get(
"match_id_name")
return predict_data
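The `idx_map` exchange in `prepare_dataset` above keeps shuffled guest and host rows aligned. A minimal numpy sketch of the idea, assuming nothing beyond a shared permutation (the toy arrays are illustrative):

```python
import numpy as np

rng = np.random.default_rng(100)   # mirrors dataset_shuffle_seed above
idx_map = rng.permutation(6)       # drawn on the guest, remoted to the host in the real code
guest_rows = np.arange(6) * 10     # toy stand-ins for the two parties' data
host_rows = np.arange(6) * 100
# after both parties apply the same permutation, row i still refers to the same sample
assert (guest_rows[idx_map] * 10 == host_rows[idx_map]).all()
```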
| 8,452 | 36.402655 | 121 | py |

FATE | FATE-master/python/federatedml/nn/hetero/host.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from torch.utils.data import DataLoader
from federatedml.framework.hetero.procedure import batch_generator
from federatedml.nn.hetero.base import HeteroNNBase
from federatedml.nn.hetero.model import HeteroNNHostModel
from federatedml.param.hetero_nn_param import HeteroNNParam as NNParameter
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNMeta
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNParam
from federatedml.util import consts, LOGGER
MODELMETA = "HeteroNNHostMeta"
MODELPARAM = "HeteroNNHostParam"
class HeteroNNHost(HeteroNNBase):
def __init__(self):
super(HeteroNNHost, self).__init__()
self.batch_generator = batch_generator.Host()
self.model = None
self.role = consts.HOST
self.input_shape = None
self.default_table_partitions = 4
def _init_model(self, hetero_nn_param):
super(HeteroNNHost, self)._init_model(hetero_nn_param)
def export_model(self):
if self.need_cv:
return None
model = {MODELMETA: self._get_model_meta(),
MODELPARAM: self._get_model_param()}
return model
def load_model(self, model_dict):
model_dict = list(model_dict["model"].values())[0]
param = model_dict.get(MODELPARAM)
meta = model_dict.get(MODELMETA)
if self.hetero_nn_param is None:
self.hetero_nn_param = NNParameter()
self.hetero_nn_param.check()
self.predict_param = self.hetero_nn_param.predict_param
self._build_model()
self._restore_model_meta(meta)
self._restore_model_param(param)
def _build_model(self):
self.model = HeteroNNHostModel(self.hetero_nn_param, self.flowid)
self.model.set_transfer_variable(self.transfer_variable)
self.model.set_partition(self.default_table_partitions)
def predict(self, data_inst):
ds = self.prepare_dataset(data_inst, data_type='predict')
batch_size = len(ds) if self.batch_size == -1 else self.batch_size
for batch_data in DataLoader(ds, batch_size=batch_size):
# ignore label if the dataset offers label
if isinstance(batch_data, tuple) and len(batch_data) > 1:
batch_data = batch_data[0]
self.model.predict(batch_data)
def fit(self, data_inst, validate_data=None):
if hasattr(
data_inst,
'partitions') and data_inst.partitions is not None:
self.default_table_partitions = data_inst.partitions
LOGGER.debug(
'reset default partitions is {}'.format(
self.default_table_partitions))
train_ds = self.prepare_dataset(data_inst, data_type='train')
if validate_data is not None:
val_ds = self.prepare_dataset(validate_data, data_type='validate')
else:
val_ds = None
self.callback_list.on_train_begin(train_ds, val_ds)
if not self.component_properties.is_warm_start:
self._build_model()
epoch_offset = 0
else:
self.callback_warm_start_init_iter(self.history_iter_epoch)
epoch_offset = self.history_iter_epoch + 1
batch_size = len(train_ds) if self.batch_size == -1 else self.batch_size
for cur_epoch in range(epoch_offset, epoch_offset + self.epochs):
self.iter_epoch = cur_epoch
for batch_idx, batch_data in enumerate(
DataLoader(train_ds, batch_size=batch_size)):
self.model.train(batch_data, cur_epoch, batch_idx)
self.callback_list.on_epoch_end(cur_epoch)
if self.callback_variables.stop_training:
LOGGER.debug('early stopping triggered')
break
is_converge = self.transfer_variable.is_converge.get(
idx=0, suffix=(cur_epoch,))
if is_converge:
LOGGER.debug(
"Training process is converged in epoch {}".format(cur_epoch))
break
self.callback_list.on_train_end()
def _get_model_meta(self):
model_meta = HeteroNNMeta()
model_meta.batch_size = self.batch_size
model_meta.hetero_nn_model_meta.CopyFrom(
self.model.get_hetero_nn_model_meta())
model_meta.module = 'HeteroNN'
return model_meta
def _get_model_param(self):
model_param = HeteroNNParam()
model_param.iter_epoch = self.iter_epoch
model_param.header.extend(self._header)
model_param.hetero_nn_model_param.CopyFrom(
self.model.get_hetero_nn_model_param())
model_param.best_iteration = self.callback_variables.best_iteration
return model_param
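Both `fit` and `predict` above translate `batch_size == -1` into a single full batch. A small self-contained sketch of that convention (toy dataset assumed):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.randn(10, 4))
batch_size = -1
effective_bs = len(ds) if batch_size == -1 else batch_size
dl = DataLoader(ds, batch_size=effective_bs)
print(len(list(dl)))  # 1 -> the whole dataset arrives as one batch
```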
| 5,442 | 36.027211 | 82 | py |

FATE | FATE-master/python/federatedml/nn/hetero/model.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import json
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.param.hetero_nn_param import HeteroNNParam
from federatedml.nn.hetero.strategy.selector import SelectorFactory
from federatedml.nn.hetero.nn_component.bottom_model import BottomModel
from federatedml.nn.hetero.nn_component.top_model import TopModel
from federatedml.nn.backend.utils.common import global_seed
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNModelMeta
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import OptimizerParam
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNModelParam
from federatedml.nn.hetero.interactive.he_interactive_layer import HEInteractiveLayerGuest, HEInteractiveLayerHost
class HeteroNNModel(object):
def __init__(self):
self.partition = 1
self.batch_size = None
self.bottom_nn_define = None
self.top_nn_define = None
self.interactive_layer_define = None
self.optimizer = None
self.config_type = None
self.transfer_variable = None
self._predict_round = 0
def load_model(self):
pass
def predict(self, data):
pass
def export_model(self):
pass
def get_hetero_nn_model_meta(self):
pass
def get_hetero_nn_model_param(self):
pass
def set_hetero_nn_model_meta(self, model_meta):
pass
def set_hetero_nn_model_param(self, model_param):
pass
def set_partition(self, partition):
pass
def inc_predict_round(self):
self._predict_round += 1
class HeteroNNGuestModel(HeteroNNModel):
def __init__(self, hetero_nn_param, component_properties, flowid):
super(HeteroNNGuestModel, self).__init__()
self.role = consts.GUEST
self.bottom_model: BottomModel = None
self.top_model: TopModel = None
self.interactive_model: HEInteractiveLayerGuest = None
self.loss = None
self.hetero_nn_param = None
self.is_empty = False
self.coae_param = None
self.seed = 100
self.set_nn_meta(hetero_nn_param)
self.component_properties = component_properties
self.flowid = flowid
self.label_num = 1
self.selector = SelectorFactory.get_selector(
hetero_nn_param.selector_param.method,
hetero_nn_param.selector_param.selective_size,
beta=hetero_nn_param.selector_param.beta,
random_rate=hetero_nn_param.selector_param.random_state,
min_prob=hetero_nn_param.selector_param.min_prob)
def set_nn_meta(self, hetero_nn_param: HeteroNNParam):
self.bottom_nn_define = hetero_nn_param.bottom_nn_define
self.top_nn_define = hetero_nn_param.top_nn_define
self.interactive_layer_define = hetero_nn_param.interactive_layer_define
self.config_type = hetero_nn_param.config_type
self.optimizer = hetero_nn_param.optimizer
self.loss = hetero_nn_param.loss
self.hetero_nn_param = hetero_nn_param
self.batch_size = hetero_nn_param.batch_size
self.seed = hetero_nn_param.seed
coae_param = hetero_nn_param.coae_param
if coae_param.enable:
self.coae_param = coae_param
def set_empty(self):
self.is_empty = True
def set_label_num(self, label_num):
self.label_num = label_num
if self.top_model is not None: # warmstart case
self.top_model.label_num = label_num
def train(self, x, y, epoch, batch_idx):
if self.batch_size == -1:
self.batch_size = x.shape[0]
global_seed(self.seed)
if self.top_model is None:
self._build_top_model()
LOGGER.debug('top model is {}'.format(self.top_model))
if not self.is_empty:
if self.bottom_model is None:
self._build_bottom_model()
LOGGER.debug('bottom model is {}'.format(self.bottom_model))
self.bottom_model.train_mode(True)
guest_bottom_output = self.bottom_model.forward(x)
else:
guest_bottom_output = None
if self.interactive_model is None:
self._build_interactive_model()
interactive_output = self.interactive_model.forward(
x=guest_bottom_output, epoch=epoch, batch=batch_idx, train=True)
self.top_model.train_mode(True)
selective_ids, gradients, loss = self.top_model.train_and_get_backward_gradient(
interactive_output, y)
interactive_layer_backward = self.interactive_model.backward(
error=gradients, epoch=epoch, batch=batch_idx, selective_ids=selective_ids)
if not self.is_empty:
self.bottom_model.backward(
x, interactive_layer_backward, selective_ids)
return loss
def predict(self, x, batch=0):
if not self.is_empty:
self.bottom_model.train_mode(False)
guest_bottom_output = self.bottom_model.predict(x)
else:
guest_bottom_output = None
interactive_output = self.interactive_model.forward(
guest_bottom_output, epoch=self._predict_round, batch=batch, train=False)
self.top_model.train_mode(False)
preds = self.top_model.predict(interactive_output)
# the prediction procedure keeps its own iteration count; we do this
# to avoid reusing communication suffixes
self.inc_predict_round()
return preds
def get_hetero_nn_model_param(self):
model_param = HeteroNNModelParam()
model_param.is_empty = self.is_empty
if not self.is_empty:
model_param.bottom_saved_model_bytes = self.bottom_model.export_model()
model_param.top_saved_model_bytes = self.top_model.export_model()
model_param.interactive_layer_param.CopyFrom(
self.interactive_model.export_model())
coae_bytes = self.top_model.export_coae()
if coae_bytes is not None:
model_param.coae_bytes = coae_bytes
return model_param
def set_hetero_nn_model_param(self, model_param):
self.is_empty = model_param.is_empty
if not self.is_empty:
self._restore_bottom_model(model_param.bottom_saved_model_bytes)
self._restore_interactive_model(model_param.interactive_layer_param)
self._restore_top_model(model_param.top_saved_model_bytes)
self.top_model.restore_coae(model_param.coae_bytes)
def get_hetero_nn_model_meta(self):
model_meta = HeteroNNModelMeta()
model_meta.config_type = self.config_type
model_meta.bottom_nn_define.append(json.dumps(self.bottom_nn_define))
model_meta.top_nn_define.append(json.dumps(self.top_nn_define))
model_meta.interactive_layer_define = json.dumps(
self.interactive_layer_define)
model_meta.interactive_layer_lr = self.hetero_nn_param.interactive_layer_lr
optimizer_param = OptimizerParam()
model_meta.loss = json.dumps(self.loss)
optimizer_param.optimizer = self.optimizer['optimizer']
tmp_dict = copy.deepcopy(self.optimizer)
tmp_dict.pop('optimizer')
optimizer_param.kwargs = json.dumps(tmp_dict)
model_meta.optimizer_param.CopyFrom(optimizer_param)
return model_meta
def set_hetero_nn_model_meta(self, model_meta):
self.config_type = model_meta.config_type
self.bottom_nn_define = json.loads(model_meta.bottom_nn_define[0])
self.top_nn_define = json.loads(model_meta.top_nn_define[0])
self.interactive_layer_define = json.loads(
model_meta.interactive_layer_define)
self.loss = json.loads(model_meta.loss)
if self.optimizer is None:
from types import SimpleNamespace
self.optimizer = SimpleNamespace(optimizer=None, kwargs={})
self.optimizer.optimizer = model_meta.optimizer_param.optimizer
self.optimizer.kwargs = json.loads(
model_meta.optimizer_param.kwargs)
tmp_opt = {'optimizer': self.optimizer.optimizer}
tmp_opt.update(self.optimizer.kwargs)
self.optimizer = tmp_opt
def set_transfer_variable(self, transfer_variable):
self.transfer_variable = transfer_variable
def set_partition(self, partition=1):
self.partition = partition
if self.interactive_model is not None:
self.interactive_model.set_partition(self.partition)
def _init_bottom_select_strategy(self):
if self.selector:
self.bottom_model.set_backward_select_strategy()
self.bottom_model.set_batch(self.batch_size)
def _build_bottom_model(self):
self.bottom_model = BottomModel(
optimizer=self.optimizer,
layer_config=self.bottom_nn_define)
self._init_bottom_select_strategy()
def _restore_bottom_model(self, model_bytes):
self._build_bottom_model()
self.bottom_model.restore_model(model_bytes)
self._init_bottom_select_strategy()
def _init_top_select_strategy(self):
if self.selector:
self.top_model.set_backward_selector_strategy(
selector=self.selector)
self.top_model.set_batch(self.batch_size)
def _build_top_model(self):
if self.top_nn_define is None:
raise ValueError(
'top nn model define is None, you must define your top model in guest side')
self.top_model = TopModel(
optimizer=self.optimizer,
layer_config=self.top_nn_define,
loss=self.loss,
coae_config=self.coae_param,
label_num=self.label_num
)
self._init_top_select_strategy()
def _restore_top_model(self, model_bytes):
self._build_top_model()
self.top_model.restore_model(model_bytes)
self._init_top_select_strategy()
def _init_inter_layer(self):
self.interactive_model.set_partition(self.partition)
self.interactive_model.set_batch(self.batch_size)
self.interactive_model.set_flow_id('{}_interactive_layer'.format(self.flowid))
if self.selector:
self.interactive_model.set_backward_select_strategy()
def _build_interactive_model(self):
self.interactive_model = HEInteractiveLayerGuest(
params=self.hetero_nn_param,
layer_config=self.interactive_layer_define,
host_num=len(
self.component_properties.host_party_idlist))
self._init_inter_layer()
def _restore_interactive_model(self, interactive_model_param):
self._build_interactive_model()
self.interactive_model.restore_model(interactive_model_param)
self._init_inter_layer()
class HeteroNNHostModel(HeteroNNModel):
def __init__(self, hetero_nn_param, flowid):
super(HeteroNNHostModel, self).__init__()
self.role = consts.HOST
self.bottom_model: BottomModel = None
self.interactive_model = None
self.hetero_nn_param = None
self.seed = 100
self.set_nn_meta(hetero_nn_param)
self.selector = SelectorFactory.get_selector(
hetero_nn_param.selector_param.method,
hetero_nn_param.selector_param.selective_size,
beta=hetero_nn_param.selector_param.beta,
random_rate=hetero_nn_param.selector_param.random_state,
min_prob=hetero_nn_param.selector_param.min_prob)
self.flowid = flowid
def set_nn_meta(self, hetero_nn_param):
self.bottom_nn_define = hetero_nn_param.bottom_nn_define
self.config_type = hetero_nn_param.config_type
self.optimizer = hetero_nn_param.optimizer
self.hetero_nn_param = hetero_nn_param
self.batch_size = hetero_nn_param.batch_size
self.seed = hetero_nn_param.seed
def _build_bottom_model(self):
if self.bottom_nn_define is None:
raise ValueError(
'bottom nn model define is None, you must define your bottom model in host')
self.bottom_model = BottomModel(
optimizer=self.optimizer,
layer_config=self.bottom_nn_define)
def _restore_bottom_model(self, model_bytes):
self._build_bottom_model()
self.bottom_model.restore_model(model_bytes)
def _build_interactive_model(self):
self.interactive_model = HEInteractiveLayerHost(self.hetero_nn_param)
self.interactive_model.set_partition(self.partition)
self.interactive_model.set_flow_id('{}_interactive_layer'.format(self.flowid))
def _restore_interactive_model(self, interactive_layer_param):
self._build_interactive_model()
self.interactive_model.restore_model(interactive_layer_param)
self.interactive_model.set_partition(self.partition)
self.interactive_model.set_flow_id('{}_interactive_layer'.format(self.flowid))
def set_transfer_variable(self, transfer_variable):
self.transfer_variable = transfer_variable
def set_partition(self, partition=1):
self.partition = partition
if self.interactive_model is not None:
self.interactive_model.set_partition(self.partition)
LOGGER.debug(
"set_partition, partition num is {}".format(
self.partition))
def get_hetero_nn_model_meta(self):
model_meta = HeteroNNModelMeta()
model_meta.config_type = self.config_type
model_meta.bottom_nn_define.append(json.dumps(self.bottom_nn_define))
model_meta.interactive_layer_lr = self.hetero_nn_param.interactive_layer_lr
optimizer_param = OptimizerParam()
optimizer_param.optimizer = self.optimizer['optimizer']
tmp_opt = copy.deepcopy(self.optimizer)
tmp_opt.pop('optimizer')
optimizer_param.kwargs = json.dumps(tmp_opt)
model_meta.optimizer_param.CopyFrom(optimizer_param)
return model_meta
def set_hetero_nn_model_meta(self, model_meta):
self.config_type = model_meta.config_type
self.bottom_nn_define = json.loads(model_meta.bottom_nn_define[0])
if self.optimizer is None:
from types import SimpleNamespace
self.optimizer = SimpleNamespace(optimizer=None, kwargs={})
self.optimizer.optimizer = model_meta.optimizer_param.optimizer
self.optimizer.kwargs = json.loads(
model_meta.optimizer_param.kwargs)
tmp_opt = {'optimizer': self.optimizer.optimizer}
tmp_opt.update(self.optimizer.kwargs)
self.optimizer = tmp_opt
def set_hetero_nn_model_param(self, model_param):
self._restore_bottom_model(model_param.bottom_saved_model_bytes)
self._restore_interactive_model(model_param.interactive_layer_param)
def get_hetero_nn_model_param(self):
model_param = HeteroNNModelParam()
model_param.bottom_saved_model_bytes = self.bottom_model.export_model()
model_param.interactive_layer_param.CopyFrom(
self.interactive_model.export_model())
return model_param
def train(self, x, epoch, batch_idx):
if self.bottom_model is None:
global_seed(self.seed)
self._build_bottom_model()
if self.batch_size == -1:
self.batch_size = x.shape[0]
self._build_interactive_model()
if self.selector:
self.bottom_model.set_backward_select_strategy()
self.bottom_model.set_batch(self.batch_size)
self.interactive_model.set_backward_select_strategy()
self.bottom_model.train_mode(True)
host_bottom_output = self.bottom_model.forward(x)
self.interactive_model.forward(
host_bottom_output, epoch, batch_idx, train=True)
host_gradient, selective_ids = self.interactive_model.backward(
epoch, batch_idx)
self.bottom_model.backward(x, host_gradient, selective_ids)
def predict(self, x, batch=0):
self.bottom_model.train_mode(False)
guest_bottom_output = self.bottom_model.predict(x)
self.interactive_model.forward(
guest_bottom_output,
epoch=self._predict_round,
batch=batch,
train=False)
# the prediction procedure keeps its own iteration count; we do this
# to avoid reusing communication suffixes
self.inc_predict_round()
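A toy, plain-PyTorch sketch of the split-learning flow the two classes above implement, with the homomorphic encryption and federation stripped out; the additive "interactive layer", the shapes, and the models are illustrative assumptions:

```python
import torch

host_x, guest_x = torch.randn(8, 5), torch.randn(8, 3)
y = torch.randint(0, 2, (8,))
host_bottom, guest_bottom = torch.nn.Linear(5, 4), torch.nn.Linear(3, 4)
top = torch.nn.Linear(4, 2)

h_out, g_out = host_bottom(host_x), guest_bottom(guest_x)
inter = h_out + g_out                                    # stand-in for the HE interactive layer
loss = torch.nn.functional.cross_entropy(top(inter), y)
# the gradient w.r.t. the host's output plays the role of the error sent back to the host
host_grad = torch.autograd.grad(loss, h_out, retain_graph=True)[0]
loss.backward()                                          # guest-side update
print(host_grad.shape)                                   # torch.Size([8, 4])
```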
| 17,203 | 37.573991 | 114 | py |

FATE | FATE-master/python/federatedml/nn/hetero/__init__.py |  | 0 | 0 | 0 | py |

FATE | FATE-master/python/federatedml/nn/hetero/guest.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch
from torch.utils.data import DataLoader
from fate_arch.computing._util import is_table
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.framework.hetero.procedure import batch_generator
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.nn.hetero.base import HeteroNNBase
from federatedml.nn.hetero.model import HeteroNNGuestModel
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.hetero_nn_param import HeteroNNParam as NNParameter
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNMeta
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNParam
from federatedml.util import consts, LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.nn.dataset.table import TableDataset
from federatedml.statistic.data_overview import check_with_inst_id
from federatedml.nn.backend.utils.data import add_match_id
MODELMETA = "HeteroNNGuestMeta"
MODELPARAM = "HeteroNNGuestParam"
class HeteroNNGuest(HeteroNNBase):
def __init__(self):
super(HeteroNNGuest, self).__init__()
self.task_type = None
self.converge_func = None
self.batch_generator = batch_generator.Guest()
self.data_keys = []
self.label_dict = {}
self.model = None
self.role = consts.GUEST
self.history_loss = []
self.input_shape = None
self._summary_buf = {"history_loss": [],
"is_converged": False,
"best_iteration": -1}
self.dataset_cache_dict = {}
self.default_table_partitions = 4
def _init_model(self, hetero_nn_param):
super(HeteroNNGuest, self)._init_model(hetero_nn_param)
self.task_type = hetero_nn_param.task_type
self.converge_func = converge_func_factory(self.early_stop, self.tol)
def _build_model(self):
self.model = HeteroNNGuestModel(
self.hetero_nn_param, self.component_properties, self.flowid)
self.model.set_transfer_variable(self.transfer_variable)
self.model.set_partition(self.default_table_partitions)
def _set_loss_callback_info(self):
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"unit_name": "iters"}))
@staticmethod
def _disable_sample_weight(dataset):
# sample weight is currently not supported
if isinstance(dataset, TableDataset):
dataset.with_sample_weight = False
def fit(self, data_inst, validate_data=None):
if hasattr(
data_inst,
'partitions') and data_inst.partitions is not None:
self.default_table_partitions = data_inst.partitions
LOGGER.debug(
'reset default partitions is {}'.format(
self.default_table_partitions))
train_ds = self.prepare_dataset(
data_inst, data_type='train', check_label=True)
train_ds.train() # set dataset to train mode
self._disable_sample_weight(train_ds)
if validate_data is not None:
val_ds = self.prepare_dataset(validate_data, data_type='validate')
val_ds.train() # set dataset to train mode
self._disable_sample_weight(val_ds)
else:
val_ds = None
self.callback_list.on_train_begin(train_ds, val_ds)
# collect data from table to form data loader
if not self.component_properties.is_warm_start:
self._build_model()
epoch_offset = 0
else:
self.callback_warm_start_init_iter(self.history_iter_epoch)
epoch_offset = self.history_iter_epoch + 1
# set label number
self.model.set_label_num(self.label_num)
if len(train_ds) == 0:
self.model.set_empty()
self._set_loss_callback_info()
batch_size = len(train_ds) if self.batch_size == -1 else self.batch_size
data_loader = DataLoader(
train_ds,
batch_size=batch_size,
num_workers=4)
for cur_epoch in range(epoch_offset, self.epochs + epoch_offset):
self.iter_epoch = cur_epoch
LOGGER.debug("cur epoch is {}".format(cur_epoch))
self.callback_list.on_epoch_begin(cur_epoch)
epoch_loss = 0
acc_sample_num = 0
for batch_idx, (batch_data, batch_label) in enumerate(data_loader):
batch_loss = self.model.train(
batch_data, batch_label, cur_epoch, batch_idx)
if acc_sample_num + batch_size > len(train_ds):
batch_len = len(train_ds) - acc_sample_num
else:
batch_len = batch_size
acc_sample_num += batch_size
epoch_loss += batch_loss * batch_len
epoch_loss = epoch_loss / len(train_ds)
LOGGER.debug("epoch {} loss is {}".format(cur_epoch, epoch_loss))
self.callback_metric("loss",
"train",
[Metric(cur_epoch, epoch_loss)])
self.history_loss.append(epoch_loss)
self.callback_list.on_epoch_end(cur_epoch)
if self.callback_variables.stop_training:
LOGGER.debug('early stopping triggered')
break
if self.hetero_nn_param.selector_param.method:
# when use selective bp, loss converge will be disabled
is_converge = False
else:
is_converge = self.converge_func.is_converge(epoch_loss)
self._summary_buf["is_converged"] = is_converge
self.transfer_variable.is_converge.remote(is_converge,
role=consts.HOST,
idx=-1,
suffix=(cur_epoch,))
if is_converge:
LOGGER.debug(
"Training process is converged in epoch {}".format(cur_epoch))
break
self.callback_list.on_train_end()
self.set_summary(self._get_model_summary())
@assert_io_num_rows_equal
def predict(self, data_inst):
with_match_id = False
if is_table(data_inst):
with_match_id = check_with_inst_id(data_inst)
ds = self.prepare_dataset(data_inst, data_type='predict')
ds.eval() # set dataset to eval mode
self._disable_sample_weight(ds)
keys = ds.get_sample_ids()
batch_size = len(ds) if self.batch_size == -1 else self.batch_size
dl = DataLoader(ds, batch_size=batch_size)
preds = []
labels = []
for batch_data, batch_label in dl:
batch_pred = self.model.predict(batch_data)
preds.append(batch_pred)
labels.append(batch_label)
preds = np.concatenate(preds, axis=0)
labels = torch.concat(labels, dim=0).cpu().numpy().flatten().tolist()
id_table = [(id_, Instance(label=l)) for id_, l in zip(keys, labels)]
if with_match_id:
add_match_id(id_table, ds.ds) # ds is wrap shuffle dataset here
data_inst = session.parallelize(
id_table,
partition=self.default_table_partitions,
include_key=True)
if self.task_type == consts.REGRESSION:
preds = preds.flatten().tolist()
preds = [float(pred) for pred in preds]
predict_tb = session.parallelize(zip(keys, preds), include_key=True,
partition=self.default_table_partitions)
result = self.predict_score_to_output(data_inst, predict_tb)
else:
if self.label_num > 2:
preds = preds.tolist()
preds = [list(map(float, pred)) for pred in preds]
predict_tb = session.parallelize(zip(keys, preds), include_key=True,
partition=self.default_table_partitions)
result = self.predict_score_to_output(
data_inst, predict_tb, classes=list(range(self.label_num)))
else:
preds = preds.flatten().tolist()
preds = [float(pred) for pred in preds]
predict_tb = session.parallelize(zip(keys, preds), include_key=True,
partition=self.default_table_partitions)
threshold = self.predict_param.threshold
result = self.predict_score_to_output(
data_inst, predict_tb, classes=[
0, 1], threshold=threshold)
return result
def export_model(self):
if self.need_cv:
return None
model = {MODELMETA: self._get_model_meta(),
MODELPARAM: self._get_model_param()}
return model
def load_model(self, model_dict):
model_dict = list(model_dict["model"].values())[0]
param = model_dict.get(MODELPARAM)
meta = model_dict.get(MODELMETA)
if self.hetero_nn_param is None:
self.hetero_nn_param = NNParameter()
self.hetero_nn_param.check()
self.predict_param = self.hetero_nn_param.predict_param
self._build_model()
self._restore_model_meta(meta)
self._restore_model_param(param)
def _get_model_summary(self):
self._summary_buf["history_loss"] = self.history_loss
if self.callback_variables.validation_summary:
self._summary_buf["validation_metrics"] = self.callback_variables.validation_summary
"""
if self.validation_strategy:
validation_summary = self.validation_strategy.summary()
if validation_summary:
self._summary_buf["validation_metrics"] = validation_summary
"""
return self._summary_buf
def _get_model_meta(self):
model_meta = HeteroNNMeta()
model_meta.task_type = self.task_type
model_meta.module = 'HeteroNN'
model_meta.batch_size = self.batch_size
model_meta.epochs = self.epochs
model_meta.early_stop = self.early_stop
model_meta.tol = self.tol
model_meta.hetero_nn_model_meta.CopyFrom(
self.model.get_hetero_nn_model_meta())
return model_meta
def _get_model_param(self):
model_param = HeteroNNParam()
model_param.iter_epoch = self.iter_epoch
model_param.hetero_nn_model_param.CopyFrom(
self.model.get_hetero_nn_model_param())
model_param.num_label = self.label_num
model_param.best_iteration = self.callback_variables.best_iteration
model_param.header.extend(self._header)
for loss in self.history_loss:
model_param.history_loss.append(loss)
return model_param
def get_metrics_param(self):
if self.task_type == consts.CLASSIFICATION:
if self.label_num == 2:
return EvaluateParam(eval_type="binary",
pos_label=1, metrics=self.metrics)
else:
return EvaluateParam(eval_type="multi", metrics=self.metrics)
else:
return EvaluateParam(eval_type="regression", metrics=self.metrics)
def _restore_model_param(self, param):
super(HeteroNNGuest, self)._restore_model_param(param)
self.label_num = param.num_label
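For the binary branch of `predict` above, scores are flattened and handed to `predict_score_to_output` together with a threshold. A sketch of the thresholding this implies (values are hypothetical):

```python
preds = [0.2, 0.7, 0.55]
threshold = 0.5
print([int(p > threshold) for p in preds])  # [0, 1, 1]
```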
| 12,612 | 37.571865 | 96 | py |

FATE | FATE-master/python/federatedml/nn/hetero/nn_component/bottom_model.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch as t
import numpy as np
from federatedml.util import LOGGER
from federatedml.nn.hetero.nn_component.torch_model import TorchNNModel
class BottomModel(object):
def __init__(self, optimizer, layer_config):
self._model: TorchNNModel = TorchNNModel(nn_define=layer_config, optimizer_define=optimizer,
loss_fn_define=None)
self.do_backward_select_strategy = False
self.x = []
self.x_cached = []
self.batch_size = None
def set_backward_select_strategy(self):
self.do_backward_select_strategy = True
def set_batch(self, batch_size):
self.batch_size = batch_size
def train_mode(self, mode):
self._model.train_mode(mode)
def forward(self, x):
LOGGER.debug("bottom model start to forward propagation")
self.x = x
if self.do_backward_select_strategy:
if (not isinstance(x, np.ndarray) and not isinstance(x, t.Tensor)):
raise ValueError(
'When using selective bp, data from dataset must be a ndarray or a torch tensor, but got {}'.format(
type(x)))
if self.do_backward_select_strategy:
output_data = self._model.predict(x)
else:
output_data = self._model.forward(x)
return output_data
def backward(self, x, error, selective_ids):
LOGGER.debug("bottom model start to backward propagation")
if self.do_backward_select_strategy:
if selective_ids:
if len(self.x_cached) == 0:
self.x_cached = self.x[selective_ids]
else:
self.x_cached = np.vstack(
(self.x_cached, self.x[selective_ids]))
if len(error) == 0:
return
x = self.x_cached[: self.batch_size]
self.x_cached = self.x_cached[self.batch_size:]
self._model.train((x, error))
else:
self._model.backward(error)
LOGGER.debug('bottom model update parameters:')
def predict(self, x):
return self._model.predict(x)
def export_model(self):
return self._model.export_model()
def restore_model(self, model_bytes):
self._model = self._model.restore_model(model_bytes)
def __repr__(self):
return 'bottom model contains {}'.format(self._model.__repr__())
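`BottomModel.backward` above caches the selected rows and consumes them in fixed-size chunks. A hedged numpy sketch of that cache discipline, with toy data standing in for the cached inputs:

```python
import numpy as np

cache = np.empty((0, 3))
batch_size = 2
for selective_ids, x in [([0, 2], np.arange(12).reshape(4, 3)),
                         ([1], np.arange(12, 24).reshape(4, 3))]:
    cache = np.vstack((cache, x[selective_ids]))   # append newly selected rows
    while len(cache) >= batch_size:
        chunk, cache = cache[:batch_size], cache[batch_size:]
        print('train on', chunk.shape)             # stands in for self._model.train((x, error))
```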
| 3,119 | 32.913043 | 120 | py |

FATE | FATE-master/python/federatedml/nn/hetero/nn_component/top_model.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch
from federatedml.nn.hetero.nn_component.torch_model import TorchNNModel
from federatedml.nn.hetero.protection_enhance.coae import train_an_autoencoder_confuser, CoAE, coae_label_reformat, \
CrossEntropy
from federatedml.util import LOGGER
class TopModel(object):
def __init__(self, loss, optimizer, layer_config, coae_config, label_num):
self.coae = None
self.coae_config = coae_config
self.label_num = label_num
LOGGER.debug('label num is {}'.format(self.label_num))
self._model: TorchNNModel = TorchNNModel(nn_define=layer_config, optimizer_define=optimizer,
loss_fn_define=loss)
self.label_reformat = None
if self.coae_config:
self._model.loss_fn = CrossEntropy()
if self.coae_config:
self.label_reformat = coae_label_reformat
self.batch_size = None
self.selector = None
self.batch_data_cached_X = []
self.batch_data_cached_y = []
def set_backward_selector_strategy(self, selector):
self.selector = selector
def set_batch(self, batch_size):
self.batch_size = batch_size
def train_mode(self, mode):
self._model.train_mode(mode)
def train_and_get_backward_gradient(self, x, y):
LOGGER.debug("top model start to forward propagation")
selective_id = []
input_gradient = []
# transform label format
if self.label_reformat:
y = self.label_reformat(y, label_num=self.label_num)
# train an auto-encoder confuser
if self.coae_config and self.coae is None:
LOGGER.debug('training coae encoder')
self.coae: CoAE = train_an_autoencoder_confuser(y.shape[1], self.coae_config.epoch,
self.coae_config.lambda1, self.coae_config.lambda2,
self.coae_config.lr, self.coae_config.verbose)
# make fake soft label
if self.coae:
# transform labels to fake labels
y = self.coae.encode(y).detach().numpy()
LOGGER.debug('fake labels are {}'.format(y))
# run selector
if self.selector:
# when run selective bp, need to convert y to numpy format
if isinstance(y, torch.Tensor):
y = y.cpu().numpy()
losses = self._model.get_forward_loss_from_input(x, y)
loss = sum(losses) / len(losses)
selective_strategy = self.selector.select_batch_sample(losses)
for idx, select in enumerate(selective_strategy):
if select:
selective_id.append(idx)
self.batch_data_cached_X.append(x[idx])
self.batch_data_cached_y.append(y[idx])
if len(self.batch_data_cached_X) >= self.batch_size:
data = (np.array(self.batch_data_cached_X[: self.batch_size]),
np.array(self.batch_data_cached_y[: self.batch_size]))
input_gradient = self._model.get_input_gradients(data[0], data[1])[
0]
self._model.train(data)
self.batch_data_cached_X = self.batch_data_cached_X[self.batch_size:]
self.batch_data_cached_y = self.batch_data_cached_y[self.batch_size:]
else:
input_gradient = self._model.get_input_gradients(x, y)[0]
self._model.train((x, y))
loss = self._model.get_loss()[0]
return selective_id, input_gradient, loss
def predict(self, input_data):
output_data = self._model.predict(input_data)
if self.coae:
real_output = self.coae.decode(output_data).detach().numpy()
if real_output.shape[1] == 2:
real_output = real_output[::, 1].reshape((-1, 1))
return real_output
else:
return output_data
def export_coae(self):
if self.coae:
model_bytes = TorchNNModel.get_model_bytes(self.coae)
return model_bytes
else:
return None
def restore_coae(self, model_bytes):
if model_bytes is not None and len(model_bytes) > 0:
coae = TorchNNModel.recover_model_bytes(model_bytes)
self.coae = coae
def export_model(self):
return self._model.export_model()
def restore_model(self, model_bytes):
self._model = self._model.restore_model(model_bytes)
def __repr__(self):
return 'top model contains {}'.format(self._model.__repr__())
| 5,348 | 35.636986 | 117 | py |

FATE | FATE-master/python/federatedml/nn/hetero/nn_component/__init__.py |  | 0 | 0 | 0 | py |

FATE | FATE-master/python/federatedml/nn/hetero/nn_component/torch_model.py |
import numpy as np
import tempfile
from federatedml.util import LOGGER
try:  # for the case where torch is not installed but other modules can still be used
import torch
import torch as t
import copy
from types import SimpleNamespace
from torch import autograd
from federatedml.nn.backend.torch import serialization as s
from federatedml.nn.backend.torch.base import FateTorchOptimizer
from federatedml.nn.backend.torch.nn import CrossEntropyLoss
from federatedml.nn.backend.torch import optim
except ImportError:
pass
def backward_loss(z, backward_error):
return t.sum(z * backward_error)
class TorchNNModel(object):
def __init__(self, nn_define: dict, optimizer_define: dict = None, loss_fn_define: dict = None, cuda=False):
self.cuda = cuda
self.double_model = False
if self.cuda and not t.cuda.is_available():
raise ValueError(
'this machine does not support cuda, cuda.is_available() is False')
self.optimizer_define = optimizer_define
self.nn_define = nn_define
self.loss_fn_define = loss_fn_define
self.loss_history = []
self.model, self.opt_inst, self.loss_fn = self.init(
self.nn_define, self.optimizer_define, self.loss_fn_define)
self.fw_cached = None
def to_tensor(self, x: np.ndarray):
if isinstance(x, np.ndarray):
x = t.from_numpy(x)
if self.cuda:
return x.cuda()
else:
return x
def label_convert(self, y, loss_fn):
# pytorch CE loss require 1D-int64-tensor
if isinstance(loss_fn, CrossEntropyLoss):
return t.Tensor(y).flatten().type(
t.int64).flatten() # accept 1-D array
else:
return t.Tensor(y).type(t.float)
def init(self, nn_define: dict, optimizer_define: dict = None, loss_fn_define: dict = None):
model = s.recover_sequential_from_dict(nn_define)
if self.cuda:
model = model.cuda()
if optimizer_define is None: # default optimizer
optimizer = optim.SGD(lr=0.01)
else:
optimizer: FateTorchOptimizer = s.recover_optimizer_from_dict(optimizer_define)
opt_inst = optimizer.to_torch_instance(model.parameters())
if loss_fn_define is None:
loss_fn = backward_loss
else:
loss_fn = s.recover_loss_fn_from_dict(loss_fn_define)
if self.double_model:
model = model.type(t.float64)  # cast the model built above; self.model is not assigned yet here
return model, opt_inst, loss_fn
def print_parameters(self):
LOGGER.debug(
'model parameter is {}'.format(
list(
self.model.parameters())))
def __repr__(self):
return self.model.__repr__() + '\n' + self.opt_inst.__repr__() + \
'\n' + str(self.loss_fn)
def train_mode(self, mode):
self.model.train(mode)
def train(self, data_x_and_y):
x, y = data_x_and_y # this is a tuple
self.opt_inst.zero_grad()
yt = self.to_tensor(y)
xt = self.to_tensor(x)
out = self.model(xt)
loss = self.loss_fn(out, yt)
loss.backward()
loss_val = loss.cpu().detach().numpy()
self.loss_history.append(loss_val)
self.opt_inst.step()
return loss_val
def forward(self, x):
# will cache tensor with grad, this function is especially for bottom
# model
x = self.to_tensor(x)
out = self.model(x)
if self.fw_cached is not None:
raise ValueError('fw_cached should be None before a new forward pass')
self.fw_cached = out
return out.cpu().detach().numpy()
def backward(self, error):
# backward; this function is especially for the bottom model
self.opt_inst.zero_grad()
error = self.to_tensor(error)
loss = self.loss_fn(self.fw_cached, error)
loss.backward()
self.fw_cached = None
self.opt_inst.step()
def predict(self, x):
with torch.no_grad():
return self.model(self.to_tensor(x)).cpu().detach().numpy()
def get_forward_loss_from_input(self, x, y, reduction='none'):
with torch.no_grad():
default_reduction = self.loss_fn.reduction
self.loss_fn.reduction = reduction
yt = self.to_tensor(y)
xt = self.to_tensor(x)
loss = self.loss_fn(self.model(xt), yt)
self.loss_fn.reduction = default_reduction
return list(map(float, loss.detach().numpy()))
def get_input_gradients(self, x, y):
yt = self.to_tensor(y)
xt = self.to_tensor(x).requires_grad_(True)
fw = self.model(xt)
loss = self.loss_fn(fw, yt)
grad = autograd.grad(loss, xt)
return [grad[0].detach().numpy()]
def get_loss(self):
return [self.loss_history[-1]]
@staticmethod
def get_model_bytes(model):
with tempfile.TemporaryFile() as f:
torch.save(model, f)
f.seek(0)
return f.read()
@staticmethod
def recover_model_bytes(model_bytes):
with tempfile.TemporaryFile() as f:
f.write(model_bytes)
f.seek(0)
model = torch.load(f)
return model
@staticmethod
def get_model_save_dict(model: t.nn.Module, model_define, optimizer: t.optim.Optimizer, optimizer_define,
loss_define):
with tempfile.TemporaryFile() as f:
save_dict = {
'nn_define': model_define,
'model': model.state_dict(),
'optimizer_define': optimizer_define,
'optimizer': optimizer.state_dict(),
'loss_define': loss_define
}
torch.save(save_dict, f)
f.seek(0)
return f.read()
@staticmethod
def recover_model_save_dict(model_bytes):
with tempfile.TemporaryFile() as f:
f.write(model_bytes)
f.seek(0)
save_dict = torch.load(f)
return save_dict
def restore_model(self, model_bytes):
save_dict = self.recover_model_save_dict(model_bytes)
self.nn_define = save_dict['nn_define']
opt_define = save_dict['optimizer_define']
# optimizer can be updated
# old define == new define, load state dict
if opt_define == self.optimizer_define:
opt_inst: t.optim.Optimizer = self.opt_inst
opt_inst.load_state_dict(save_dict['optimizer'])
# load state dict
self.model.load_state_dict(save_dict['model'])
return self
def export_model(self):
return self.get_model_save_dict(
self.model,
self.nn_define,
self.opt_inst,
self.optimizer_define,
self.loss_fn_define)
| 6,909 | 30.697248 | 112 |
py
|
FATE
|
FATE-master/python/federatedml/nn/hetero/protection_enhance/coae.py
|
from federatedml.util import LOGGER
from federatedml.util import consts
try:
import torch
import torch as t
from torch import nn
from torch.nn import Module
from torch.nn import functional as F
except ImportError:
Module = object
def entropy(tensor):
return -t.sum(tensor * t.log2(tensor))
def cross_entropy(p2, p1, reduction='mean'):
p2 = p2 + consts.FLOAT_ZERO # to avoid nan
assert p2.shape == p1.shape
if reduction == 'sum':
return -t.sum(p1 * t.log(p2))
elif reduction == 'mean':
return -t.mean(t.sum(p1 * t.log(p2), dim=1))
elif reduction == 'none':
return -t.sum(p1 * t.log(p2), dim=1)
else:
raise ValueError('unknown reduction')
def cross_entropy_for_one_hot(pred, target, reduce="mean"):
if reduce == "mean":
return torch.mean(torch.sum(- target * F.log_softmax(pred, dim=-1), 1))
elif reduce == "sum":
return torch.sum(torch.sum(- target * F.log_softmax(pred, dim=-1), 1))
else:
raise Exception("Does not support reduce [{}]".format(reduce))
def coae_loss(
label,
fake_label,
reconstruct_label,
lambda_1=10,
lambda_2=2,
verbose=False):
loss_a = cross_entropy(reconstruct_label, label) - \
lambda_1 * cross_entropy(fake_label, label)
loss_b = entropy(fake_label)
if verbose:
LOGGER.debug(
'loss a is {} {}'.format(
cross_entropy(
reconstruct_label, label), cross_entropy(
fake_label, label)))
LOGGER.debug('loss b is {}'.format(loss_b))
return loss_a - lambda_2 * loss_b
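# A hedged numeric intuition for coae_loss: loss_a rewards accurate label
# reconstruction while (scaled by lambda_1) pushing the fake labels away from
# the true ones, and subtracting lambda_2 * entropy(fake_label) rewards soft
# fake labels. For instance, with label=[[1., 0.]], fake_label=[[0.1, 0.9]]
# and reconstruct_label=[[0.9, 0.1]], cross_entropy(reconstruct_label, label)
# is about 0.105 while cross_entropy(fake_label, label) is about 2.30, so the
# total loss is strongly negative -- the desired confusion behaviour.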
class CrossEntropy(object):
def __init__(self, reduction='mean'):
self.reduction = reduction
def __call__(self, p2, p1):
return cross_entropy(p2, p1, self.reduction)
class CoAE(Module):
def __init__(self, input_dim=2, encode_dim=None):
super(CoAE, self).__init__()
self.d = input_dim
if encode_dim is None:
encode_dim = (6 * input_dim) ** 2
self.encoder = nn.Sequential(
nn.Linear(input_dim, encode_dim),
nn.ReLU(),
nn.Linear(encode_dim, input_dim),
nn.Softmax(dim=1)
)
self.decoder = nn.Sequential(
nn.Linear(input_dim, encode_dim),
nn.ReLU(),
nn.Linear(encode_dim, input_dim),
nn.Softmax(dim=1)
)
def encode(self, x):
x = t.Tensor(x)
return self.encoder(x)
def decode(self, fake_labels):
fake_labels = t.Tensor(fake_labels)
return self.decoder(fake_labels)
def forward(self, x):
x = t.Tensor(x)
z = self.encoder(x)
return self.decoder(z), z
def train_an_autoencoder_confuser(
label_num,
epoch=50,
lambda1=1,
lambda2=2,
lr=0.001,
verbose=False):
coae = CoAE(label_num, )
labels = torch.eye(label_num)
opt = torch.optim.Adam(coae.parameters(), lr=lr)
for i in range(epoch):
opt.zero_grad()
fake_labels = coae.encode(labels)
reconstruct_labels = coae.decode(fake_labels)
loss = coae_loss(
labels,
fake_labels,
reconstruct_labels,
lambda1,
lambda2,
verbose=verbose)
loss.backward()
opt.step()
if verbose:
LOGGER.debug(
'origin labels {}, fake labels {}, reconstruct labels {}'.format(
labels, coae.encode(labels).detach().numpy(), coae.decode(
coae.encode(labels)).detach().numpy()))
return coae
def coae_label_reformat(labels, label_num):
LOGGER.debug('label shape is {}'.format(labels.shape))
    if label_num == 1:  # regression
        raise ValueError('label_num == 1: regression tasks are not supported by COAE')
else:
return nn.functional.one_hot(
t.Tensor(labels).flatten().type(
t.int64), label_num).numpy()
if __name__ == '__main__':
coae = train_an_autoencoder_confuser(
2,
epoch=1000,
verbose=True,
lambda1=2.0,
lambda2=1.0,
lr=0.02)
| 4,246 | 25.710692 | 79 |
py
|
FATE
|
FATE-master/python/federatedml/nn/hetero/protection_enhance/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/hetero/strategy/comparision.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
from sortedcontainers import SortedList
class Comparision(object):
def __init__(self, size):
self._histograms = collections.deque(maxlen=size)
self._sorted_hist = SortedList()
def add(self, value):
if len(self._histograms) == self._histograms.maxlen:
self._sorted_hist.remove(self._histograms[0])
self._histograms.append(value)
self._sorted_hist.add(value)
def _get_lt_count(self, value):
return self._sorted_hist.bisect_left(value=value)
def _get_le_count(self, value):
return self._sorted_hist.bisect_right(value=value)
def _get_size(self):
return len(self._histograms)
def get_rate(self, value):
return self._get_lt_count(value) / self._get_size()
def is_topk(self, value, k):
if self._get_size() <= k:
return True
return self._get_size() - self._get_le_count(value) < k
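if __name__ == '__main__':
    # Hedged usage sketch: keep the latest 5 values and query rank statistics.
    comparision = Comparision(size=5)
    for v in [3, 1, 4, 1, 5, 9]:  # 3 is evicted once the window is full
        comparision.add(v)
    print(comparision.get_rate(4))      # fraction of window values strictly below 4 -> 0.4
    print(comparision.is_topk(9, k=2))  # True: 9 is among the 2 largest in the window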
| 1,609 | 28.272727 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/nn/hetero/strategy/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/hetero/strategy/selector.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.nn.hetero.strategy.comparision import Comparision
class RelativeSelector(object):
def __init__(self, max_size=None, beta=1, random_state=None, min_prob=0):
self._comparision = Comparision(size=max_size)
self._beta = beta
self._min_prob = min_prob
np.random.seed(random_state)
def select_batch_sample(self, samples):
select_ret = [False] * len(samples)
for sample in samples:
self._comparision.add(sample)
for idx, sample in enumerate(samples):
select_ret[idx] = max(
self._min_prob, np.power(
np.random.uniform(
0, 1), self._beta)) <= self._comparision.get_rate(sample)
return select_ret
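# Hedged usage sketch: with beta=1 and min_prob=0, a sample is kept with
# probability roughly equal to its rank ratio inside the sliding window, so
# larger values (e.g. losses) are selected for backward more often:
#   selector = RelativeSelector(max_size=100, beta=1, random_state=42)
#   keep_flags = selector.select_batch_sample([0.2, 0.9, 0.5])  # list of bools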
class SelectorFactory(object):
@staticmethod
def get_selector(
method,
selective_size,
beta=1,
random_rate=None,
min_prob=0):
if not method:
return None
elif method == "relative":
return RelativeSelector(
selective_size,
beta,
random_state=random_rate,
min_prob=min_prob)
else:
            raise ValueError("Back Propagation Selector {} not supported yet".format(method))
| 1,988 | 30.571429 | 81 |
py
|
FATE
|
FATE-master/python/federatedml/nn/hetero/interactive/base.py
|
import numpy as np
from federatedml.param.hetero_nn_param import HeteroNNParam
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
class InteractiveLayerBase(object):
def __init__(self, params: HeteroNNParam, **kwargs):
self.params = params
self.transfer_variable: BaseTransferVariables = None
def set_flow_id(self, flow_id):
if self.transfer_variable is not None:
self.transfer_variable.set_flowid(flow_id)
def set_batch(self, batch_size):
pass
def forward(self, x, epoch: int, batch: int, train: bool = True, **kwargs) -> np.ndarray:
pass
def backward(self, *args, **kwargs):
pass
def guest_backward(self, error, epoch: int, batch_idx: int, **kwargs):
pass
def host_backward(self, epoch: int, batch_idx: int, **kwargs):
pass
def export_model(self) -> bytes:
pass
def restore_model(self, model_bytes: bytes):
pass
def set_backward_select_strategy(self):
pass
class InteractiveLayerGuest(InteractiveLayerBase):
def __init__(self, params: HeteroNNParam, **kwargs):
super(InteractiveLayerGuest, self).__init__(params, **kwargs)
def backward(self, error, epoch: int, batch: int, **kwargs):
pass
class InteractiveLayerHost(InteractiveLayerBase):
def __init__(self, params: HeteroNNParam, **kwargs):
super(InteractiveLayerHost, self).__init__(params, **kwargs)
def backward(self, epoch: int, batch: int, **kwargs):
pass
| 1,559 | 25.896552 | 93 |
py
|
FATE
|
FATE-master/python/federatedml/nn/hetero/interactive/he_interactive_layer.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
import numpy as np
import torch
from torch import autograd
from federatedml.nn.hetero.interactive.base import InteractiveLayerGuest, InteractiveLayerHost
from federatedml.nn.hetero.nn_component.torch_model import backward_loss
from federatedml.nn.backend.torch.interactive import InteractiveLayer
from federatedml.nn.backend.torch.serialization import recover_sequential_from_dict
from federatedml.util.fixpoint_solver import FixedPointEncoder
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import InteractiveLayerParam
from federatedml.secureprotol import PaillierEncrypt
from federatedml.util import consts, LOGGER
from federatedml.nn.hetero.interactive.utils.numpy_layer import NumpyDenseLayerGuest, NumpyDenseLayerHost
from federatedml.secureprotol.paillier_tensor import PaillierTensor
from federatedml.nn.hetero.nn_component.torch_model import TorchNNModel
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
from fate_arch.session import computing_session as session
from federatedml.nn.backend.utils.rng import RandomNumberGenerator
PLAINTEXT = False
class HEInteractiveTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.decrypted_guest_forward = self._create_variable(
name='decrypted_guest_forward', src=['host'], dst=['guest'])
self.decrypted_guest_weight_gradient = self._create_variable(
name='decrypted_guest_weight_gradient', src=['host'], dst=['guest'])
self.encrypted_acc_noise = self._create_variable(
name='encrypted_acc_noise', src=['host'], dst=['guest'])
self.encrypted_guest_forward = self._create_variable(
name='encrypted_guest_forward', src=['guest'], dst=['host'])
self.encrypted_guest_weight_gradient = self._create_variable(
name='encrypted_guest_weight_gradient', src=['guest'], dst=['host'])
self.encrypted_host_forward = self._create_variable(
name='encrypted_host_forward', src=['host'], dst=['guest'])
self.host_backward = self._create_variable(
name='host_backward', src=['guest'], dst=['host'])
self.selective_info = self._create_variable(
name="selective_info", src=["guest"], dst=["host"])
self.drop_out_info = self._create_variable(
name="drop_out_info", src=["guest"], dst=["host"])
self.drop_out_table = self._create_variable(
name="drop_out_table", src=["guest"], dst=["host"])
self.interactive_layer_output_unit = self._create_variable(
name="interactive_layer_output_unit", src=["guest"], dst=["host"])
class DropOut(object):
def __init__(self, rate, noise_shape):
self._keep_rate = rate
self._noise_shape = noise_shape
self._batch_size = noise_shape[0]
self._mask = None
self._partition = None
self._mask_table = None
self._select_mask_table = None
self._do_backward_select = False
self._mask_table_cache = {}
def forward(self, X):
if X.shape == self._mask.shape:
forward_x = X * self._mask / self._keep_rate
else:
forward_x = X * self._mask[0: len(X)] / self._keep_rate
return forward_x
def backward(self, grad):
if self._do_backward_select:
self._mask = self._select_mask_table[0: grad.shape[0]]
self._select_mask_table = self._select_mask_table[grad.shape[0]:]
return grad * self._mask / self._keep_rate
else:
if grad.shape == self._mask.shape:
return grad * self._mask / self._keep_rate
else:
return grad * self._mask[0: grad.shape[0]] / self._keep_rate
def generate_mask(self):
self._mask = np.random.uniform(
low=0, high=1, size=self._noise_shape) < self._keep_rate
def generate_mask_table(self, shape):
# generate mask table according to samples shape, because in some
# batches, sample_num < batch_size
if shape == self._noise_shape:
_mask_table = session.parallelize(
self._mask, include_key=False, partition=self._partition)
else:
_mask_table = session.parallelize(
self._mask[0: shape[0]], include_key=False, partition=self._partition)
return _mask_table
def set_partition(self, partition):
self._partition = partition
def select_backward_sample(self, select_ids):
select_mask_table = self._mask[np.array(select_ids)]
if self._select_mask_table is not None:
self._select_mask_table = np.vstack(
(self._select_mask_table, select_mask_table))
else:
self._select_mask_table = select_mask_table
def do_backward_select_strategy(self):
self._do_backward_select = True
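# Hedged usage sketch of the DropOut helper above (a hand-rolled numpy mask,
# not torch dropout): the mask is sampled once per batch and reused in both
# forward and backward, so guest-side activations and gradients stay aligned.
#   d = DropOut(rate=0.8, noise_shape=(4, 3))  # 'rate' is the keep rate here
#   d.generate_mask()
#   fw = d.forward(np.ones((4, 3)))   # masked entries zeroed, rest scaled by 1/0.8
#   bw = d.backward(np.ones((4, 3)))  # the same mask applied to gradients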
class HEInteractiveLayerGuest(InteractiveLayerGuest):
def __init__(self, params=None, layer_config=None, host_num=1):
super(HEInteractiveLayerGuest, self).__init__(params)
# transfer var
self.host_num = host_num
self.layer_config = layer_config
self.transfer_variable = HEInteractiveTransferVariable()
self.plaintext = PLAINTEXT
self.host_input_shapes = []
self.rng_generator = RandomNumberGenerator()
self.learning_rate = params.interactive_layer_lr
# cached tensor
self.guest_tensor = None
self.host_tensors = None
self.dense_output_data_require_grad = None
self.activation_out_require_grad = None
# model
self.model: InteractiveLayer = None
self.guest_model = None
self.host_model_list = []
self.batch_size = None
self.partitions = 0
self.do_backward_select_strategy = False
self.optimizer = None
# drop out
self.drop_out_initiated = False
self.drop_out = None
self.drop_out_keep_rate = None
self.fixed_point_encoder = None if params.floating_point_precision is None else FixedPointEncoder(
2 ** params.floating_point_precision)
self.send_output_unit = False
# float64
self.float64 = False
"""
Init functions
"""
def set_flow_id(self, flow_id):
self.transfer_variable.set_flowid(flow_id)
def set_backward_select_strategy(self):
self.do_backward_select_strategy = True
def set_batch(self, batch_size):
self.batch_size = batch_size
def set_partition(self, partition):
self.partitions = partition
def _build_model(self):
if self.model is None:
raise ValueError('torch interactive model is not initialized!')
for i in range(self.host_num):
host_model = NumpyDenseLayerHost()
host_model.build(self.model.host_model[i])
host_model.set_learning_rate(self.learning_rate)
self.host_model_list.append(host_model)
self.guest_model = NumpyDenseLayerGuest()
self.guest_model.build(self.model.guest_model)
self.guest_model.set_learning_rate(self.learning_rate)
if self.do_backward_select_strategy:
self.guest_model.set_backward_selective_strategy()
self.guest_model.set_batch(self.batch_size)
for host_model in self.host_model_list:
host_model.set_backward_selective_strategy()
host_model.set_batch(self.batch_size)
"""
Drop out functions
"""
def init_drop_out_parameter(self):
if isinstance(self.model.param_dict['dropout'], float):
self.drop_out_keep_rate = 1 - self.model.param_dict['dropout']
else:
self.drop_out_keep_rate = -1
self.transfer_variable.drop_out_info.remote(
self.drop_out_keep_rate, idx=-1, suffix=('dropout_rate', ))
self.drop_out_initiated = True
def _create_drop_out(self, shape):
if self.drop_out_keep_rate and self.drop_out_keep_rate != 1 and self.drop_out_keep_rate > 0:
if not self.drop_out:
self.drop_out = DropOut(
noise_shape=shape, rate=self.drop_out_keep_rate)
self.drop_out.set_partition(self.partitions)
if self.do_backward_select_strategy:
self.drop_out.do_backward_select_strategy()
self.drop_out.generate_mask()
@staticmethod
def expand_columns(tensor, keep_array):
shape = keep_array.shape
tensor = np.reshape(tensor, (tensor.size,))
keep = np.reshape(keep_array, (keep_array.size,))
ret_tensor = []
idx = 0
for x in keep:
if x == 0:
ret_tensor.append(0)
else:
ret_tensor.append(tensor[idx])
idx += 1
return np.reshape(np.array(ret_tensor), shape)
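    # Hedged example of expand_columns: scatter the values of a compacted tensor
    # back to the positions flagged in keep_array, zero-filling the rest:
    #   expand_columns(np.array([5., 6.]), np.array([[0, 1, 0, 1]]))
    #   -> array([[0., 5., 0., 6.]])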
"""
Plaintext forward/backward, these interfaces are for testing
"""
def plaintext_forward(self, guest_input, epoch=0, batch=0, train=True):
if self.model is None:
self.model = recover_sequential_from_dict(self.layer_config)[0]
if self.float64:
self.model.type(torch.float64)
if self.optimizer is None:
self.optimizer = torch.optim.SGD(
params=self.model.parameters(), lr=self.learning_rate)
if train:
self.model.train()
else:
self.model.eval()
with torch.no_grad():
guest_tensor = torch.from_numpy(guest_input)
host_inputs = self.get_forward_from_host(
epoch, batch, train, idx=-1)
host_tensors = [torch.from_numpy(arr) for arr in host_inputs]
interactive_out = self.model(guest_tensor, host_tensors)
self.guest_tensor = guest_tensor
self.host_tensors = host_tensors
return interactive_out.cpu().detach().numpy()
def plaintext_backward(self, output_gradient, epoch, batch):
# compute input gradient
self.guest_tensor: torch.Tensor = self.guest_tensor.requires_grad_(True)
for tensor in self.host_tensors:
tensor.requires_grad_(True)
out = self.model(self.guest_tensor, self.host_tensors)
loss = backward_loss(out, torch.from_numpy(output_gradient))
backward_list = [self.guest_tensor]
backward_list.extend(self.host_tensors)
ret_grad = autograd.grad(loss, backward_list)
# update model
self.guest_tensor: torch.Tensor = self.guest_tensor.requires_grad_(False)
for tensor in self.host_tensors:
tensor.requires_grad_(False)
self.optimizer.zero_grad()
out = self.model(self.guest_tensor, self.host_tensors)
loss = backward_loss(out, torch.from_numpy(output_gradient))
loss.backward()
self.optimizer.step()
self.guest_tensor, self.host_tensors = None, None
for idx, host_grad in enumerate(ret_grad[1:]):
self.send_host_backward_to_host(host_grad, epoch, batch, idx=idx)
return ret_grad[0]
"""
Activation forward & backward
"""
def activation_forward(self, dense_out, with_grad=True):
if with_grad:
if (self.dense_output_data_require_grad is not None) or (
self.activation_out_require_grad is not None):
raise ValueError(
'torch forward error, related required grad tensors are not freed')
self.dense_output_data_require_grad = dense_out.requires_grad_(
True)
activation_out_ = self.model.activation(
self.dense_output_data_require_grad)
self.activation_out_require_grad = activation_out_
else:
with torch.no_grad():
activation_out_ = self.model.activation(dense_out)
return activation_out_.cpu().detach().numpy()
def activation_backward(self, output_gradients):
if self.activation_out_require_grad is None and self.dense_output_data_require_grad is None:
raise ValueError('related grad is None, cannot compute backward')
loss = backward_loss(
self.activation_out_require_grad,
torch.Tensor(output_gradients))
activation_backward_grad = torch.autograd.grad(
loss, self.dense_output_data_require_grad)
self.activation_out_require_grad = None
self.dense_output_data_require_grad = None
return activation_backward_grad[0].cpu().detach().numpy()
"""
Forward & Backward
"""
def print_log(self, descr, epoch, batch, train):
if train:
            LOGGER.info("{} epoch {} batch {}".format(descr, epoch, batch))
else:
            LOGGER.info("predicting, {} pred iteration {} batch {}".format(descr, epoch, batch))
def forward_interactive(
self,
encrypted_host_input,
epoch,
batch,
train=True):
self.print_log(
'get encrypted dense output of host model of',
epoch,
batch,
train)
mask_table_list = []
        guest_noises = []
host_idx = 0
for model, host_bottom_input in zip(
self.host_model_list, encrypted_host_input):
encrypted_fw = model(host_bottom_input, self.fixed_point_encoder)
mask_table = None
if train:
self._create_drop_out(encrypted_fw.shape)
if self.drop_out:
mask_table = self.drop_out.generate_mask_table(
encrypted_fw.shape)
if mask_table:
encrypted_fw = encrypted_fw.select_columns(mask_table)
mask_table_list.append(mask_table)
guest_forward_noise = self.rng_generator.fast_generate_random_number(
encrypted_fw.shape, encrypted_fw.partitions, keep_table=mask_table)
if self.fixed_point_encoder:
encrypted_fw += guest_forward_noise.encode(
self.fixed_point_encoder)
else:
encrypted_fw += guest_forward_noise
            guest_noises.append(guest_forward_noise)
self.send_guest_encrypted_forward_output_with_noise_to_host(
encrypted_fw.get_obj(), epoch, batch, idx=host_idx)
if mask_table:
self.send_interactive_layer_drop_out_table(
mask_table, epoch, batch, idx=host_idx)
host_idx += 1
# get list from hosts
decrypted_dense_outputs = self.get_guest_decrypted_forward_from_host(
epoch, batch, idx=-1)
merge_output = None
for idx, (outputs, noise) in enumerate(
                zip(decrypted_dense_outputs, guest_noises)):
out = PaillierTensor(outputs) - noise
if len(mask_table_list) != 0:
out = PaillierTensor(
out.get_obj().join(
mask_table_list[idx],
self.expand_columns))
if merge_output is None:
merge_output = out
else:
merge_output = merge_output + out
return merge_output
def forward(self, x, epoch: int, batch: int, train: bool = True, **kwargs):
self.print_log(
'interactive layer running forward propagation',
epoch,
batch,
train)
if self.plaintext:
return self.plaintext_forward(x, epoch, batch, train)
if self.model is None:
self.model = recover_sequential_from_dict(self.layer_config)[0]
LOGGER.debug('interactive model is {}'.format(self.model))
# for multi host cases
LOGGER.debug(
'host num is {}, len host model {}'.format(
self.host_num, len(
self.model.host_model)))
        assert self.host_num == len(self.model.host_model), \
            'host number is {}, but host linear layer number is {}, please check your interactive ' \
            'configuration and make sure that the host layer number equals the host number' \
            .format(self.host_num, len(self.model.host_model))
if self.float64:
self.model.type(torch.float64)
if train and not self.drop_out_initiated:
self.init_drop_out_parameter()
host_inputs = self.get_forward_from_host(epoch, batch, train, idx=-1)
host_bottom_inputs_tensor = []
host_input_shapes = []
for i in host_inputs:
pt = PaillierTensor(i)
host_bottom_inputs_tensor.append(pt)
host_input_shapes.append(pt.shape[1])
self.model.lazy_to_linear(x.shape[1], host_dims=host_input_shapes)
self.host_input_shapes = host_input_shapes
if self.guest_model is None:
LOGGER.info("building interactive layers' training model")
self._build_model()
if not self.partitions:
self.partitions = host_bottom_inputs_tensor[0].partitions
if not self.send_output_unit:
self.send_output_unit = True
for idx in range(self.host_num):
self.send_interactive_layer_output_unit(
self.host_model_list[idx].output_shape[0], idx=idx)
guest_output = self.guest_model(x)
host_output = self.forward_interactive(
host_bottom_inputs_tensor, epoch, batch, train)
if guest_output is not None:
dense_output_data = host_output + \
PaillierTensor(guest_output, partitions=self.partitions)
else:
dense_output_data = host_output
self.print_log(
"start to get interactive layer's activation output of",
epoch,
batch,
train)
if self.float64: # result after encrypt calculation is float 64
dense_out = torch.from_numpy(dense_output_data.numpy())
else:
dense_out = torch.Tensor(
dense_output_data.numpy()) # convert to float32
if self.do_backward_select_strategy:
for h in self.host_model_list:
h.activation_input = dense_out.cpu().detach().numpy()
# if is not backward strategy, can compute grad directly
if not train or self.do_backward_select_strategy:
with_grad = False
else:
with_grad = True
activation_out = self.activation_forward(
dense_out, with_grad=with_grad)
if train and self.drop_out:
return self.drop_out.forward(activation_out)
return activation_out
def backward_interactive(
self,
host_model,
activation_gradient,
epoch,
batch,
host_idx):
LOGGER.info(
"get encrypted weight gradient of epoch {} batch {}".format(
epoch, batch))
encrypted_weight_gradient = host_model.get_weight_gradient(
activation_gradient, encoder=self.fixed_point_encoder)
if self.fixed_point_encoder:
encrypted_weight_gradient = self.fixed_point_encoder.decode(
encrypted_weight_gradient)
noise_w = self.rng_generator.generate_random_number(
encrypted_weight_gradient.shape)
self.transfer_variable.encrypted_guest_weight_gradient.remote(
encrypted_weight_gradient +
noise_w,
role=consts.HOST,
idx=host_idx,
suffix=(
epoch,
batch,
))
        LOGGER.info(
            "get decrypted weight gradient of epoch {} batch {}".format(
                epoch, batch))
decrypted_weight_gradient = self.transfer_variable.decrypted_guest_weight_gradient.get(
idx=host_idx, suffix=(epoch, batch,))
decrypted_weight_gradient -= noise_w
encrypted_acc_noise = self.get_encrypted_acc_noise_from_host(
epoch, batch, idx=host_idx)
return decrypted_weight_gradient, encrypted_acc_noise
def backward(self, error, epoch: int, batch: int, selective_ids=None):
if self.plaintext:
return self.plaintext_backward(error, epoch, batch)
if selective_ids:
for host_model in self.host_model_list:
host_model.select_backward_sample(selective_ids)
self.guest_model.select_backward_sample(selective_ids)
if self.drop_out:
self.drop_out.select_backward_sample(selective_ids)
if self.do_backward_select_strategy:
# send to all host
self.send_backward_select_info(
selective_ids, len(error), epoch, batch, -1)
if len(error) > 0:
LOGGER.debug(
"interactive layer start backward propagation of epoch {} batch {}".format(
epoch, batch))
if not self.do_backward_select_strategy:
activation_gradient = self.activation_backward(error)
else:
act_input = self.host_model_list[0].get_selective_activation_input(
)
_ = self.activation_forward(torch.from_numpy(act_input), True)
activation_gradient = self.activation_backward(error)
if self.drop_out:
activation_gradient = self.drop_out.backward(
activation_gradient)
LOGGER.debug(
"interactive layer update guest weight of epoch {} batch {}".format(
epoch, batch))
# update guest model
guest_input_gradient = self.update_guest(activation_gradient)
LOGGER.debug('update host model weights')
for idx, host_model in enumerate(self.host_model_list):
# update host models
host_weight_gradient, acc_noise = self.backward_interactive(
host_model, activation_gradient, epoch, batch, host_idx=idx)
host_input_gradient = self.update_host(
host_model, activation_gradient, host_weight_gradient, acc_noise)
self.send_host_backward_to_host(
host_input_gradient.get_obj(), epoch, batch, idx=idx)
return guest_input_gradient
else:
return []
"""
Model update
"""
def update_guest(self, activation_gradient):
input_gradient = self.guest_model.get_input_gradient(
activation_gradient)
weight_gradient = self.guest_model.get_weight_gradient(
activation_gradient)
self.guest_model.update_weight(weight_gradient)
self.guest_model.update_bias(activation_gradient)
return input_gradient
def update_host(
self,
host_model,
activation_gradient,
weight_gradient,
acc_noise):
activation_gradient_tensor = PaillierTensor(
activation_gradient, partitions=self.partitions)
input_gradient = host_model.get_input_gradient(
activation_gradient_tensor, acc_noise, encoder=self.fixed_point_encoder)
host_model.update_weight(weight_gradient)
host_model.update_bias(activation_gradient)
return input_gradient
"""
Communication functions
"""
def send_interactive_layer_output_unit(self, shape, idx=0):
self.transfer_variable.interactive_layer_output_unit.remote(
shape, role=consts.HOST, idx=idx)
def send_backward_select_info(
self,
selective_ids,
gradient_len,
epoch,
batch,
idx):
self.transfer_variable.selective_info.remote(
(selective_ids, gradient_len), role=consts.HOST, idx=idx, suffix=(
epoch, batch,))
def send_host_backward_to_host(self, host_error, epoch, batch, idx):
self.transfer_variable.host_backward.remote(host_error,
role=consts.HOST,
idx=idx,
suffix=(epoch, batch,))
def get_forward_from_host(self, epoch, batch, train, idx=0):
return self.transfer_variable.encrypted_host_forward.get(
idx=idx, suffix=(epoch, batch, train))
def send_guest_encrypted_forward_output_with_noise_to_host(
self, encrypted_guest_forward_with_noise, epoch, batch, idx):
return self.transfer_variable.encrypted_guest_forward.remote(
encrypted_guest_forward_with_noise,
role=consts.HOST,
idx=idx,
suffix=(
epoch,
batch,
))
def send_interactive_layer_drop_out_table(
self, mask_table, epoch, batch, idx):
return self.transfer_variable.drop_out_table.remote(
mask_table, role=consts.HOST, idx=idx, suffix=(epoch, batch,))
def get_guest_decrypted_forward_from_host(self, epoch, batch, idx=0):
return self.transfer_variable.decrypted_guest_forward.get(
idx=idx, suffix=(epoch, batch,))
def get_encrypted_acc_noise_from_host(self, epoch, batch, idx=0):
return self.transfer_variable.encrypted_acc_noise.get(
idx=idx, suffix=(epoch, batch,))
"""
Model IO
"""
def transfer_np_model_to_torch_interactive_layer(self):
self.model = self.model.cpu()
if self.guest_model is not None:
guest_weight = self.guest_model.get_weight()
model: torch.nn.Linear = self.model.guest_model
model.weight.data.copy_(torch.Tensor(guest_weight))
if self.guest_model.bias is not None:
model.bias.data.copy_(torch.Tensor(self.guest_model.bias))
for host_np_model, torch_model in zip(
self.host_model_list, self.model.host_model):
host_weight = host_np_model.get_weight()
torch_model.weight.data.copy_(torch.Tensor(host_weight))
if host_np_model.bias is not None:
torch_model.bias.data.copy_(torch.Tensor(torch_model.bias))
def export_model(self):
self.transfer_np_model_to_torch_interactive_layer()
interactive_layer_param = InteractiveLayerParam()
interactive_layer_param.interactive_guest_saved_model_bytes = TorchNNModel.get_model_bytes(
self.model)
interactive_layer_param.host_input_shape.extend(self.host_input_shapes)
return interactive_layer_param
def restore_model(self, interactive_layer_param):
self.host_input_shapes = list(interactive_layer_param.host_input_shape)
self.model = TorchNNModel.recover_model_bytes(
interactive_layer_param.interactive_guest_saved_model_bytes)
self._build_model()
class HEInteractiveLayerHost(InteractiveLayerHost):
def __init__(self, params):
super(HEInteractiveLayerHost, self).__init__(params)
self.plaintext = PLAINTEXT
self.acc_noise = None
self.learning_rate = params.interactive_layer_lr
self.encrypter = self.generate_encrypter(params)
self.transfer_variable = HEInteractiveTransferVariable()
self.partitions = 1
self.input_shape = None
self.output_unit = None
self.rng_generator = RandomNumberGenerator()
self.do_backward_select_strategy = False
self.drop_out_init = False
self.drop_out_keep_rate = None
self.fixed_point_encoder = None if params.floating_point_precision is None else FixedPointEncoder(
2 ** params.floating_point_precision)
self.mask_table = None
"""
Init
"""
def set_transfer_variable(self, transfer_variable):
self.transfer_variable = transfer_variable
def set_partition(self, partition):
self.partitions = partition
def set_backward_select_strategy(self):
self.do_backward_select_strategy = True
"""
Forward & Backward
"""
def plaintext_forward(self, host_input, epoch, batch, train):
self.send_forward_to_guest(host_input, epoch, batch, train)
def plaintext_backward(self, epoch, batch):
return self.get_host_backward_from_guest(epoch, batch)
def forward(self, host_input, epoch=0, batch=0, train=True, **kwargs):
if self.plaintext:
self.plaintext_forward(host_input, epoch, batch, train)
return
if train and not self.drop_out_init:
self.drop_out_init = True
self.drop_out_keep_rate = self.transfer_variable.drop_out_info.get(
0, role=consts.GUEST, suffix=('dropout_rate', ))
if self.drop_out_keep_rate == -1:
self.drop_out_keep_rate = None
LOGGER.info(
"forward propagation: encrypt host_bottom_output of epoch {} batch {}".format(
epoch, batch))
host_input = PaillierTensor(host_input, partitions=self.partitions)
encrypted_host_input = host_input.encrypt(self.encrypter)
self.send_forward_to_guest(
encrypted_host_input.get_obj(), epoch, batch, train)
encrypted_guest_forward = PaillierTensor(
self.get_guest_encrypted_forward_from_guest(epoch, batch))
decrypted_guest_forward = encrypted_guest_forward.decrypt(
self.encrypter)
if self.fixed_point_encoder:
decrypted_guest_forward = decrypted_guest_forward.decode(
self.fixed_point_encoder)
if self.input_shape is None:
self.input_shape = host_input.shape[1]
self.output_unit = self.get_interactive_layer_output_unit()
if self.acc_noise is None:
self.acc_noise = np.zeros((self.input_shape, self.output_unit))
mask_table = None
if train and self.drop_out_keep_rate and self.drop_out_keep_rate < 1:
mask_table = self.get_interactive_layer_drop_out_table(
epoch, batch)
if mask_table:
decrypted_guest_forward_with_noise = decrypted_guest_forward + \
(host_input * self.acc_noise).select_columns(mask_table)
self.mask_table = mask_table
else:
noise_part = (host_input * self.acc_noise)
decrypted_guest_forward_with_noise = decrypted_guest_forward + noise_part
self.send_decrypted_guest_forward_with_noise_to_guest(
decrypted_guest_forward_with_noise.get_obj(), epoch, batch)
def backward(self, epoch, batch):
if self.plaintext:
return self.plaintext_backward(epoch, batch), []
do_backward = True
selective_ids = []
if self.do_backward_select_strategy:
selective_ids, do_backward = self.send_backward_select_info(
epoch, batch)
if not do_backward:
return [], selective_ids
encrypted_guest_weight_gradient = self.get_guest_encrypted_weight_gradient_from_guest(
epoch, batch)
LOGGER.info(
"decrypt weight gradient of epoch {} batch {}".format(
epoch, batch))
decrypted_guest_weight_gradient = self.encrypter.recursive_decrypt(
encrypted_guest_weight_gradient)
noise_weight_gradient = self.rng_generator.generate_random_number(
(self.input_shape, self.output_unit))
decrypted_guest_weight_gradient += noise_weight_gradient / self.learning_rate
self.send_guest_decrypted_weight_gradient_to_guest(
decrypted_guest_weight_gradient, epoch, batch)
LOGGER.info(
"encrypt acc_noise of epoch {} batch {}".format(
epoch, batch))
encrypted_acc_noise = self.encrypter.recursive_encrypt(self.acc_noise)
self.send_encrypted_acc_noise_to_guest(
encrypted_acc_noise, epoch, batch)
self.acc_noise += noise_weight_gradient
host_input_gradient = PaillierTensor(
self.get_host_backward_from_guest(epoch, batch))
host_input_gradient = host_input_gradient.decrypt(self.encrypter)
if self.fixed_point_encoder:
host_input_gradient = host_input_gradient.decode(
self.fixed_point_encoder).numpy()
else:
host_input_gradient = host_input_gradient.numpy()
return host_input_gradient, selective_ids
"""
Communication Function
"""
def send_backward_select_info(self, epoch, batch):
selective_ids, do_backward = self.transfer_variable.selective_info.get(
idx=0, suffix=(epoch, batch,))
return selective_ids, do_backward
def send_encrypted_acc_noise_to_guest(
self, encrypted_acc_noise, epoch, batch):
self.transfer_variable.encrypted_acc_noise.remote(encrypted_acc_noise,
idx=0,
role=consts.GUEST,
suffix=(epoch, batch,))
def get_interactive_layer_output_unit(self):
return self.transfer_variable.interactive_layer_output_unit.get(idx=0)
def get_guest_encrypted_weight_gradient_from_guest(self, epoch, batch):
encrypted_guest_weight_gradient = self.transfer_variable.encrypted_guest_weight_gradient.get(
idx=0, suffix=(epoch, batch,))
return encrypted_guest_weight_gradient
def get_interactive_layer_drop_out_table(self, epoch, batch):
return self.transfer_variable.drop_out_table.get(
idx=0, suffix=(epoch, batch,))
def send_forward_to_guest(self, encrypted_host_input, epoch, batch, train):
self.transfer_variable.encrypted_host_forward.remote(
encrypted_host_input, idx=0, role=consts.GUEST, suffix=(epoch, batch, train))
def send_guest_decrypted_weight_gradient_to_guest(
self, decrypted_guest_weight_gradient, epoch, batch):
self.transfer_variable.decrypted_guest_weight_gradient.remote(
decrypted_guest_weight_gradient, idx=0, role=consts.GUEST, suffix=(epoch, batch,))
def get_host_backward_from_guest(self, epoch, batch):
host_backward = self.transfer_variable.host_backward.get(
idx=0, suffix=(epoch, batch,))
return host_backward
def get_guest_encrypted_forward_from_guest(self, epoch, batch):
encrypted_guest_forward = self.transfer_variable.encrypted_guest_forward.get(
idx=0, suffix=(epoch, batch,))
return encrypted_guest_forward
def send_decrypted_guest_forward_with_noise_to_guest(
self, decrypted_guest_forward_with_noise, epoch, batch):
self.transfer_variable.decrypted_guest_forward.remote(
decrypted_guest_forward_with_noise,
idx=0,
role=consts.GUEST,
suffix=(
epoch,
batch,
))
"""
Encrypter
"""
def generate_encrypter(self, param):
LOGGER.info("generate encrypter")
if param.encrypt_param.method.lower() == consts.PAILLIER.lower():
encrypter = PaillierEncrypt()
encrypter.generate_key(param.encrypt_param.key_length)
else:
raise NotImplementedError("encrypt method not supported yet!!!")
return encrypter
"""
Model IO
"""
def export_model(self):
interactive_layer_param = InteractiveLayerParam()
interactive_layer_param.acc_noise = pickle.dumps(self.acc_noise)
return interactive_layer_param
def restore_model(self, interactive_layer_param):
self.acc_noise = pickle.loads(interactive_layer_param.acc_noise)
| 36,890 | 37.071207 | 122 |
py
|
FATE
|
FATE-master/python/federatedml/nn/hetero/interactive/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/hetero/interactive/utils/numpy_layer.py
|
import torch
import numpy as np
from federatedml.util import consts
from federatedml.secureprotol.paillier_tensor import PaillierTensor
class NumpyDenseLayer(object):
"""
    NumpyDenseLayer is designed for Paillier tensor computation
"""
def __init__(self):
self.input = None
self.model_weight = None
self.model_shape = None
self.bias = None
self.lr = 1.0
self.role = None
self.is_empty_model = False
self.activation_input = None
self.input_cached = np.array([])
self.activation_cached = np.array([])
self.do_backward_selective_strategy = False
self.batch_size = None
def set_backward_selective_strategy(self):
self.do_backward_selective_strategy = True
def set_batch(self, batch_size):
self.batch_size = batch_size
def build(self, torch_linear: torch.nn.Linear):
if torch_linear is None:
if self.role == "host":
raise ValueError("host input is empty!")
self.is_empty_model = True
return
assert isinstance(
torch_linear, torch.nn.Linear), 'must use a torch Linear to build this class, but got {}' .format(torch_linear)
self.model_weight = torch_linear.weight.cpu().detach().numpy().transpose()
if torch_linear.bias is not None:
self.bias = torch_linear.bias.cpu().detach().numpy()
def export_model(self):
if self.is_empty_model:
return "".encode()
layer_weights = [self.model_weight]
return layer_weights
def get_selective_activation_input(self):
self.activation_input = self.activation_cached[: self.batch_size]
self.activation_cached = self.activation_cached[self.batch_size:]
return self.activation_input
def get_weight(self):
return self.model_weight.transpose()
def get_bias(self):
return self.bias
def set_learning_rate(self, lr):
self.lr = lr
def forward(self, x, **kwargs):
pass
def get_weight_gradient(self, delta):
pass
def restore_model(self, model_bytes):
pass
def update_weight(self, delta):
pass
def update_bias(self, delta):
pass
@property
def empty(self):
return self.is_empty_model
@property
def output_shape(self):
return self.model_weight.shape[1:]
def __repr__(self):
return 'model weights: {}, model bias {}'.format(
self.model_weight, self.bias)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
class NumpyDenseLayerGuest(NumpyDenseLayer):
def __init__(self):
super(NumpyDenseLayerGuest, self).__init__()
self.role = consts.GUEST
def forward(self, x):
if self.empty:
return None
self.input = x
output = np.matmul(x, self.model_weight)
if self.bias is not None:
output += self.bias
return output
def select_backward_sample(self, selective_ids):
if self.input_cached.shape[0] == 0:
self.input_cached = self.input[selective_ids]
else:
self.input_cached = np.vstack(
(self.input_cached, self.input[selective_ids])
)
def get_input_gradient(self, delta):
if self.empty:
return None
error = np.matmul(delta, self.model_weight.T)
return error
def get_weight_gradient(self, delta):
if self.empty:
return None
if self.do_backward_selective_strategy:
self.input = self.input_cached[: self.batch_size]
self.input_cached = self.input_cached[self.batch_size:]
delta_w = np.matmul(delta.T, self.input)
return delta_w
def update_weight(self, delta):
if self.empty:
return None
self.model_weight -= self.lr * delta.T
def update_bias(self, delta):
if self.bias is not None:
self.bias -= np.sum(delta, axis=0) * self.lr
class NumpyDenseLayerHost(NumpyDenseLayer):
"""
    This dense layer can directly compute Paillier tensor forward
"""
def __init__(self):
super(NumpyDenseLayerHost, self).__init__()
self.role = consts.HOST
def select_backward_sample(self, selective_ids):
cached_shape = self.input_cached.shape[0]
offsets = [i + cached_shape for i in range(len(selective_ids))]
id_map = dict(zip(selective_ids, offsets))
if cached_shape == 0:
self.input_cached = (
self.input.get_obj()
.filter(lambda k, v: k in id_map)
.map(lambda k, v: (id_map[k], v))
)
self.input_cached = PaillierTensor(self.input_cached)
self.activation_cached = self.activation_input[selective_ids]
else:
selective_input = (
self.input.get_obj()
.filter(lambda k, v: k in id_map)
.map(lambda k, v: (id_map[k], v))
)
self.input_cached = PaillierTensor(
self.input_cached.get_obj().union(selective_input)
)
self.activation_cached = np.vstack(
(self.activation_cached, self.activation_input[selective_ids])
)
def forward(self, x, encoder=None):
self.input = x
if encoder is not None:
output = x * encoder.encode(self.model_weight)
else:
output = x * self.model_weight
if self.bias is not None:
if encoder is not None:
output += encoder.encode(self.bias)
else:
output += self.bias
return output
def get_input_gradient(self, delta, acc_noise, encoder=None):
if not encoder:
error = delta * self.model_weight.T + delta * acc_noise.T
else:
error = delta.encode(encoder) * (self.model_weight + acc_noise).T
return error
def get_weight_gradient(self, delta, encoder=None):
if self.do_backward_selective_strategy:
batch_size = self.batch_size
self.input = PaillierTensor(
self.input_cached.get_obj().filter(lambda k, v: k < batch_size)
)
self.input_cached = PaillierTensor(
self.input_cached.get_obj()
.filter(lambda k, v: k >= batch_size)
.map(lambda k, v: (k - batch_size, v))
)
if encoder:
delta_w = self.input.fast_matmul_2d(encoder.encode(delta))
else:
delta_w = self.input.fast_matmul_2d(delta)
return delta_w
def update_weight(self, delta):
self.model_weight -= delta * self.lr
def update_bias(self, delta):
if self.bias is not None:
self.bias -= np.sum(delta, axis=0) * self.lr
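if __name__ == '__main__':
    # Hedged sanity sketch: NumpyDenseLayerGuest mirrors a torch Linear in plain
    # numpy, so its forward output should match the torch layer on the same input.
    linear = torch.nn.Linear(4, 2)
    guest_layer = NumpyDenseLayerGuest()
    guest_layer.build(linear)
    x = np.random.rand(3, 4)
    diff = guest_layer(x) - linear(torch.Tensor(x)).detach().numpy()
    print(abs(diff).max())  # expected to be ~0 up to float32 precision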
| 6,956 | 27.62963 | 123 |
py
|
FATE
|
FATE-master/python/federatedml/nn/loss/cross_entropy.py
|
import torch as t
from federatedml.util import consts
from torch.nn.functional import one_hot
def cross_entropy(p2, p1, reduction='mean'):
p2 = p2 + consts.FLOAT_ZERO # to avoid nan
assert p2.shape == p1.shape
if reduction == 'sum':
return -t.sum(p1 * t.log(p2))
elif reduction == 'mean':
return -t.mean(t.sum(p1 * t.log(p2), dim=1))
elif reduction == 'none':
return -t.sum(p1 * t.log(p2), dim=1)
else:
raise ValueError('unknown reduction')
class CrossEntropyLoss(t.nn.Module):
"""
A CrossEntropy Loss that will not compute Softmax
"""
def __init__(self, reduction='mean'):
super(CrossEntropyLoss, self).__init__()
self.reduction = reduction
def forward(self, pred, label):
one_hot_label = one_hot(label.flatten())
loss_ = cross_entropy(pred, one_hot_label, self.reduction)
return loss_
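if __name__ == '__main__':
    # Hedged sketch: unlike torch.nn.CrossEntropyLoss, this variant expects
    # probabilities (e.g. a softmax output) rather than raw logits, and builds
    # the one-hot target itself from integer labels.
    pred = t.softmax(t.randn(4, 3), dim=1)
    label = t.tensor([0, 2, 1, 2])
    print(CrossEntropyLoss()(pred, label))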
| 913 | 25.114286 | 66 |
py
|
FATE
|
FATE-master/python/federatedml/nn/loss/weighted_loss.py
|
import torch as t
from torch.nn import BCELoss
class WeightedBCE(t.nn.Module):
def __init__(self) -> None:
super().__init__()
self.loss_fn = BCELoss(reduce=False)
def forward(self, pred, label_and_weight):
label, weights = label_and_weight
losses = self.loss_fn(pred, label)
losses = losses * weights
loss_val = losses.sum() / weights.sum()
return loss_val
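if __name__ == '__main__':
    # Hedged sketch: per-sample BCE losses are re-weighted and normalized by the
    # total weight instead of the batch size.
    pred = t.sigmoid(t.randn(4, 1))
    label = t.ones(4, 1)
    weights = t.tensor([[1.], [1.], [2.], [0.5]])
    print(WeightedBCE()(pred, (label, weights)))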
| 425 | 24.058824 | 47 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/statics.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import functools
import math
import numpy as np
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.feature.binning.quantile_summaries import QuantileSummaries
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.statistic import data_overview
# from federatedml.statistic.feature_statistic import feature_statistic
from federatedml.util import LOGGER
from federatedml.util import consts
class SummaryStatistics(object):
def __init__(self, length, abnormal_list=None, stat_order=2, bias=True):
self.abnormal_list = abnormal_list
self.sum = np.zeros(length)
self.sum_square = np.zeros(length)
self.max_value = -np.inf * np.ones(length)
self.min_value = np.inf * np.ones(length)
self.count = np.zeros(length)
self.length = length
self.stat_order = stat_order
self.bias = bias
m = 3
while m <= stat_order:
exp_sum_m = np.zeros(length)
setattr(self, f"exp_sum_{m}", exp_sum_m)
m += 1
def add_rows(self, rows):
"""
When getting E(x^n), the formula are:
.. math::
(i-1)/i * S_{i-1} + 1/i * x_i
where i is the current count, and S_i is the current expectation of x
"""
# if self.abnormal_list is None:
if not self.abnormal_list:
rows = np.array(rows, dtype=float)
self.count += 1
self.sum += rows
self.sum_square += rows ** 2
self.max_value = np.max([self.max_value, rows], axis=0)
self.min_value = np.min([self.min_value, rows], axis=0)
for m in range(3, self.stat_order + 1):
exp_sum_m = getattr(self, f"exp_sum_{m}")
# exp_sum_m += rows ** m
exp_sum_m = (self.count - 1) / self.count * exp_sum_m + rows ** m / self.count
setattr(self, f"exp_sum_{m}", exp_sum_m)
else:
filter_rows = []
filter_idx = []
for idx, value in enumerate(rows):
if value in self.abnormal_list or (isinstance(value, float) and np.isnan(value)):
continue
try:
value = float(value)
except ValueError as e:
raise ValueError(f"In add func, value should be either a numeric input or be listed in "
f"abnormal list. Error info: {e}")
filter_rows.append(value)
filter_idx.append(idx)
if not filter_idx:
return
filter_rows = np.array(filter_rows, dtype=float)
filter_idx = np.array(filter_idx)
self.count[filter_idx] += 1
self.sum[filter_idx] += filter_rows
self.sum_square[filter_idx] += filter_rows ** 2
self.max_value[filter_idx] = np.max([self.max_value[filter_idx], filter_rows], axis=0)
self.min_value[filter_idx] = np.min([self.min_value[filter_idx], filter_rows], axis=0)
for m in range(3, self.stat_order + 1):
exp_sum_m = getattr(self, f"exp_sum_{m}")
# exp_sum_m[filter_idx] += filter_rows ** m
exp_sum_m[filter_idx] = (self.count[filter_idx] - 1) / self.count[filter_idx] * \
exp_sum_m[filter_idx] + filter_rows ** m / self.count[filter_idx]
setattr(self, f"exp_sum_{m}", exp_sum_m)
"""
for idx, value in enumerate(rows):
if value in self.abnormal_list:
continue
try:
value = float(value)
except ValueError as e:
raise ValueError(f"In add func, value should be either a numeric input or be listed in "
f"abnormal list. Error info: {e}")
self.count[idx] += 1
self.sum[idx] += value
self.sum_square[idx] += value ** 2
self.max_value[idx] = np.max([self.max_value[idx], value])
self.min_value[idx] = np.min([self.min_value[idx], value])
for m in range(3, self.stat_order + 1):
exp_sum_m = getattr(self, f"exp_sum_{m}")
exp_sum_m[idx] = (self.count[idx] - 1) / self.count[idx] * \
exp_sum_m[idx] + rows[idx] ** m / self.count[idx]
setattr(self, f"exp_sum_{m}", exp_sum_m)
"""
def merge(self, other):
if self.stat_order != other.stat_order:
raise AssertionError("Two merging summary should have same order.")
self.sum += other.sum
self.sum_square += other.sum_square
self.max_value = np.max([self.max_value, other.max_value], axis=0)
self.min_value = np.min([self.min_value, other.min_value], axis=0)
for m in range(3, self.stat_order + 1):
sum_m_1 = getattr(self, f"exp_sum_{m}")
sum_m_2 = getattr(other, f"exp_sum_{m}")
exp_sum = (sum_m_1 * self.count + sum_m_2 * other.count) / (self.count + other.count)
setattr(self, f"exp_sum_{m}", exp_sum)
self.count += other.count
return self
"""
def summary(self):
for m in range(3, self.stat_order + 1):
exp_sum_m = getattr(self, f"exp_sum_{m}")
for idx, cnt in enumerate(self.count):
if np.abs(cnt) < consts.FLOAT_ZERO:
continue
exp_sum_m[idx] /= cnt
setattr(self, f"exp_sum_{m}", exp_sum_m)
"""
@property
def mean(self):
return self.sum / self.count
@property
def max(self):
return self.max_value
@property
def min(self):
return self.min_value
@property
def variance(self):
mean = self.mean
variance = self.sum_square / self.count - mean ** 2
variance = np.array([x if math.fabs(x) >= consts.FLOAT_ZERO else 0.0 for x in variance])
return variance
@property
def coefficient_of_variance(self):
mean = np.array([consts.FLOAT_ZERO if math.fabs(x) < consts.FLOAT_ZERO else x
for x in self.mean])
return np.fabs(self.stddev / mean)
@property
def stddev(self):
return np.sqrt(self.variance)
@property
def moment_3(self):
"""
In mathematics, a moment is a specific quantitative measure of the shape of a function.
where the k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \\sum_{i = 1}^n (x_i - \bar{x})^k
the 3rd central moment is often used to calculate the coefficient of skewness
"""
if self.stat_order < 3:
            raise ValueError("The third order of expectation sum has not been computed.")
exp_sum_2 = self.sum_square / self.count
exp_sum_3 = getattr(self, "exp_sum_3")
mu = self.mean
return exp_sum_3 - 3 * mu * exp_sum_2 + 2 * mu ** 3
@property
def moment_4(self):
"""
In mathematics, a moment is a specific quantitative measure of the shape of a function.
where the k-th central moment of a data sample is:
.. math::
            m_k = \frac{1}{n} \\sum_{i = 1}^n (x_i - \bar{x})^k
the 4th central moment is often used to calculate the coefficient of kurtosis
"""
        if self.stat_order < 4:
            raise ValueError("The fourth order of expectation sum has not been computed.")
exp_sum_2 = self.sum_square / self.count
exp_sum_3 = getattr(self, "exp_sum_3")
exp_sum_4 = getattr(self, "exp_sum_4")
mu = self.mean
return exp_sum_4 - 4 * mu * exp_sum_3 + 6 * mu ** 2 * exp_sum_2 - 3 * mu ** 4
@property
def skewness(self):
"""
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\\sum_{n=1}^N(x[n]-\bar{x})^i
If the bias is False, return the adjusted Fisher-Pearson standardized moment coefficient
i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
"""
m2 = self.variance
m3 = self.moment_3
n = self.count
zero = (m2 == 0)
np.seterr(divide='ignore', invalid='ignore')
vals = np.where(zero, 0, m3 / m2 ** 1.5)
if not self.bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2 ** 1.5
np.place(vals, can_correct, nval)
return vals
@property
def kurtosis(self):
"""
Return the sample excess kurtosis which
.. math::
g = \frac{m_4}{m_2^2} - 3
If bias is False, the calculations are corrected for statistical bias.
"""
m2 = self.variance
m4 = self.moment_4
n = self.count
zero = (m2 == 0)
np.seterr(divide='ignore', invalid='ignore')
result = np.where(zero, 0, m4 / m2 ** 2.0)
if not self.bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0 / (n - 2) / (n - 3) * ((n ** 2 - 1.0) * m4 / m2 ** 2.0 - 3 * (n - 1) ** 2.0)
np.place(result, can_correct, nval + 3.0)
return result - 3
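# Hedged sanity sketch for the streaming statistics above: fed row by row, they
# should agree with a one-shot batch computation. For a single column:
#   stat = SummaryStatistics(length=1, stat_order=4, bias=True)
#   for v in [1.0, 2.0, 3.0, 4.0]:
#       stat.add_rows([v])
#   stat.mean      # [2.5]
#   stat.variance  # [1.25]
#   stat.skewness  # [0.] for this symmetric sample
#   stat.kurtosis  # [-1.36], the excess kurtosis of this flat sample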
class MissingStatistic(object):
def __init__(self, missing_val=None):
super(MissingStatistic, self).__init__()
self.missing_val = None
self.feature_summary = {}
self.count_summary = {}
self.missing_feature = []
self.all_feature_list = []
self.tag_id_mapping, self.id_tag_mapping = {}, {}
self.dense_missing_val = missing_val
@staticmethod
def is_sparse(tb):
return isinstance(tb.take(1)[0][1].features, SparseVector)
@staticmethod
def check_table_content(tb):
if not tb.count() > 0:
            raise ValueError('input table must contain at least 1 sample')
first_ = tb.take(1)[0][1]
if isinstance(first_, Instance):
return True
else:
raise ValueError('unknown input format')
def fit(self, tb):
LOGGER.debug('start to compute feature lost ratio')
if not self.check_table_content(tb):
            raise ValueError('contents of input table must be instances of class "Instance"')
header = tb.schema['header']
self.all_feature_list = header
self.tag_id_mapping = {v: k for k, v in enumerate(header)}
self.id_tag_mapping = {k: v for k, v in enumerate(header)}
feature_count_rs = self.count_feature_ratio(tb, self.tag_id_mapping, not self.is_sparse(tb),
missing_val=self.missing_val)
total_count = tb.count()
for idx, count_val in enumerate(feature_count_rs):
self.feature_summary[self.id_tag_mapping[idx]] = 1 - (count_val / total_count)
self.count_summary[self.id_tag_mapping[idx]] = int(total_count - count_val)
if (count_val / total_count) == 0:
self.missing_feature.append(self.id_tag_mapping[idx])
return self.feature_summary
@staticmethod
def count_feature_ratio(tb, tag_id_mapping, dense_input, missing_val=None):
func = functools.partial(MissingStatistic.map_partitions_count, tag_id_mapping=tag_id_mapping,
dense_input=dense_input,
missing_val=missing_val)
rs = tb.applyPartitions(func)
return rs.reduce(MissingStatistic.reduce_count_rs)
@staticmethod
def map_partitions_count(iterable, tag_id_mapping, dense_input=True, missing_val=None):
count_arr = np.zeros(len(tag_id_mapping))
for k, v in iterable:
# in dense input, missing feature is set as np.nan
if dense_input:
feature = v.features # a numpy array
arr = np.array(list(feature))
if missing_val is None:
idx_arr = np.argwhere(~np.isnan(arr)).flatten()
else:
idx_arr = np.argwhere(~(arr == missing_val)).flatten()
# in sparse input, missing features have no key in the dict
else:
feature = v.features.sparse_vec # a dict
idx_arr = np.array(list(feature.keys()))
if len(idx_arr) != 0:
count_arr[idx_arr] += 1
return count_arr
@staticmethod
def reduce_count_rs(arr1, arr2):
return arr1 + arr2
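# Hedged usage sketch: MissingStatistic reports, per feature, the fraction of
# samples whose value is missing (np.nan by default for dense input, an absent
# key for sparse input):
#   stat = MissingStatistic()
#   lost_ratios = stat.fit(data_instances)  # e.g. {'x0': 0.0, 'x1': 0.25, ...}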
class MultivariateStatisticalSummary(object):
"""
"""
def __init__(self, data_instances, cols_index=-1, abnormal_list=None,
error=consts.DEFAULT_RELATIVE_ERROR, stat_order=2, bias=True):
self.finish_fit_statics = False # Use for static data
# self.finish_fit_summaries = False # Use for quantile data
self.binning_obj: QuantileBinning = None
self.summary_statistics = None
self.header = None
# self.quantile_summary_dict = {}
self.cols_dict = {}
# self.medians = None
self.data_instances = data_instances
self.cols_index = None
if not isinstance(abnormal_list, list):
abnormal_list = [abnormal_list]
self.abnormal_list = abnormal_list
self.__init_cols(data_instances, cols_index, stat_order, bias)
self.label_summary = None
self.error = error
self.missing_static_obj: MissingStatistic = None
def __init_cols(self, data_instances, cols_index, stat_order, bias):
header = data_overview.get_header(data_instances)
self.header = header
if cols_index == -1:
self.cols_index = [i for i in range(len(header))]
else:
self.cols_index = cols_index
LOGGER.debug(f"col_index: {cols_index}, self.col_index: {self.cols_index}")
self.cols_dict = {header[indices]: indices for indices in self.cols_index}
self.summary_statistics = SummaryStatistics(length=len(self.cols_index),
abnormal_list=self.abnormal_list,
stat_order=stat_order,
bias=bias)
def _static_sums(self):
"""
Statics sum, sum_square, max_value, min_value,
so that variance is available.
"""
is_sparse = data_overview.is_sparse_data(self.data_instances)
partition_cal = functools.partial(self.static_in_partition,
cols_index=self.cols_index,
summary_statistics=copy.deepcopy(self.summary_statistics),
is_sparse=is_sparse)
self.summary_statistics = self.data_instances.applyPartitions(partition_cal). \
reduce(lambda x, y: self.copy_merge(x, y))
# self.summary_statistics = summary_statistic_dict.reduce(self.aggregate_statics)
self.finish_fit_statics = True
def _static_quantile_summaries(self):
"""
        Build quantile summaries so that a specific quantile point can be queried
"""
if self.binning_obj is not None:
return self.binning_obj
bin_param = FeatureBinningParam(bin_num=2, bin_indexes=self.cols_index,
error=self.error)
self.binning_obj = QuantileBinning(bin_param, abnormal_list=self.abnormal_list)
self.binning_obj.fit_split_points(self.data_instances)
return self.binning_obj
@staticmethod
def copy_merge(s1, s2):
new_s1 = copy.deepcopy(s1)
return new_s1.merge(s2)
@staticmethod
def static_in_partition(data_instances, cols_index, summary_statistics, is_sparse):
"""
        Compute sums, sum_square, max and min values through one traversal
Parameters
----------
data_instances : Table
The input data
cols_index : indices
Specify which column(s) need to apply statistic.
summary_statistics: SummaryStatistics
Returns
-------
        SummaryStatistics object updated with this partition's rows
"""
cols_index_set = set(cols_index)
for k, instances in data_instances:
if not is_sparse:
if isinstance(instances, Instance):
features = instances.features
else:
features = instances
# try:
# features = np.array(instances, dtype=float)
# except ValueError as e:
# raise ValueError(f"Static Module accept numeric input only. Error info: {e}")
# LOGGER.debug(f"In statics, features: {features}")
# row_values = [x for idx, x in enumerate(features) if idx in cols_index]
row_values = [x for idx, x in enumerate(features) if idx in cols_index_set]
# row_values = features[cols_index]
else:
sparse_data = instances.features.get_sparse_vector()
row_values = np.array([sparse_data.get(x, 0) for x in cols_index])
summary_statistics.add_rows(row_values)
# summary_statistics.summary()
return summary_statistics
@staticmethod
def static_summaries_in_partition(data_instances, cols_dict, abnormal_list, error):
"""
        Build a QuantileSummaries object for each requested column through one traversal
Parameters
----------
data_instances : Table
The input data
cols_dict : dict
Specify which column(s) need to apply statistic.
abnormal_list: list
Specify which values are not permitted.
Returns
-------
        Dict mapping column name to its QuantileSummaries object
"""
summary_dict = {}
for col_name in cols_dict:
summary_dict[col_name] = QuantileSummaries(abnormal_list=abnormal_list, error=error)
for k, instances in data_instances:
if isinstance(instances, Instance):
features = instances.features
else:
features = instances
for col_name, col_index in cols_dict.items():
value = features[col_index]
summary_obj = summary_dict[col_name]
summary_obj.insert(value)
return summary_dict
@staticmethod
def aggregate_statics(s_dict1, s_dict2):
if s_dict1 is None and s_dict2 is None:
return None
if s_dict1 is None:
return s_dict2
if s_dict2 is None:
return s_dict1
new_dict = {}
for col_name, static_1 in s_dict1.items():
static_1.merge(s_dict2[col_name])
new_dict[col_name] = static_1
return new_dict
def get_median(self):
if self.binning_obj is None:
self._static_quantile_summaries()
medians = self.binning_obj.query_quantile_point(query_points=0.5)
return medians
@property
def median(self):
median_dict = self.get_median()
return np.array([median_dict[self.header[idx]] for idx in self.cols_index])
def get_quantile_point(self, quantile):
"""
Return the specific quantile point value
Parameters
----------
quantile : float, 0 <= quantile <= 1
            The quantile point to query.
Returns
-------
return a dict of result quantile points.
eg.
quantile_point = {"x1": 3, "x2": 5... }
"""
if self.binning_obj is None:
self._static_quantile_summaries()
quantile_points = self.binning_obj.query_quantile_point(quantile)
return quantile_points
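    # Hypothetical usage sketch (names assumed): query quantiles from a fitted
    # summary; `data_instances` is assumed to be a FATE Table of Instance rows.
    #
    #   summary = MultivariateStatisticalSummary(data_instances, cols_index=-1)
    #   medians = summary.get_median()          # {"x0": ..., "x1": ...}
    #   p90 = summary.get_quantile_point(0.9)   # 90th percentile per column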
def get_mean(self):
"""
Return the mean value(s) of the given column
Returns
-------
return a dict of result mean.
"""
return self.get_statics("mean")
def get_variance(self):
return self.get_statics("variance")
def get_std_variance(self):
return self.get_statics("stddev")
def get_max(self):
return self.get_statics("max_value")
def get_min(self):
return self.get_statics("min_value")
def get_statics(self, data_type):
"""
Return the specific static value(s) of the given column
Parameters
----------
data_type : str, "mean", "variance", "std_variance", "max_value" or "mim_value"
Specify which type to show.
Returns
-------
        return a dict mapping each column name to the requested statistic.
"""
if not self.finish_fit_statics:
self._static_sums()
if hasattr(self.summary_statistics, data_type):
result_row = getattr(self.summary_statistics, data_type)
elif hasattr(self, data_type):
result_row = getattr(self, data_type)
else:
raise ValueError(f"Statistic data type: {data_type} cannot be recognized")
# LOGGER.debug(f"col_index: {self.cols_index}, result_row: {result_row},"
# f"header: {self.header}, data_type: {data_type}")
result = {}
result_row = result_row.tolist()
for col_idx, header_idx in enumerate(self.cols_index):
result[self.header[header_idx]] = result_row[col_idx]
return result
def get_missing_ratio(self):
return self.get_statics("missing_ratio")
@property
def missing_ratio(self):
self.missing_static_obj = MissingStatistic()
all_missing_ratio = self.missing_static_obj.fit(self.data_instances)
return np.array([all_missing_ratio[self.header[idx]] for idx in self.cols_index])
@property
def missing_count(self):
# missing_ratio = self.missing_ratio
# missing_count = missing_ratio * self.data_instances.count()
# return missing_count.astype(int)
if self.missing_static_obj is None:
self.missing_static_obj = MissingStatistic()
self.missing_static_obj.fit(self.data_instances)
all_missing_count = self.missing_static_obj.count_summary
return np.array([all_missing_count[self.header[idx]] for idx in self.cols_index])
@staticmethod
def get_label_static_dict(data_instances):
result_dict = {}
for instance in data_instances:
label_key = instance[1].label
if label_key not in result_dict:
result_dict[label_key] = 1
else:
result_dict[label_key] += 1
return result_dict
@staticmethod
def merge_result_dict(dict_a, dict_b):
for k, v in dict_b.items():
if k in dict_a:
dict_a[k] += v
else:
dict_a[k] = v
return dict_a
def get_label_histogram(self):
label_histogram = self.data_instances.applyPartitions(self.get_label_static_dict).reduce(self.merge_result_dict)
return label_histogram
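# Sketch of the label histogram path above (assumed binary labels):
# get_label_static_dict counts labels inside each partition and
# merge_result_dict adds the per-partition dicts, e.g.
#   merge_result_dict({0: 40, 1: 10}, {0: 35, 1: 15})  # -> {0: 75, 1: 25}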
| 24,473 | 35.257778 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/data_overview.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import json
from collections import Counter
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.util import LOGGER
from federatedml.util import consts
def get_features_shape(data_instances):
    one_feature = data_instances.first()
    if one_feature is None:
        return None
    instance = one_feature[1]
    if instance is None:
        return None
    if type(instance.features).__name__ == consts.SPARSE_VECTOR:
        return instance.features.get_shape()
    else:
        return instance.features.shape[0]
def get_instance_shape(instance):
if instance is None:
return None
if type(instance.features).__name__ == consts.SPARSE_VECTOR:
return instance.features.get_shape()
else:
return instance.features.shape[0]
def get_anonymous_header(data_instances):
anonymous_header = data_instances.schema.get('anonymous_header') # ['x1', 'x2', 'x3' ... ]
return anonymous_header
def look_up_names_from_header(name_list, source_header, transform_header):
"""
Parameters
----------
name_list: list or str, list of feature name(s)
source_header: table header containing name_list
transform_header: table header into which name_list to be transformed
Returns
-------
list of plaintext feature names
"""
if name_list is None:
return
if len(source_header) != len(transform_header):
raise ValueError(f"Length of source header and transform header do not match, please check.")
if not isinstance(name_list, list):
name_list = [name_list]
name_set = set(name_list)
# name list contains repeated name
if len(name_set) < len(name_list):
LOGGER.debug(f"Repeated name(s) found in provided name_list: {name_list}.")
name_set = name_list
feature_names = [f_name for i, f_name in enumerate(transform_header) if source_header[i] in name_set]
if len(feature_names) < len(name_set):
raise ValueError(f"Cannot match all provided names from: {name_list} to given header, "
f"please check.")
return feature_names
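# Hypothetical usage: translate plaintext feature names into the
# corresponding anonymous names from an aligned header pair.
#   look_up_names_from_header(["age"], ["age", "income"], ["x0", "x1"])
#   # -> ["x0"]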
def max_abs_sample_weight_map_func(kv_iter):
max_weight = -1
for k, inst in kv_iter:
if np.abs(inst.weight) > max_weight:
max_weight = np.abs(inst.weight)
return max_weight
def max_sample_weight_cmp(v1, v2):
return v1 if v1 > v2 else v2
def get_max_sample_weight(data_inst_with_weight):
inter_rs = data_inst_with_weight.applyPartitions(max_abs_sample_weight_map_func)
max_weight = inter_rs.reduce(max_sample_weight_cmp)
return max_weight
def check_negative_sample_weight(kv_iterator):
for k, v in kv_iterator:
if isinstance(v, Instance) and v.weight is not None:
if v.weight < 0:
return True
return False
def header_alignment(data_instances, pre_header, pre_anonymous_header=None):
header = [col.strip() for col in data_instances.schema["header"]]
if len((set(header) & set(pre_header))) != len(pre_header):
raise ValueError(f"fit & transform data' header should be the same! "
f"Previous header: {pre_header}. "
f"Current header: {header}.")
if pre_header == header:
if pre_anonymous_header:
data_instances.schema["anonymous_header"] = pre_anonymous_header
return data_instances
if len(pre_header) != len(header):
LOGGER.warning(
"header in prediction stage is super-set training stage, predict size is {}, training header size is {}".format(
len(header), len(pre_header)))
else:
LOGGER.warning("header in prediction stage will be shuffled to match the header of training stage")
header_idx_mapping = dict(zip(pre_header, [i for i in range(len(pre_header))]))
header_correct = {}
for i in range(len(header)):
col = header[i]
if col not in header_idx_mapping:
continue
header_correct[i] = header_idx_mapping[col]
def align_header(inst, header_pos=None):
if type(inst.features).__name__ == consts.SPARSE_VECTOR:
shape = len(header_pos)
new_data = {}
for k, v in inst.features.get_all_data():
if k not in header_pos:
continue
new_data[header_pos.get(k)] = v
inst_new = copy.deepcopy(inst)
inst_new.features.set_shape(shape)
inst_new.features.set_sparse_vector(new_data)
else:
col_order = [None] * len(header_pos)
for k, v in header_pos.items():
col_order[v] = k
inst_new = copy.deepcopy(inst)
inst_new.features = inst.features[col_order]
return inst_new
correct_schema = data_instances.schema
correct_schema["header"] = pre_header
if pre_anonymous_header:
correct_schema["anonymous_header"] = pre_anonymous_header
data_instances = data_instances.mapValues(lambda inst: align_header(inst, header_pos=header_correct))
data_instances.schema = correct_schema
return data_instances
def get_data_shape(data):
one_feature = data.first()
if one_feature is not None:
return len(list(one_feature[1]))
else:
return None
def get_header(data_instances):
header = data_instances.schema.get('header') # ['x1', 'x2', 'x3' ... ]
return header
def is_empty_feature(data_instances):
shape_of_feature = get_features_shape(data_instances)
if shape_of_feature is None or shape_of_feature == 0:
return True
return False
def is_sparse_data(data_instance):
first_data = data_instance.first()
if type(first_data[1]).__name__ in ['ndarray', 'list', 'tuple']:
return False
data_feature = first_data[1].features
if type(data_feature).__name__ == "ndarray":
return False
else:
return True
def count_labels(data_instance):
def _count_labels(instances):
labels = set()
for idx, instance in instances:
label = instance.label
labels.add(label)
return labels
label_set = data_instance.applyPartitions(_count_labels)
label_set = label_set.reduce(lambda x1, x2: x1.union(x2))
return len(label_set)
# if len(label_set) != 2:
# return False
# return True
def with_weight(data_instances):
first_entry = data_instances.first()[1]
if isinstance(first_entry, Instance) and first_entry.weight is not None:
return True
return False
def get_class_dict(kv_iterator):
class_dict = {}
for _, inst in kv_iterator:
count = class_dict.get(inst.label, 0)
class_dict[inst.label] = count + 1
if len(class_dict.keys()) > consts.MAX_CLASSNUM:
raise ValueError("In Classify Task, max dif classes should be no more than %d" % (consts.MAX_CLASSNUM))
return class_dict
def get_label_count(data_instances):
class_weight = data_instances.mapPartitions(get_class_dict).reduce(
lambda x, y: dict(Counter(x) + Counter(y)))
return class_weight
def get_predict_result_labels(data):
def _get_labels(score_inst):
labels = set()
for idx, result in score_inst:
true_label = result.features[0]
predict_label = result.features[1]
labels.add(true_label)
labels.add(predict_label)
return labels
label_set = data.applyPartitions(_get_labels)
label_set = label_set.reduce(lambda x1, x2: x1.union(x2))
if len(label_set) > consts.MAX_CLASSNUM:
raise ValueError("In Classify Task, max dif classes should be no more than %d" % (consts.MAX_CLASSNUM))
return label_set
def rubbish_clear(rubbish_list):
"""
    Temporary procedure for resource recovery. This will be discarded in the next version because of our new resource recovery plan
    Parameters
----------
rubbish_list: list of Table, each Table in this will be destroy
"""
for r in rubbish_list:
try:
if r is None:
continue
r.destroy()
except Exception as e:
LOGGER.warning("destroy table error,:{}, but this can be ignored sometimes".format(e))
def check_with_inst_id(data_instances):
instance = data_instances.first()[1]
if isinstance(instance, Instance) and instance.with_inst_id:
return True
return False
def predict_detail_dict_to_str(result_dict):
return "\"" + json.dumps(result_dict).replace("\"", "\'") + "\""
def predict_detail_str_to_dict(result_dict_str):
return json.loads(json.loads(result_dict_str).replace("\'", "\""))
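# Round-trip sketch for the two helpers above: the dict is JSON-encoded,
# double quotes are swapped for single quotes so the payload survives outer
# quoting, and the reverse substitution restores valid JSON.
#   s = predict_detail_dict_to_str({"0": 0.3, "1": 0.7})
#   # s contains: "{'0': 0.3, '1': 0.7}" (outer double quotes included)
#   predict_detail_str_to_dict(s)  # -> {"0": 0.3, "1": 0.7}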
def scale_sample_weight(data_instances):
data_count = data_instances.count()
def _sum_all_weight(kv_iterator):
weight_sum = 0
for _, v in kv_iterator:
weight_sum += v.weight
return weight_sum
total_weight = data_instances.mapPartitions(_sum_all_weight).reduce(lambda x, y: x + y)
# LOGGER.debug(f"weight_sum is : {total_weight}")
scale_factor = data_count / total_weight
# LOGGER.debug(f"scale factor is : {total_weight}")
def _replace_weight(instance):
new_weight = instance.weight * scale_factor
instance.set_weight(new_weight)
return instance
scaled_instances = data_instances.mapValues(lambda v: _replace_weight(v))
return scaled_instances
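# Worked example with hypothetical numbers: for 4 rows whose weights sum to
# 2.0, scale_factor = 4 / 2.0 = 2.0, so every weight is doubled and the
# rescaled weights sum back to the row count (their mean becomes 1).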
class DataStatistics(object):
def __init__(self):
self.multivariate_statistic_obj = None
def static_all_values(self, data_instances, static_col_indexes, is_sparse: bool = False):
if not is_sparse:
f = functools.partial(self.__dense_values_set,
static_col_indexes=static_col_indexes)
else:
f = functools.partial(self.__sparse_values_set,
static_col_indexes=static_col_indexes)
result_sets = data_instances.applyPartitions(f).reduce(self.__reduce_set_results)
result = [sorted(list(x)) for x in result_sets]
return result
@staticmethod
def __dense_values_set(instances, static_col_indexes: list):
result = [set() for _ in static_col_indexes]
for _, instance in instances:
for idx, col_index in enumerate(static_col_indexes):
value_set = result[idx]
value_set.add(instance.features[col_index])
return result
@staticmethod
def __sparse_values_set(instances, static_col_indexes: list):
tmp_result = {idx: set() for idx in static_col_indexes}
for _, instance in instances:
data_generator = instance.features.get_all_data()
for idx, value in data_generator:
if idx not in tmp_result:
continue
tmp_result[idx].add(value)
result = [tmp_result[x] for x in static_col_indexes]
return result
@staticmethod
def __reduce_set_results(result_set_a, result_set_b):
final_result_sets = []
for set_a, set_b in zip(result_set_a, result_set_b):
final_result_sets.append(set_a.union(set_b))
return final_result_sets
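    # Hypothetical usage of static_all_values: collect the sorted distinct
    # values per selected column across all partitions.
    #   value_sets = DataStatistics().static_all_values(data_instances, [0, 2])
    #   # value_sets[0] -> sorted distinct values observed in column 0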
| 11,969 | 31.794521 | 128 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/data_statistics.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.feature.fate_element_type import NoneType
from federatedml.model_base import ModelBase
from federatedml.param.statistics_param import StatisticsParam
from federatedml.protobuf.generated import statistic_meta_pb2, statistic_param_pb2
from federatedml.statistic.data_overview import get_header
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.util import LOGGER
from federatedml.util import abnormal_detection
from federatedml.util import consts
MODEL_PARAM_NAME = 'StatisticParam'
MODEL_META_NAME = 'StatisticMeta'
SYSTEM_ABNORMAL_VALUES = [None, np.nan, NoneType]
class StatisticInnerParam(object):
def __init__(self):
self.col_name_maps = {}
self.header = []
self.static_indices = []
self.static_indices_set = set()
self.static_names = []
def set_header(self, header):
self.header = header
for idx, col_name in enumerate(self.header):
self.col_name_maps[col_name] = idx
def set_static_all(self):
self.static_indices = [i for i in range(len(self.header))]
self.static_indices_set = set(self.static_indices)
self.static_names = self.header
def add_static_indices(self, static_indices):
if static_indices is None:
return
for idx in static_indices:
if idx >= len(self.header):
LOGGER.warning("Adding indices that out of header's bound")
continue
if idx not in self.static_indices_set:
self.static_indices_set.add(idx)
self.static_indices.append(idx)
self.static_names.append(self.header[idx])
def add_static_names(self, static_names):
if static_names is None:
return
for col_name in static_names:
idx = self.col_name_maps.get(col_name)
if idx is None:
LOGGER.warning(f"Adding col_name: {col_name} that is not exist in header")
continue
if idx not in self.static_indices_set:
self.static_indices_set.add(idx)
self.static_indices.append(idx)
self.static_names.append(self.header[idx])
class DataStatistics(ModelBase):
def __init__(self):
super().__init__()
self.model_param = StatisticsParam()
self.inner_param = None
self.schema = None
self.statistic_obj: MultivariateStatisticalSummary = None
self._result_dict = {}
self._numeric_statics = []
self._quantile_statics = []
self.feature_value_pb = []
def _init_model(self, model_param):
self.model_param = model_param
for stat_name in self.model_param.statistics:
if stat_name in self.model_param.LEGAL_STAT:
self._numeric_statics.append(stat_name)
else:
self._quantile_statics.append(stat_name)
def _init_param(self, data_instances):
if self.schema is None or len(self.schema) == 0:
self.schema = data_instances.schema
if self.inner_param is not None:
return
self.inner_param = StatisticInnerParam()
# self.schema = data_instances.schema
LOGGER.debug("In _init_params, schema is : {}".format(self.schema))
header = get_header(data_instances)
self.inner_param.set_header(header)
if self.model_param.column_indexes == -1:
self.inner_param.set_static_all()
else:
self.inner_param.add_static_indices(self.model_param.column_indexes)
self.inner_param.add_static_names(self.model_param.column_names)
LOGGER.debug(f"column_indexes: {self.model_param.column_indexes}, inner_param"
f" static_indices: {self.inner_param.static_indices}")
return self
@staticmethod
def _merge_abnormal_list(abnormal_list):
if abnormal_list is None:
return SYSTEM_ABNORMAL_VALUES
return abnormal_list + SYSTEM_ABNORMAL_VALUES
def fit(self, data_instances):
self._init_param(data_instances)
self._abnormal_detection(data_instances)
if consts.KURTOSIS in self.model_param.statistics:
stat_order = 4
elif consts.SKEWNESS in self.model_param.statistics:
stat_order = 3
else:
stat_order = 2
abnormal_list = self._merge_abnormal_list(self.model_param.abnormal_list)
self.statistic_obj = MultivariateStatisticalSummary(data_instances,
cols_index=self.inner_param.static_indices,
abnormal_list=abnormal_list,
error=self.model_param.quantile_error,
stat_order=stat_order,
bias=self.model_param.bias)
results = None
for stat_name in self._numeric_statics:
stat_res = self.statistic_obj.get_statics(stat_name)
LOGGER.debug(f"state_name: {stat_name}, stat_res: {stat_res}")
self.feature_value_pb.append(self._convert_pb(stat_res, stat_name))
if results is None:
results = {k: {stat_name: v} for k, v in stat_res.items()}
else:
for k, v in results.items():
results[k] = dict(**v, **{stat_name: stat_res[k]})
for query_point in self._quantile_statics:
q = float(query_point[:-1]) / 100
res = self.statistic_obj.get_quantile_point(q)
self.feature_value_pb.append(self._convert_pb(res, query_point))
if results is None:
results = res
else:
for k, v in res.items():
results[k][query_point] = v
for k, v in results.items():
# new_dict = {}
# for stat_name, value in v.items():
# LOGGER.debug(f"stat_name: {stat_name}, value: {value}, type: {type(value)}")
self.add_summary(k, v)
LOGGER.debug(f"Before return, summary: {self.summary()}")
def _convert_pb(self, stat_res, stat_name):
values = [stat_res[col_name] for col_name in self.inner_param.static_names]
return statistic_param_pb2.StatisticSingleFeatureValue(
values=values,
col_names=self.inner_param.static_names,
value_name=stat_name
)
def export_model(self):
if self.model_output is not None:
return self.model_output
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
MODEL_META_NAME: meta_obj,
MODEL_PARAM_NAME: param_obj
}
self.model_output = result
return result
def _get_meta(self):
return statistic_meta_pb2.StatisticMeta(
statistics=self.model_param.statistics,
static_columns=self.inner_param.static_names,
quantile_error=self.model_param.quantile_error,
need_run=self.model_param.need_run
)
def _get_param(self):
all_result = statistic_param_pb2.StatisticOnePartyResult(
results=self.feature_value_pb
)
return statistic_param_pb2.ModelParam(
self_values=all_result,
model_name=consts.STATISTIC_MODEL
)
def _abnormal_detection(self, data_instances):
"""
Make sure input data_instances is valid.
"""
abnormal_detection.empty_table_detection(data_instances)
abnormal_detection.empty_feature_detection(data_instances)
self.check_schema_content(data_instances.schema)
| 8,524 | 37.926941 | 103 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from federatedml.statistic import intersect
#
# __all__ = ['intersect']
| 691 | 33.6 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/feldman_verifiable_sum/feldman_verifiable_sum_host.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
from federatedml.util import LOGGER
from federatedml.transfer_variable.transfer_class.feldman_verifiable_sum_transfer_variable import \
FeldmanVerifiableSumTransferVariables
from federatedml.param.feldman_verifiable_sum_param import FeldmanVerifiableSumParam
from federatedml.statistic.feldman_verifiable_sum.base_feldman_verifiable_sum import BaseFeldmanVerifiableSum
class FeldmanVerifiableSumHost(BaseFeldmanVerifiableSum):
def __init__(self):
super(FeldmanVerifiableSumHost, self).__init__()
self.transfer_inst = FeldmanVerifiableSumTransferVariables()
self.host_party_idlist = []
self.local_partyid = -1
def _init_model(self, model_param: FeldmanVerifiableSumParam):
self.sum_cols = model_param.sum_cols
self.vss.Q_n = model_param.q_n
def _init_data(self, data_inst):
self.local_partyid = self.component_properties.local_partyid
self.host_party_idlist = self.component_properties.host_party_idlist
self.host_count = len(self.host_party_idlist)
self.vss.key_pair()
self.vss.set_share_amount(self.host_count)
if not self.sum_cols:
self.x = data_inst.mapValues(lambda x: x.features)
else:
self.x = data_inst.mapValues(self.select_data_by_idx)
def select_data_by_idx(self, values):
data = []
for idx, feature in enumerate(values.features):
if idx in self.model_param.sum_cols:
data.append(feature)
return numpy.array(data)
def sync_share_to_parties(self):
for idx, party_id in enumerate(self.host_party_idlist):
if self.local_partyid != party_id:
self.transfer_inst.host_share_to_host.remote(self.sub_key[idx],
role="host",
idx=idx)
else:
self.x_plus_y = self.sub_key[idx]
self.transfer_inst.host_share_to_guest.remote(self.sub_key[-1],
role="guest",
idx=0)
self.transfer_inst.host_commitments.remote(self.commitments, role="host", idx=-1)
self.transfer_inst.host_commitments.remote(self.commitments, role="guest", idx=-1)
def recv_share_from_parties(self):
for idx, party_id in enumerate(self.host_party_idlist):
if self.local_partyid != party_id:
sub_key = self.transfer_inst.host_share_to_host.get(idx=idx)
commitment = self.transfer_inst.host_commitments.get(idx=idx)
self.verify_subkey(sub_key, commitment, self.component_properties.host_party_idlist[idx])
self.y_recv.append(sub_key)
else:
sub_key = self.transfer_inst.guest_share_subkey.get(idx=0)
commitment = self.transfer_inst.guest_commitments.get(idx=0)
self.verify_subkey(sub_key, commitment, self.component_properties.guest_partyid)
self.y_recv.append(sub_key)
def sync_host_sum_to_guest(self):
self.transfer_inst.host_sum.remote(self.x_plus_y,
role="guest",
idx=-1)
def fit(self, data_inst):
LOGGER.info("begin to make host data")
self._init_data(data_inst)
LOGGER.info("split data into multiple random parts")
self.secure()
LOGGER.info("share one of random part data to multiple parties")
self.sync_share_to_parties()
LOGGER.info("get share of one random part data from multiple parties")
self.recv_share_from_parties()
LOGGER.info("begin to get sum of multiple party")
self.sub_key_sum()
LOGGER.info("send host sum to guest")
self.sync_host_sum_to_guest()
| 4,624 | 41.824074 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/feldman_verifiable_sum/base_feldman_verifiable_sum.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.model_base import ModelBase
from federatedml.param.feldman_verifiable_sum_param import FeldmanVerifiableSumParam
from federatedml.secureprotol.secret_sharing.verifiable_secret_sharing.feldman_verifiable_secret_sharing \
import FeldmanVerifiableSecretSharing
class BaseFeldmanVerifiableSum(ModelBase):
def __init__(self):
super(BaseFeldmanVerifiableSum, self).__init__()
self.vss = FeldmanVerifiableSecretSharing()
self.host_count = None
self.model_param = FeldmanVerifiableSumParam()
self.sum_cols = None
self.x = None
self.sub_key = [] # (x,f(x))
self.commitments = None # (x,g(ai))
self.y_recv = []
self.commitments_recv = []
self.host_sum_recv = []
self.x_plus_y = None
self.secret_sum = None
def secure(self):
encrypt_result = self.x.mapValues(self.generate_shares)
sub_key_table = encrypt_result.mapValues(lambda x: x[0])
self.commitments = encrypt_result.mapValues(lambda x: x[1])
for i in range(self.host_count + 1):
sub_key = sub_key_table.mapValues(lambda y: y[:, i])
self.sub_key.append(sub_key)
def generate_shares(self, values):
keys = []
commitments = []
for s in values:
sub_key, commitment = self.vss.encrypt(s)
keys.append(sub_key)
commitments.append(commitment)
res = (np.array(keys), np.array(commitments))
return res
def sub_key_sum(self):
for recv in self.y_recv:
self.x_plus_y = self.x_plus_y.join(recv, lambda x, y: np.column_stack((x[:, 0], np.add(x[:, 1], y[:, 1]))))
def reconstruct(self):
for recv in self.host_sum_recv:
self.x_plus_y = self.x_plus_y.join(recv, lambda x, y: np.column_stack((x, y)))
self.secret_sum = self.x_plus_y.mapValues(self.decrypt)
def decrypt(self, values):
secret_sum = []
for v in values:
x_values = v[::2]
y_values = v[1::2]
secret_sum.append(self.vss.decrypt(x_values, y_values))
return secret_sum
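    # Worked sketch of the interleaving consumed by decrypt: after
    # reconstruct(), each row holds (x1, y1, x2, y2, ...), so v[::2] recovers
    # the share x-coordinates and v[1::2] the summed y-coordinates that
    # vss.decrypt interpolates back into the secret sum.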
def verify_sumkey(self, sum_key, commitment, party_id):
for recv in self.commitments_recv:
commitment = commitment.join(recv, lambda x, y: (x * y) % self.vss.p)
sum_key.join(commitment, lambda x, y: self.verify(x, y, party_id, "sum_key"))
def verify_subkey(self, sub_key, commitment, party_id):
sub_key.join(commitment, lambda x, y: self.verify(x, y, party_id, "sub_key"))
    def verify(self, key, commitment, party_id, key_type):
        for idx, key_point in enumerate(key):
            res = self.vss.verify(key_point, commitment[idx])
            if not res:
                raise ValueError(f"Get wrong {key_type} from {party_id}")
        return True
| 3,526 | 37.758242 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/feldman_verifiable_sum/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/statistic/feldman_verifiable_sum/feldman_verifiable_sum_guest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy
from federatedml.util import LOGGER
from federatedml.feature.instance import Instance
from federatedml.transfer_variable.transfer_class import feldman_verifiable_sum_transfer_variable
from federatedml.param.feldman_verifiable_sum_param import FeldmanVerifiableSumParam
from federatedml.statistic.feldman_verifiable_sum.base_feldman_verifiable_sum import BaseFeldmanVerifiableSum
class FeldmanVerifiableSumGuest(BaseFeldmanVerifiableSum):
def __init__(self):
super(FeldmanVerifiableSumGuest, self).__init__()
self.transfer_inst = feldman_verifiable_sum_transfer_variable.FeldmanVerifiableSumTransferVariables()
self.output_schema = None
def _init_model(self, model_param: FeldmanVerifiableSumParam):
self.sum_cols = model_param.sum_cols
self.vss.Q_n = model_param.q_n
def _init_data(self, data_inst):
self.host_count = len(self.component_properties.host_party_idlist)
self.vss.key_pair()
self.vss.set_share_amount(self.host_count)
if not self.model_param.sum_cols:
self.x = data_inst.mapValues(lambda x: x.features)
self.output_schema = data_inst.schema
else:
self.x = data_inst.mapValues(self.select_data_by_idx)
header = []
for idx, label in enumerate(data_inst.schema.get('header')):
if idx in self.sum_cols:
header.append(label)
# self.output_schema = {"header": header, "sid_name": data_inst.schema.get('sid_name')}
schema = copy.deepcopy(data_inst.schema)
schema["header"] = header
self.output_schema = schema
def select_data_by_idx(self, values):
data = []
for idx, feature in enumerate(values.features):
if idx in self.sum_cols:
data.append(feature)
return numpy.array(data)
def sync_share_to_host(self):
for idx in range(self.host_count):
self.transfer_inst.guest_share_subkey.remote(self.sub_key[idx],
role="host",
idx=idx)
self.transfer_inst.guest_commitments.remote(self.commitments,
role="host",
idx=-1)
self.x_plus_y = self.sub_key[-1]
def recv_share_from_host(self):
for idx in range(self.host_count):
sub_key = self.transfer_inst.host_share_to_guest.get(idx=idx)
commitment = self.transfer_inst.host_commitments.get(idx=idx)
self.verify_subkey(sub_key, commitment, self.component_properties.host_party_idlist[idx])
self.y_recv.append(sub_key)
self.commitments_recv.append(commitment)
def recv_host_sum_from_host(self):
for idx in range(self.host_count):
host_sum = self.transfer_inst.host_sum.get(idx=idx)
self.verify_sumkey(host_sum, self.commitments, self.component_properties.host_party_idlist[idx])
self.host_sum_recv.append(host_sum)
def fit(self, data_inst):
LOGGER.info("begin to make guest data")
self._init_data(data_inst)
LOGGER.info("split data into multiple random parts")
self.secure()
LOGGER.info("share one random part data to multiple hosts")
self.sync_share_to_host()
LOGGER.info("get share of one random part data from multiple hosts")
self.recv_share_from_host()
LOGGER.info("begin to get sum of multiple party")
self.sub_key_sum()
LOGGER.info("receive host sum from host")
self.recv_host_sum_from_host()
self.reconstruct()
LOGGER.info("success to calculate privacy sum")
self.secret_sum = self.secret_sum.join(data_inst, lambda s, v: Instance(features=numpy.array(s),
inst_id=v.inst_id))
self.secret_sum.schema = self.output_schema
data_output = self.secret_sum
return data_output
| 4,824 | 39.208333 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/scorecard/score_transformer.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from federatedml.model_base import ModelBase
from federatedml.param.scorecard_param import ScorecardParam
from federatedml.util.consts import FLOAT_ZERO
from federatedml.util import LOGGER
from federatedml.feature.instance import Instance
class Scorecard(ModelBase):
def __init__(self):
super().__init__()
self.model_param = ScorecardParam()
self.metric_name = "scorecard"
self.metric_namespace = "train"
self.metric_type = "SCORECARD"
self.use_match_id = False
def _init_model(self, params):
self.model_param = params
self.method = params.method
self.offset = params.offset
self.factor = params.factor
self.factor_base = params.factor_base
self.upper_limit_ratio = params.upper_limit_ratio
self.lower_limit_value = params.lower_limit_value
self.need_run = params.need_run
@staticmethod
def compute_credit_score(result, offset, factor, factor_base, upper_limit_value, lower_limit_value,
use_match_id=False):
predict_result = result
if use_match_id:
predict_result = result.features
predict_score = predict_result[2]
# deal with special predict score values
if abs(predict_score - 0) <= FLOAT_ZERO and predict_score >= 0:
credit_score = upper_limit_value
elif abs(predict_score - 1) <= FLOAT_ZERO and predict_score > 0:
credit_score = lower_limit_value
elif predict_score > 1 or predict_score < 0:
credit_score = -1
else:
odds = (1 - predict_score) / predict_score
credit_score = offset + factor / np.log(factor_base) * np.log(odds)
# credit score should be within range
if credit_score > upper_limit_value:
credit_score = upper_limit_value
if credit_score < lower_limit_value:
credit_score = lower_limit_value
credit_score = round(credit_score, 2)
if use_match_id:
credit_result = copy.deepcopy(result)
credit_result.features = [predict_result[0], predict_result[1], predict_score, credit_score]
else:
credit_result = [predict_result[0], predict_result[1], predict_score, credit_score]
return credit_result
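    # Worked example with hypothetical parameters offset=500, factor=20,
    # factor_base=2: predict_score=0.5 gives odds=(1-0.5)/0.5=1 and ln(1)=0,
    # so the score is exactly the offset (500); each doubling of the odds
    # then adds `factor` (20) points.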
def _set_summary(self):
formula = f"Score = {self.offset} + {self.factor} / ln({self.factor_base}) * ln(Odds)"
self.set_summary({"scorecard_compute_formula": formula})
LOGGER.info(f"Scorecard Computation Formula: {formula}")
def fit(self, prediction_result):
LOGGER.info(f"Start Scorecard Transform, method: {self.method}")
offset, factor, factor_base = self.offset, self.factor, self.factor_base
if factor_base != 2:
LOGGER.warning(f"scorecard param 'factor_base' given is {factor_base}, which is not equal to 2.")
upper_limit_value, lower_limit_value = self.upper_limit_ratio * offset, self.lower_limit_value
if isinstance(prediction_result.first()[1], Instance):
self.use_match_id = True
score_result = prediction_result.mapValues(lambda v: Scorecard.compute_credit_score(v, offset, factor,
factor_base,
upper_limit_value,
lower_limit_value,
self.use_match_id))
result_schema = copy.deepcopy(prediction_result.schema)
result_schema["header"] = ["label", "predict_result", "predict_score", "credit_score"]
score_result.schema = result_schema
self._set_summary()
LOGGER.info(f"Finish Scorecard Transform!")
return score_result
| 4,627 | 41.851852 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/scorecard/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/statistic/correlation/hetero_pearson.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from fate_arch.common import Party
from federatedml.model_base import MetricMeta, ModelBase
from federatedml.param.pearson_param import PearsonParam
from federatedml.secureprotol.spdz import SPDZ
from federatedml.secureprotol.spdz.tensor.fixedpoint_table import (
FixedPointTensor,
table_dot,
)
from federatedml.statistic.data_overview import get_anonymous_header, get_header
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
from federatedml.util import LOGGER
class PearsonTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.anonymous_host = self._create_variable(
"anonymous_host", src=["host"], dst=["guest"]
)
self.anonymous_guest = self._create_variable(
"anonymous_guest", src=["guest"], dst=["host"]
)
class HeteroPearson(ModelBase):
def __init__(self):
super().__init__()
self.model_param = PearsonParam()
self.transfer_variable = PearsonTransferVariable()
self._summary = {}
self._modelsaver = PearsonModelSaver()
def fit(self, data_instance):
LOGGER.info("fit start")
column_names = get_header(data_instance)
column_anonymous_names = get_anonymous_header(data_instance)
self._modelsaver.save_local_anonymous(column_names, column_anonymous_names)
parties = [
Party("guest", self.component_properties.guest_partyid),
Party("host", self.component_properties.host_party_idlist[0]),
]
local_party = parties[0] if self.is_guest else parties[1]
other_party = parties[1] if self.is_guest else parties[0]
self._modelsaver.save_party(local_party)
LOGGER.info("select features")
names, selected_features = select_columns(
data_instance,
self.model_param.column_indexes,
self.model_param.column_names,
)
LOGGER.info("standardized feature data")
num_data, standardized, remainds_indexes, num_features = standardize(
selected_features
)
self._summary["num_local_features"] = num_features
# local corr
LOGGER.info("calculate correlation cross local features")
local_corr = table_dot(standardized, standardized) / num_data
fixed_local_corr = fix_local_corr(local_corr, remainds_indexes, num_features)
self._modelsaver.save_local_corr(fixed_local_corr)
self._summary["local_corr"] = fixed_local_corr.tolist()
shape = fixed_local_corr.shape[0]
# local vif
if self.model_param.calc_local_vif:
LOGGER.info("calc_local_vif enabled, calculate vif for local features")
local_vif = vif_from_pearson_matrix(local_corr)
fixed_local_vif = fix_vif(local_vif, remainds_indexes, num_features)
self._modelsaver.save_local_vif(fixed_local_vif)
else:
LOGGER.info("calc_local_vif disabled, skip local vif")
# not cross parties
if not self.model_param.cross_parties:
LOGGER.info("cross_parties disabled, save model")
self._modelsaver.save_party_info(shape, local_party, names)
# cross parties
else:
LOGGER.info(
"cross_parties enabled, calculating correlation with remote features"
)
# sync anonymous
LOGGER.info("sync anonymous names")
remote_anonymous_names, remote_remainds_indexes = self.sync_anonymous_names(
column_anonymous_names, remainds_indexes
)
if self.is_guest:
names = [column_names, remote_anonymous_names]
remainds_indexes_tuple = (remainds_indexes, remote_remainds_indexes)
else:
names = [remote_anonymous_names, column_names]
remainds_indexes_tuple = (remote_remainds_indexes, remainds_indexes)
m1, m2 = len(names[0]), len(names[1])
shapes = [m1, m2]
for shape, party, name in zip(shapes, parties, names):
self._modelsaver.save_party_info(shape, party, name)
self._summary["num_remote_features"] = m2 if self.is_guest else m1
with SPDZ(
"pearson",
local_party=local_party,
all_parties=parties,
use_mix_rand=self.model_param.use_mix_rand,
) as spdz:
LOGGER.info("secret share: prepare data")
if self.is_guest:
x, y = (
FixedPointTensor.from_source("x", standardized),
FixedPointTensor.from_source("y", other_party),
)
else:
y, x = (
FixedPointTensor.from_source("y", standardized),
FixedPointTensor.from_source("x", other_party),
)
LOGGER.info("secret share: dot")
corr = spdz.dot(x, y, "corr").get() / num_data
fixed_corr = fix_corr(
corr, m1, m2, remainds_indexes_tuple[0], remainds_indexes_tuple[1]
)
self._modelsaver.save_cross_corr(fixed_corr)
self._summary["corr"] = fixed_corr.tolist()
self._callback()
self.set_summary(self._summary)
LOGGER.info("fit done")
@property
def is_guest(self):
return self.component_properties.role == "guest"
def _init_model(self, param):
super()._init_model(param)
self.model_param = param
def export_model(self):
return self._modelsaver.export()
# noinspection PyTypeChecker
def _callback(self):
self.tracker.set_metric_meta(
metric_namespace="statistic",
metric_name="correlation",
metric_meta=MetricMeta(name="pearson", metric_type="CORRELATION_GRAPH"),
)
def sync_anonymous_names(self, local_anonymous, remainds_indexes):
if self.is_guest:
self.transfer_variable.anonymous_guest.remote(
(local_anonymous, remainds_indexes), role="host"
)
(
remote_anonymous,
remote_remainds_indexes,
) = self.transfer_variable.anonymous_host.get(role="host", idx=0)
else:
self.transfer_variable.anonymous_host.remote(
(local_anonymous, remainds_indexes), role="guest"
)
(
remote_anonymous,
remote_remainds_indexes,
) = self.transfer_variable.anonymous_guest.get(role="guest", idx=0)
return remote_anonymous, remote_remainds_indexes
class PearsonModelSaver:
def __init__(self) -> None:
from federatedml.protobuf.generated import (
pearson_model_meta_pb2,
pearson_model_param_pb2,
)
self.meta_pb = pearson_model_meta_pb2.PearsonModelMeta()
self.param_pb = pearson_model_param_pb2.PearsonModelParam()
self.param_pb.model_name = "HeteroPearson"
def export(self):
MODEL_META_NAME = "HeteroPearsonModelMeta"
MODEL_PARAM_NAME = "HeteroPearsonModelParam"
return {MODEL_META_NAME: self.meta_pb, MODEL_PARAM_NAME: self.param_pb}
def save_shapes(self, shapes):
for shape in shapes:
self.meta_pb.shapes.append(shape)
def save_local_corr(self, corr):
self.param_pb.shape = corr.shape[0]
for v in corr.reshape(-1):
self.param_pb.local_corr.append(v.tolist())
def save_party_info(self, shape, party, names):
self.param_pb.shapes.append(shape)
self.param_pb.parties.append(f"({party.role},{party.party_id})")
_names = self.param_pb.all_names.add()
for name in names:
_names.names.append(name)
def save_local_vif(self, local_vif):
for vif_value in local_vif:
self.param_pb.local_vif.append(vif_value)
def save_cross_corr(self, corr):
for v in corr.reshape(-1):
self.param_pb.corr.append(v.tolist())
def save_party(self, party):
self.param_pb.party = f"({party.role},{party.party_id})"
def save_local_anonymous(self, names, anonymous_names):
for name, anonymous_name in zip(names, anonymous_names):
self.param_pb.names.append(name)
anonymous = self.param_pb.anonymous_map.add()
anonymous.name = name
anonymous.anonymous = anonymous_name
def standardize(data):
"""
x -> (x - mu) / sigma
"""
n = data.count()
sum_x, sum_square_x = data.mapValues(lambda x: (x, x ** 2)).reduce(
lambda pair1, pair2: (pair1[0] + pair2[0], pair1[1] + pair2[1])
)
mu = sum_x / n
sigma = np.sqrt(sum_square_x / n - mu ** 2)
size = len(sigma)
    remainds_indexes = [i for i, e in enumerate(sigma) if e > 0]
    if len(remainds_indexes) < size:
        LOGGER.warning(
            f"zero standard deviation detected, sigma={sigma}, "
            f"zero_indexes={np.argwhere(sigma <= 0).flatten()}"
        )
        return (
            n,
            data.mapValues(
                lambda x: (x[remainds_indexes] - mu[remainds_indexes])
                / sigma[remainds_indexes]
            ),
            remainds_indexes,
            size,
        )
    return n, data.mapValues(lambda x: (x - mu) / sigma), remainds_indexes, size
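# NumPy-only sketch of the transform above (hypothetical data, no
# zero-variance columns dropped):
#   x = np.array([[1., 2.], [3., 4.]])
#   mu, sigma = x.mean(axis=0), x.std(axis=0)
#   z = (x - mu) / sigma   # each column now has mean 0 and variance 1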
def select_columns(data_instance, hit_column_indexes, hit_column_names):
"""
select features
"""
column_names = data_instance.schema["header"]
num_columns = len(column_names)
# accept all features
if hit_column_indexes == -1:
if len(hit_column_names) > 0:
raise ValueError(f"specify column name when column_indexes=-1 is ambiguity")
return column_names, data_instance.mapValues(lambda inst: inst.features)
# check hit column indexes and column names
name_to_index = {c: i for i, c in enumerate(column_names)}
selected = set()
for name in hit_column_names:
if name not in name_to_index:
raise ValueError(f"feature name `{name}` not found in data schema")
else:
selected.add(name_to_index[name])
for idx in hit_column_indexes:
if 0 <= idx < num_columns:
selected.add(idx)
else:
raise ValueError(f"feature idx={idx} out of bound")
selected = sorted(list(selected))
# take shortcut if all feature hit
if len(selected) == len(column_names):
return column_names, data_instance.mapValues(lambda inst: inst.features)
return (
[column_names[i] for i in selected],
data_instance.mapValues(lambda inst: inst.features[selected]),
)
def vif_from_pearson_matrix(pearson_matrix, threshold=1e-8):
LOGGER.info(f"local vif calc: start")
assert not np.isnan(
pearson_matrix
).any(), f"should not contains nan: {pearson_matrix}"
N = pearson_matrix.shape[0]
vif = []
LOGGER.info(f"local vif calc: calc matrix eigvals")
eig = sorted([abs(v) for v in np.linalg.eigvalsh(pearson_matrix)])
num_drop = len(list(filter(lambda x: x < threshold, eig)))
det_non_zero = np.prod(eig[num_drop:])
LOGGER.info(f"local vif calc: calc submatrix eigvals")
for i in range(N):
indexes = [j for j in range(N) if j != i]
cofactor_matrix = pearson_matrix[indexes][:, indexes]
cofactor_eig = sorted([abs(v) for v in np.linalg.eigvalsh(cofactor_matrix)])
vif.append(np.prod(cofactor_eig[num_drop:]) / det_non_zero)
LOGGER.info(f"local vif calc: submatrix {i+1}/{N} eig is {vif[-1]}")
LOGGER.info(f"local vif calc done")
return vif
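# The routine above relies on the determinant identity (stated here as an
# assumption of the implementation):
#   VIF_i = det(R_{-i,-i}) / det(R)
# where R is the Pearson matrix and R_{-i,-i} drops row/column i; both
# determinants are taken as products of eigenvalues with near-zero
# eigenvalues (below `threshold`) dropped, so a rank-deficient R does not
# force a division by zero.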
def fix_local_corr(remaind_corr, remainds_indexes, size):
corr = np.zeros((size, size))
corr.fill(np.nan)
corr[np.ix_(remainds_indexes, remainds_indexes)] = np.clip(remaind_corr, -1.0, 1.0)
return corr
def fix_vif(remains_vif, remainds_indexes, size):
vif = np.zeros(size)
vif.fill(np.nan)
vif[remainds_indexes] = remains_vif
return vif
def fix_corr(remaind_corr, m1, m2, remainds_indexes1, remainds_indexes2):
corr = np.zeros((m1, m2))
corr.fill(np.nan)
corr[np.ix_(remainds_indexes1, remainds_indexes2)] = np.clip(
remaind_corr, -1.0, 1.0
)
return corr
| 13,069 | 36.130682 | 96 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/correlation/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/test/statics_test.py
|
import math
import time
import unittest
import uuid
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.util import consts
session.init("123")
from federatedml.feature.instance import Instance
from federatedml.statistic.statics import MultivariateStatisticalSummary
class TestStatistics(unittest.TestCase):
def setUp(self):
self.job_id = str(uuid.uuid1())
session.init(self.job_id)
self.eps = 1e-5
self.count = 1000
self.feature_num = 100
self._dense_table, self._dense_not_inst_table, self._original_data = None, None, None
def _gen_table_data(self):
if self._dense_table is not None:
return self._dense_table, self._dense_not_inst_table, self._original_data
headers = ['x' + str(i) for i in range(self.feature_num)]
dense_inst = []
dense_not_inst = []
original_data = 100 * np.random.random((self.count, self.feature_num))
# original_data = 100 * np.zeros((self.count, self.feature_num))
for i in range(self.count):
features = original_data[i, :]
inst = Instance(features=features)
dense_inst.append((i, inst))
dense_not_inst.append((i, features))
dense_table = session.parallelize(dense_inst, include_key=True, partition=16)
dense_not_inst_table = session.parallelize(dense_not_inst, include_key=True, partition=16)
dense_table.schema = {'header': headers}
dense_not_inst_table.schema = {'header': headers}
self._dense_table, self._dense_not_inst_table, self._original_data = \
dense_table, dense_not_inst_table, original_data
return dense_table, dense_not_inst_table, original_data
def _gen_missing_table(self):
headers = ['x' + str(i) for i in range(self.feature_num)]
dense_inst = []
dense_not_inst = []
original_data = 100 * np.random.random((self.count, self.feature_num))
for i in range(self.count):
features = original_data[i, :]
if i % 2 == 0:
features = np.array([np.nan] * self.feature_num)
inst = Instance(features=features)
dense_inst.append((i, inst))
dense_not_inst.append((i, features))
dense_table = session.parallelize(dense_inst, include_key=True, partition=16)
dense_not_inst_table = session.parallelize(dense_not_inst, include_key=True, partition=16)
dense_table.schema = {'header': headers}
dense_not_inst_table.schema = {'header': headers}
return dense_table, dense_not_inst_table, original_data
def test_MultivariateStatisticalSummary(self):
dense_table, dense_not_inst_table, original_data = self._gen_table_data()
summary_obj = MultivariateStatisticalSummary(dense_table)
self._test_min_max(summary_obj, original_data, dense_table)
self._test_min_max(summary_obj, original_data, dense_not_inst_table)
def _test_min_max(self, summary_obj, original_data, data_table):
# test max, min
max_array = np.max(original_data, axis=0)
min_array = np.min(original_data, axis=0)
mean_array = np.mean(original_data, axis=0)
var_array = np.var(original_data, axis=0)
std_var_array = np.std(original_data, axis=0)
t0 = time.time()
header = data_table.schema['header']
for idx, col_name in enumerate(header):
self.assertEqual(summary_obj.get_max()[col_name], max_array[idx])
self.assertEqual(summary_obj.get_min()[col_name], min_array[idx])
self.assertTrue(self._float_equal(summary_obj.get_mean()[col_name], mean_array[idx]))
self.assertTrue(self._float_equal(summary_obj.get_variance()[col_name], var_array[idx]))
self.assertTrue(self._float_equal(summary_obj.get_std_variance()[col_name], std_var_array[idx]))
print("max value etc, total time: {}".format(time.time() - t0))
def _float_equal(self, x, y, error=1e-6):
if math.fabs(x - y) < error:
return True
print(f"x: {x}, y: {y}")
return False
# def test_median(self):
# error = 0
# dense_table, dense_not_inst_table, original_data = self._gen_table_data()
#
# sorted_matrix = np.sort(original_data, axis=0)
# median_array = sorted_matrix[self.count // 2, :]
# header = dense_table.schema['header']
# summary_obj = MultivariateStatisticalSummary(dense_table, error=error)
# t0 = time.time()
#
# for idx, col_name in enumerate(header):
# self.assertTrue(self._float_equal(summary_obj.get_median()[col_name],
# median_array[idx]))
# print("median interface, total time: {}".format(time.time() - t0))
#
# summary_obj_2 = MultivariateStatisticalSummary(dense_not_inst_table, error=error)
# t0 = time.time()
# for idx, col_name in enumerate(header):
# self.assertTrue(self._float_equal(summary_obj_2.get_median()[col_name],
# median_array[idx]))
# print("median interface, total time: {}".format(time.time() - t0))
#
# def test_quantile_query(self):
#
# dense_table, dense_not_inst_table, original_data = self._gen_table_data()
#
# quantile_points = [0.25, 0.5, 0.75, 1.0]
# quantile_array = np.quantile(original_data, quantile_points, axis=0)
# summary_obj = MultivariateStatisticalSummary(dense_table, error=0)
# header = dense_table.schema['header']
#
# t0 = time.time()
# for q_idx, q in enumerate(quantile_points):
# for idx, col_name in enumerate(header):
# self.assertTrue(self._float_equal(summary_obj.get_quantile_point(q)[col_name],
# quantile_array[q_idx][idx],
# error=3))
# print("quantile interface, total time: {}".format(time.time() - t0))
#
# def test_missing_value(self):
# dense_table, dense_not_inst_table, original_data = self._gen_missing_table()
# summary_obj = MultivariateStatisticalSummary(dense_table, error=0)
# t0 = time.time()
# missing_result = summary_obj.get_missing_ratio()
# for col_name, missing_ratio in missing_result.items():
# self.assertEqual(missing_ratio, 0.5, msg="missing ratio should be 0.5")
# print("calculate missing ratio, total time: {}".format(time.time() - t0))
def test_moment(self):
dense_table, dense_not_inst_table, original_data = self._gen_table_data()
summary_obj = MultivariateStatisticalSummary(dense_table, error=0, stat_order=4, bias=False)
header = dense_table.schema['header']
from scipy import stats
moment_3 = stats.moment(original_data, 3, axis=0)
moment_4 = stats.moment(original_data, 4, axis=0)
skewness = stats.skew(original_data, axis=0, bias=False)
kurtosis = stats.kurtosis(original_data, axis=0, bias=False)
summary_moment_3 = summary_obj.get_statics("moment_3")
summary_moment_4 = summary_obj.get_statics("moment_4")
static_skewness = summary_obj.get_statics("skewness")
static_kurtosis = summary_obj.get_statics("kurtosis")
# print(f"moment: {summary_moment_4}, moment_2: {moment_4}")
for idx, col_name in enumerate(header):
self.assertTrue(self._float_equal(summary_moment_3[col_name],
moment_3[idx]))
self.assertTrue(self._float_equal(summary_moment_4[col_name],
moment_4[idx]))
self.assertTrue(self._float_equal(static_skewness[col_name],
skewness[idx]))
self.assertTrue(self._float_equal(static_kurtosis[col_name],
kurtosis[idx]))
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| 8,213 | 44.131868 | 108 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/test/hetero_pearson_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import uuid
from fate_arch.session import computing_session as session
import numpy as np
class TestStatistics(unittest.TestCase):
def setUp(self):
        session.init(str(uuid.uuid1()))
def test_standardized(self):
from federatedml.statistic.correlation import hetero_pearson
raw_data = np.random.rand(200, 100)
expect = (raw_data - np.mean(raw_data, axis=0)) / np.std(raw_data, axis=0)
data_table = session.parallelize([row for row in raw_data], partition=10, include_key=False)
n, standardized, _, _ = hetero_pearson.standardize(data_table)
standardized_data = np.array([row[1] for row in standardized.collect()])
self.assertEqual(n, standardized_data.shape[0])
self.assertEqual(raw_data.shape, standardized_data.shape)
self.assertAlmostEqual(np.linalg.norm(standardized_data - expect), 0.0)
if __name__ == '__main__':
unittest.main()
| 1,563 | 37.146341 | 100 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/test/statistic_cpn_test.py
|
import unittest
import uuid
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.param.statistics_param import StatisticsParam
from federatedml.statistic.data_statistics import DataStatistics
from federatedml.feature.instance import Instance
class TestStatisticCpn(unittest.TestCase):
def setUp(self):
self.job_id = str(uuid.uuid1())
session.init(self.job_id)
def gen_data(self, data_num, partition):
data = []
header = [str(i) for i in range(2)]
anonymous_header = ["guest_9999_x" + str(i) for i in range(2)]
col_1 = np.random.randn(data_num)
col_2 = np.random.rand(data_num)
for key in range(data_num):
data.append((key, Instance(features=np.array([col_1[key], col_2[key]]))))
result = session.parallelize(data, include_key=True, partition=partition)
result.schema = {'header': header,
"anonymous_header": anonymous_header}
self.header = header
self.col_1 = col_1
self.col_2 = col_2
return result
def test_something(self):
statistics_param = StatisticsParam(statistics="summary")
statistics_param.check()
print(statistics_param.statistics)
test_data = self.gen_data(1000, 16)
test_obj = DataStatistics()
test_obj.model_param = statistics_param
test_obj._init_model(statistics_param)
test_obj.fit(test_data)
static_result = test_obj.summary()
stat_res_1 = static_result[self.header[0]]
self.assertTrue(self._float_equal(stat_res_1['sum'], np.sum(self.col_1)))
self.assertTrue(self._float_equal(stat_res_1['max'], np.max(self.col_1)))
self.assertTrue(self._float_equal(stat_res_1['mean'], np.mean(self.col_1)))
self.assertTrue(self._float_equal(stat_res_1['stddev'], np.std(self.col_1)))
self.assertTrue(self._float_equal(stat_res_1['min'], np.min(self.col_1)))
# self.assertEqual(True, False)
def _float_equal(self, x, y, error=1e-6):
if np.fabs(x - y) < error:
return True
print(f"x: {x}, y: {y}")
return False
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| 2,290 | 34.246154 | 85 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/test/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/statistic/union/union.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.model_base import Metric, MetricMeta
from federatedml.feature.instance import Instance
from federatedml.model_base import ModelBase
from federatedml.param.union_param import UnionParam
from federatedml.statistic import data_overview
from federatedml.util import LOGGER
from federatedml.util.schema_check import assert_schema_consistent
class Union(ModelBase):
def __init__(self):
super().__init__()
self.model_param = UnionParam()
self.metric_name = "union"
self.metric_namespace = "train"
self.metric_type = "UNION"
self.repeated_ids = None
self.key = None
def _init_model(self, params):
self.model_param = params
self.allow_missing = params.allow_missing
self.keep_duplicate = params.keep_duplicate
self.feature_count = 0
self.is_data_instance = None
self.is_empty_feature = False
@staticmethod
def _keep_first(v1, v2):
return v1
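    # rename ids that occur in more than one input table by appending the source-table key,
    # so duplicated ids stay distinguishable after the union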
def _renew_id(self, k, v):
result = []
if k in self.repeated_ids:
new_k = f"{k}_{self.key}"
result.append((new_k, v))
else:
result.append((k, v))
return result
def check_id(self, local_table, combined_table):
local_schema, combined_schema = local_table.schema, combined_table.schema
local_sid_name = local_schema.get("sid")
combined_sid_name = combined_schema.get("sid")
if local_sid_name != combined_sid_name:
raise ValueError(f"Id names {local_sid_name} and {combined_sid_name} do not match! "
f"Please check id column names.")
def check_label_name(self, local_table, combined_table):
if not self.is_data_instance:
return
local_schema, combined_schema = local_table.schema, combined_table.schema
local_label_name = local_schema.get("label_name")
combined_label_name = combined_schema.get("label_name")
if local_label_name is None and combined_label_name is None:
return
if local_label_name is None or combined_label_name is None:
raise ValueError("Union try to combine a labeled data set with an unlabelled one."
"Please check labels.")
if local_label_name != combined_label_name:
raise ValueError("Label names do not match. "
"Please check label column names.")
def check_header(self, local_table, combined_table):
local_schema, combined_schema = local_table.schema, combined_table.schema
local_header = local_schema.get("header")
combined_header = combined_schema.get("header")
if local_header != combined_header:
raise ValueError("Table headers do not match! Please check header.")
def check_feature_length(self, data_instance):
if not self.is_data_instance or self.allow_missing:
return
if len(data_instance.features) != self.feature_count:
raise ValueError(f"Feature length {len(data_instance.features)} "
f"mismatch with header length {self.feature_count}")
@staticmethod
def check_is_data_instance(table):
entry = table.first()
is_data_instance = isinstance(entry[1], Instance)
return is_data_instance
@assert_schema_consistent
def fit(self, data):
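        # combine all input tables into a single table; with keep_duplicate, every key is
        # suffixed with its table name, otherwise only the first value of a repeated key is kept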
# LOGGER.debug(f"fit receives data is {data}")
if not isinstance(data, dict) or len(data) <= 1:
raise ValueError("Union module must receive more than one table as input.")
empty_count = 0
combined_table = None
combined_schema = None
metrics = []
for (key, local_table) in data.items():
LOGGER.debug("table to combine name: {}".format(key))
num_data = local_table.count()
LOGGER.debug("table count: {}".format(num_data))
metrics.append(Metric(key, num_data))
self.add_summary(key, num_data)
if num_data == 0:
LOGGER.warning("Table {} is empty.".format(key))
if combined_table is None:
combined_table = local_table
combined_schema = local_table.schema
empty_count += 1
continue
local_is_data_instance = self.check_is_data_instance(local_table)
if self.is_data_instance is None or combined_table is None:
self.is_data_instance = local_is_data_instance
LOGGER.debug(f"self.is_data_instance is {self.is_data_instance}, "
f"local_is_data_instance is {local_is_data_instance}")
if self.is_data_instance != local_is_data_instance:
raise ValueError(f"Cannot combine DataInstance and non-DataInstance object. Union aborted.")
if self.is_data_instance:
self.is_empty_feature = data_overview.is_empty_feature(local_table)
if self.is_empty_feature:
LOGGER.warning("Table {} has empty feature.".format(key))
else:
self.check_schema_content(local_table.schema)
if combined_table is None or combined_table.count() == 0:
# first non-empty table to combine
combined_table = local_table
combined_schema = local_table.schema
if self.keep_duplicate:
combined_table = combined_table.map(lambda k, v: (f"{k}_{key}", v))
combined_table.schema = combined_schema
else:
self.check_id(local_table, combined_table)
self.check_label_name(local_table, combined_table)
self.check_header(local_table, combined_table)
if self.keep_duplicate:
local_table = local_table.map(lambda k, v: (f"{k}_{key}", v))
combined_table = combined_table.union(local_table, self._keep_first)
combined_table.schema = combined_schema
# only check feature length if not empty
if self.is_data_instance and not self.is_empty_feature:
self.feature_count = len(combined_schema.get("header"))
# LOGGER.debug(f"feature count: {self.feature_count}")
combined_table.mapValues(self.check_feature_length)
if combined_table is None:
LOGGER.warning("All tables provided are empty or have empty features.")
first_table = list(data.values())[0]
combined_table = first_table.join(first_table)
num_data = combined_table.count()
metrics.append(Metric("Total", num_data))
self.add_summary("Total", num_data)
LOGGER.info(f"Result total data entry count: {num_data}")
self.callback_metric(metric_name=self.metric_name,
metric_namespace=self.metric_namespace,
metric_data=metrics)
self.tracker.set_metric_meta(metric_namespace=self.metric_namespace,
metric_name=self.metric_name,
metric_meta=MetricMeta(name=self.metric_name, metric_type=self.metric_type))
LOGGER.info("Union operation finished. Total {} empty tables encountered.".format(empty_count))
return combined_table
def check_consistency(self):
pass
def obtain_data(self, data_list):
return data_list
| 8,165 | 42.206349 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/union/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/psi/psi.py
|
import functools
import copy
import numpy as np
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.util import consts
from federatedml.feature.fate_element_type import NoneType
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.param.psi_param import PSIParam
from federatedml.util import LOGGER
from federatedml.protobuf.generated.psi_model_param_pb2 import PsiSummary, FeaturePsi
from federatedml.protobuf.generated.psi_model_meta_pb2 import PSIMeta
from federatedml.util import abnormal_detection
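# decimal precision used when rounding bin boundaries and reported PSI statistics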
ROUND_NUM = 6
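# per-partition bin counting: returns a (feat_num x max_bin_num) matrix whose entry (i, j)
# is the number of samples whose feature i fell into bin j; the last bin column is
# reserved for missing values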
def map_partition_handle(iterable, feat_num=10, max_bin_num=20, is_sparse=False, missing_val=NoneType()):
count_bin = np.zeros((feat_num, max_bin_num))
row_idx = np.array([i for i in range(feat_num)])
for k, v in iterable:
# last bin is for missing value
if is_sparse:
feature_dict = v.features.sparse_vec
arr = np.zeros(feat_num, dtype=np.int64) + max_bin_num - 1 # max_bin_num - 1 is the missing bin val
arr[list(feature_dict.keys())] = list(feature_dict.values())
else:
arr = v.features
arr[arr == missing_val] = max_bin_num - 1
count_bin[row_idx, arr.astype(np.int64)] += 1
return count_bin
def map_partition_reduce(arr1, arr2):
return arr1 + arr2
def psi_computer(expect_counter_list, actual_counter_list, expect_sample_count, actual_sample_count):
psi_rs = []
for exp_counter, acu_counter in zip(expect_counter_list, actual_counter_list):
feat_psi = {}
for key in exp_counter:
feat_psi[key] = psi_val(exp_counter[key] / expect_sample_count, acu_counter[key] / actual_sample_count)
total_psi = 0
for k in feat_psi:
total_psi += feat_psi[k]
feat_psi['total_psi'] = total_psi
psi_rs.append(feat_psi)
return psi_rs
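# single-bin PSI term: (actual% - expected%) * ln(actual% / expected%);
# zero ratios are clamped to 1e-6 to keep the logarithm finite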
def psi_val(expected_perc, actual_perc):
if expected_perc == 0:
expected_perc = 1e-6
if actual_perc == 0:
actual_perc = 1e-6
return (actual_perc - expected_perc) * np.log(actual_perc / expected_perc)
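# vectorized variant of psi_val: normalizes per-bin counts by sample_num, then
# computes the element-wise PSI contributions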
def psi_val_arr(expected_arr, actual_arr, sample_num):
expected_arr = expected_arr / sample_num
actual_arr = actual_arr / sample_num
expected_arr[expected_arr == 0] = 1e-6
actual_arr[actual_arr == 0] = 1e-6
psi_rs = (actual_arr - expected_arr) * np.log(actual_arr / expected_arr)
return psi_rs
def count_rs_to_dict(arrs):
dicts = []
for i in arrs:
rs_dict = {}
for k, v in enumerate(i):
rs_dict[k] = v
dicts.append(rs_dict)
return dicts
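# replace np.nan feature entries with NoneType() so they are recognized as missing
# values downstream (e.g. by the binning abnormal list)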
def np_nan_to_nonetype(inst):
arr = inst.features
index = np.isnan(arr)
if index.any():
inst = copy.deepcopy(inst)
arr = arr.astype(object)
arr[index] = NoneType()
inst.features = arr
return inst
class PSI(ModelBase):
def __init__(self):
super(PSI, self).__init__()
self.model_param = PSIParam()
self.max_bin_num = 20
self.tag_id_mapping = {}
self.id_tag_mapping = {}
self.count1, self.count2 = None, None
self.actual_table, self.expect_table = None, None
self.data_bin1, self.data_bin2 = None, None
self.bin_split_points = None
self.bin_sparse_points = None
self.psi_rs = None
self.total_scores = None
self.all_feature_list = None
self.dense_missing_val = NoneType()
self.binning_error = consts.DEFAULT_RELATIVE_ERROR
self.interval_perc1 = None
self.interval_perc2 = None
self.str_intervals = None
self.binning_obj = None
def _init_model(self, model: PSIParam):
self.max_bin_num = model.max_bin_num
self.need_run = model.need_run
self.dense_missing_val = NoneType() if model.dense_missing_val is None else model.dense_missing_val
self.binning_error = model.binning_error
@staticmethod
def check_table_content(tb):
if not tb.count() > 0:
            raise ValueError('input table must contain at least 1 sample')
first_ = tb.take(1)[0][1]
if isinstance(first_, Instance):
return True
else:
raise ValueError('unknown input format')
@staticmethod
def is_sparse(tb):
return isinstance(tb.take(1)[0][1].features, SparseVector)
@staticmethod
def check_duplicates(l_):
s = set(l_)
recorded = set()
new_l = []
for i in l_:
if i in s and i not in recorded:
new_l.append(i)
recorded.add(i)
return new_l
@staticmethod
def get_string_interval(data_split_points, id_tag_mapping, missing_bin_idx):
# generate string interval from bin_split_points
feature_interval = []
for feat_idx, interval in enumerate(data_split_points):
idx2intervals = {}
l_ = list(interval)
l_[-1] = 'inf'
l_.insert(0, '-inf')
idx = 0
for s, e in zip(l_[:-1], l_[1:]):
interval_str = str(id_tag_mapping[feat_idx])
if s != '-inf':
interval_str = str(np.round(s, ROUND_NUM)) + "<" + interval_str
if e != 'inf':
interval_str = interval_str + "<=" + str(np.round(e, ROUND_NUM))
idx2intervals[idx] = interval_str
idx += 1
idx2intervals[missing_bin_idx] = 'missing'
feature_interval.append(idx2intervals)
return feature_interval
@staticmethod
    def post_process_result(rs_dict, interval_dict):
# convert bin idx to str intervals
# then divide count by sample num to get percentage
#
rs_val_list, interval_list = [], []
for key in sorted(interval_dict.keys()):
corresponding_str_interval = interval_dict[key]
val = rs_dict[key]
rs_val_list.append(np.round(val, ROUND_NUM))
interval_list.append(corresponding_str_interval)
return rs_val_list, interval_list
@staticmethod
def count_dict_to_percentage(count_rs, sample_num):
for c in count_rs:
for k in c:
c[k] = c[k] / sample_num
return count_rs
@staticmethod
def convert_missing_val(table):
new_table = table.mapValues(np_nan_to_nonetype)
new_table.schema = table.schema
return new_table
def fit(self, expect_table, actual_table):
LOGGER.info('start psi computing')
header1 = expect_table.schema['header']
header2 = actual_table.schema['header']
if not set(header1) == set(header2):
raise ValueError('table header must be the same while computing psi values')
# baseline table should not contain empty columns
abnormal_detection.empty_column_detection(expect_table)
self.all_feature_list = header1
# make sure no duplicate features
self.all_feature_list = self.check_duplicates(self.all_feature_list)
# kv bi-directional mapping
self.tag_id_mapping = {v: k for k, v in enumerate(self.all_feature_list)}
self.id_tag_mapping = {k: v for k, v in enumerate(self.all_feature_list)}
if not self.is_sparse(expect_table): # convert missing value: nan to NoneType
expect_table = self.convert_missing_val(expect_table)
if not self.is_sparse(actual_table): # convert missing value: nan to NoneType
actual_table = self.convert_missing_val(actual_table)
if not (self.check_table_content(expect_table) and self.check_table_content(actual_table)):
raise ValueError('contents of input table must be instances of class "Instance"')
param = FeatureBinningParam(method=consts.QUANTILE, bin_num=self.max_bin_num, local_only=True,
error=self.binning_error)
binning_obj = QuantileBinning(params=param, abnormal_list=[NoneType()], allow_duplicate=False)
binning_obj.fit_split_points(expect_table)
data_bin, bin_split_points, bin_sparse_points = binning_obj.convert_feature_to_bin(expect_table)
LOGGER.debug('bin split points is {}, shape is {}'.format(bin_split_points, bin_split_points.shape))
self.binning_obj = binning_obj
self.data_bin1 = data_bin
self.bin_split_points = bin_split_points
self.bin_sparse_points = bin_sparse_points
LOGGER.debug('expect table binning done')
count_func1 = functools.partial(map_partition_handle,
feat_num=len(self.all_feature_list),
max_bin_num=self.max_bin_num + 1, # an additional bin for missing value
missing_val=self.dense_missing_val,
is_sparse=self.is_sparse(self.data_bin1))
map_rs1 = self.data_bin1.applyPartitions(count_func1)
count1 = count_rs_to_dict(map_rs1.reduce(map_partition_reduce))
data_bin2, bin_split_points2, bin_sparse_points2 = binning_obj.convert_feature_to_bin(actual_table)
self.data_bin2 = data_bin2
LOGGER.debug('actual table binning done')
count_func2 = functools.partial(map_partition_handle,
feat_num=len(self.all_feature_list),
max_bin_num=self.max_bin_num + 1, # an additional bin for missing value
missing_val=self.dense_missing_val,
is_sparse=self.is_sparse(self.data_bin2))
map_rs2 = self.data_bin2.applyPartitions(count_func2)
count2 = count_rs_to_dict(map_rs2.reduce(map_partition_reduce))
self.count1, self.count2 = count1, count2
LOGGER.info('psi counting done')
# compute psi from counting result
psi_result = psi_computer(count1, count2, expect_table.count(), actual_table.count())
self.psi_rs = psi_result
# get total psi score of features
total_scores = {}
for idx, rs in enumerate(self.psi_rs):
feat_name = self.id_tag_mapping[idx]
total_scores[feat_name] = rs['total_psi']
self.total_scores = total_scores
# id-feature mapping convert, str interval computation
self.str_intervals = self.get_string_interval(bin_split_points, self.id_tag_mapping,
missing_bin_idx=self.max_bin_num)
self.interval_perc1 = self.count_dict_to_percentage(copy.deepcopy(count1), expect_table.count())
self.interval_perc2 = self.count_dict_to_percentage(copy.deepcopy(count2), actual_table.count())
self.set_summary(self.generate_summary())
LOGGER.info('psi computation done')
def generate_summary(self):
return {'psi_scores': self.total_scores}
def export_model(self):
if not self.need_run:
return None
psi_summary = PsiSummary()
psi_summary.total_score.update(self.total_scores)
LOGGER.debug('psi total score is {}'.format(dict(psi_summary.total_score)))
psi_summary.model_name = consts.PSI
feat_psi_list = []
for id_ in self.id_tag_mapping:
feat_psi_summary = FeaturePsi()
feat_name = self.id_tag_mapping[id_]
feat_psi_summary.feature_name = feat_name
interval_psi, str_intervals = self.post_process_result(self.psi_rs[id_], self.str_intervals[id_])
interval_perc1, _ = self.post_process_result(self.interval_perc1[id_], self.str_intervals[id_])
interval_perc2, _ = self.post_process_result(self.interval_perc2[id_], self.str_intervals[id_])
feat_psi_summary.psi.extend(interval_psi)
feat_psi_summary.expect_perc.extend(interval_perc1)
feat_psi_summary.actual_perc.extend(interval_perc2)
feat_psi_summary.interval.extend(str_intervals)
feat_psi_list.append(feat_psi_summary)
psi_summary.feature_psi.extend(feat_psi_list)
LOGGER.debug('export model done')
meta = PSIMeta()
meta.max_bin_num = self.max_bin_num
return {'PSIParam': psi_summary, 'PSIMeta': meta}
| 12,582 | 34.545198 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/psi/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/statistic/psi/test/test_psi.py
|
import unittest
import numpy as np
import time
import copy
import uuid
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.statistic.psi.psi import PSI
from federatedml.param.psi_param import PSIParam
class TestPSI(unittest.TestCase):
def setUp(self):
session.init('test', 0)
print('generating dense tables')
l1, l2 = [], []
col = [i for i in range(20)]
for i in range(100):
inst = Instance()
inst.features = np.random.random(20)
l1.append(inst)
for i in range(1000):
inst = Instance()
inst.features = np.random.random(20)
l2.append(inst)
self.dense_table1, self.dense_table2 = session.parallelize(l1, partition=4, include_key=False), \
session.parallelize(l2, partition=4, include_key=False)
self.dense_table1.schema['header'] = copy.deepcopy(col)
self.dense_table2.schema['header'] = copy.deepcopy(col)
print('generating done')
print('generating sparse tables')
l1, l2 = [], []
col = [i for i in range(20)]
for i in range(100):
inst = Instance()
inst.features = SparseVector(indices=copy.deepcopy(col), data=list(np.random.random(20)))
l1.append(inst)
for i in range(1000):
inst = Instance()
inst.features = SparseVector(indices=copy.deepcopy(col), data=list(np.random.random(20)))
l2.append(inst)
self.sp_table1, self.sp_table2 = session.parallelize(l1, partition=4, include_key=False), \
session.parallelize(l2, partition=4, include_key=False)
self.sp_table1.schema['header'] = copy.deepcopy(col)
self.sp_table2.schema['header'] = copy.deepcopy(col)
print('generating done')
def test_dense_psi(self):
param = PSIParam()
psi = PSI()
psi._init_model(param)
psi.fit(self.dense_table1, self.dense_table2)
print('dense testing done')
def test_sparse_psi(self):
param = PSIParam()
psi = PSI()
psi._init_model(param)
psi.fit(self.sp_table1, self.sp_table2)
        print('sparse testing done')
if __name__ == "__main__":
unittest.main()
| 2,385 | 32.605634 | 105 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/match_id_process.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import functools
from federatedml.feature.instance import Instance
from federatedml.transfer_variable.transfer_class.match_id_intersect_transfer_variable import \
MatchIDIntersectTransferVariable
from federatedml.util import consts
from federatedml.util import LOGGER
class MatchIDIntersect(object):
"""
    Support intersection on data with repeated IDs by expanding matched ids back to the original sample ids.
"""
def __init__(self, sample_id_generator: str, role: str):
self.sample_id_generator = sample_id_generator
self.transfer_variable = MatchIDIntersectTransferVariable()
self.role = role
self.id_map = None
self.version = None
self.owner_src_data = None
self.data_type = None
self.with_sample_id = False
def __get_data_type(self, data):
if self.data_type is None:
one_feature = data.first()
if isinstance(one_feature[1], Instance):
self.data_type = Instance
else:
self.data_type = list
return self.data_type
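    # build a match_id -> [sample ids] mapping, taking the match id from the first
    # feature column (Instance) or the first list element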
@staticmethod
def __to_id_map(data):
id_map = defaultdict(list)
for d in data:
idx = d[1].features[0] if isinstance(d[1], Instance) else d[1][0]
id_map[idx].append(d[0])
return [(k, v) for k, v in id_map.items()]
@staticmethod
def __reduce_id_map(x1, x2):
return x1 + x2
@staticmethod
def __to_sample_id_map(data):
id_map = defaultdict(list)
for d in data:
id_map[d[1].inst_id].append(d[0])
return [(k, v) for k, v in id_map.items()]
def __generate_id_map(self, data):
if self.role != self.sample_id_generator:
LOGGER.warning("Not a repeated id owner, will not generate id map")
return
if not self.with_sample_id:
all_id_map = data.mapReducePartitions(self.__to_id_map, self.__reduce_id_map)
id_map = all_id_map.filter(lambda k, v: len(v) >= 2)
else:
id_map = data.mapReducePartitions(self.__to_sample_id_map, self.__reduce_id_map)
return id_map
@staticmethod
def __func_restructure_id(k, id_map: list):
return [(new_id, k) for new_id in id_map]
@staticmethod
def __func_restructure_id_for_partner(k, v):
data, id_map = v[0], v[1]
return [(new_id, data) for new_id in id_map]
@staticmethod
def __func_restructure_sample_id_for_partner(k, v):
data, id_map = v[0], v[1]
return [(new_id, data) for new_id in id_map]
@staticmethod
def __func_restructure_instance(v):
v.features = v.features[1:]
return v
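    # owner side: expand each matched id back to all of its sample ids; when the match id
    # is stored as the first feature column, strip it from both features and header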
def __restructure_owner_sample_ids(self, data, id_map):
rids = id_map.flatMap(functools.partial(self.__func_restructure_id))
if not self.with_sample_id:
_data = data.union(rids, lambda dv, rv: dv)
if self.__get_data_type(self.owner_src_data) == Instance:
r_data = self.owner_src_data.join(_data, lambda ov, dv: self.__func_restructure_instance(ov))
else:
r_data = self.owner_src_data.join(_data, lambda ov, dv: ov[1:])
r_data.schema = self.owner_src_data.schema
if r_data.schema.get('header') is not None:
r_data.schema['header'] = r_data.schema['header'][1:]
else:
r_data = self.owner_src_data.join(rids, lambda ov, dv: ov)
r_data.schema = self.owner_src_data.schema
return r_data
def __restructure_partner_sample_ids(self, data, id_map, match_data=None):
data = data.join(match_data, lambda k, v: v)
_data = data.join(id_map, lambda dv, iv: (dv, iv))
# LOGGER.debug(f"_data is: {_data.first()}")
repeated_ids = _data.flatMap(functools.partial(self.__func_restructure_id_for_partner))
# LOGGER.debug(f"restructure id for partner called, result is: {repeated_ids.first()}")
if not self.with_sample_id:
sub_data = data.subtractByKey(id_map)
expand_data = sub_data.union(repeated_ids, lambda sv, rv: sv)
else:
expand_data = repeated_ids
expand_data.schema = data.schema
if match_data:
expand_data.schema = match_data.schema
return expand_data
def __restructure_sample_ids(self, data, id_map, match_data=None):
# LOGGER.debug(f"id map is: {self.id_map.first()}")
if self.role == self.sample_id_generator:
return self.__restructure_owner_sample_ids(data, id_map)
else:
return self.__restructure_partner_sample_ids(data, id_map, match_data)
def generate_intersect_data(self, data):
if self.__get_data_type(data) == Instance:
if not self.with_sample_id:
_data = data.map(
lambda k, v: (v.features[0], 1))
else:
_data = data.map(lambda k, v: (v.inst_id, v))
else:
            _data = data.map(lambda k, v: (v[0], 1))  # map (not mapValues): the callback needs both key and value
_data.schema = data.schema
LOGGER.info("Finish recover real ids")
return _data
def use_sample_id(self):
self.with_sample_id = True
def recover(self, data):
LOGGER.info("Start repeated id processing.")
if self.role == self.sample_id_generator:
LOGGER.info("Start to generate id_map")
self.id_map = self.__generate_id_map(data)
self.owner_src_data = data
else:
if not self.with_sample_id:
LOGGER.info("Not sample_id_generator, return!")
return data
return self.generate_intersect_data(data)
def expand(self, data, owner_only=False, match_data=None):
if self.sample_id_generator == consts.HOST:
id_map_federation = self.transfer_variable.id_map_from_host
partner_role = consts.GUEST
else:
id_map_federation = self.transfer_variable.id_map_from_guest
partner_role = consts.HOST
if self.sample_id_generator == self.role:
self.id_map = self.id_map.join(data, lambda i, d: i)
LOGGER.info("Find repeated id_map from intersection ids")
if not owner_only:
id_map_federation.remote(self.id_map,
role=partner_role,
idx=-1)
LOGGER.info("Remote id_map to partner")
else:
if owner_only:
return data
self.id_map = id_map_federation.get(idx=0)
LOGGER.info("Get id_map from owner.")
return self.__restructure_sample_ids(data, self.id_map, match_data)
| 7,376 | 35.161765 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/intersect_model.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from fate_arch.common.base_utils import fate_uuid
from federatedml.feature.instance import Instance
from federatedml.model_base import Metric, MetricMeta
from federatedml.model_base import ModelBase
from federatedml.param.intersect_param import IntersectParam
from federatedml.secureprotol.hash.hash_factory import Hash
from federatedml.statistic import data_overview
from federatedml.statistic.intersect import RawIntersectionHost, RawIntersectionGuest, RsaIntersectionHost, \
RsaIntersectionGuest, DhIntersectionGuest, DhIntersectionHost, EcdhIntersectionHost, EcdhIntersectionGuest
from federatedml.statistic.intersect.match_id_process import MatchIDIntersect
from federatedml.transfer_variable.transfer_class.intersection_func_transfer_variable import \
IntersectionFuncTransferVariable
from federatedml.util import consts, LOGGER, data_format_preprocess
class IntersectModelBase(ModelBase):
def __init__(self):
super().__init__()
self.intersection_obj = None
self.proc_obj = None
# self.intersect_num = -1
self.intersect_rate = -1
self.unmatched_num = -1
self.unmatched_rate = -1
self.intersect_ids = None
self.metric_name = "intersection"
self.metric_namespace = "train"
self.metric_type = "INTERSECTION"
self.model_param_name = "IntersectModelParam"
self.model_meta_name = "IntersectModelMeta"
self.model_param = IntersectParam()
self.use_match_id_process = False
self.role = None
self.intersect_method = None
self.match_id_num = None
self.match_id_intersect_num = -1
self.recovered_num = -1
self.guest_party_id = None
self.host_party_id = None
self.host_party_id_list = None
self.transfer_variable = IntersectionFuncTransferVariable()
def _init_model(self, params):
self.model_param = params
self.intersect_preprocess_params = params.intersect_preprocess_params
def init_intersect_method(self):
if self.model_param.cardinality_only:
self.intersect_method = self.model_param.cardinality_method
else:
self.intersect_method = self.model_param.intersect_method
LOGGER.info("Using {} intersection, role is {}".format(self.intersect_method, self.role))
self.host_party_id_list = self.component_properties.host_party_idlist
self.guest_party_id = self.component_properties.guest_partyid
if self.role not in [consts.HOST, consts.GUEST]:
raise ValueError("role {} is not support".format(self.role))
def get_model_summary(self):
return {"intersect_num": self.match_id_intersect_num, "intersect_rate": self.intersect_rate,
"cardinality_only": self.intersection_obj.cardinality_only,
"unique_id_num": self.match_id_num}
def sync_use_match_id(self):
raise NotImplementedError(f"Should not be called here.")
def __share_info(self, data):
LOGGER.info("Start to share information with another role")
info_share = self.transfer_variable.info_share_from_guest if self.model_param.info_owner == consts.GUEST else \
self.transfer_variable.info_share_from_host
party_role = consts.GUEST if self.model_param.info_owner == consts.HOST else consts.HOST
if self.role == self.model_param.info_owner:
if data.schema.get('header') is not None:
try:
share_info_col_idx = data.schema.get('header').index(consts.SHARE_INFO_COL_NAME)
one_data = data.first()
if isinstance(one_data[1], Instance):
share_data = data.join(self.intersect_ids, lambda d, i: [d.features[share_info_col_idx]])
else:
share_data = data.join(self.intersect_ids, lambda d, i: [d[share_info_col_idx]])
info_share.remote(share_data,
role=party_role,
idx=-1)
LOGGER.info("Remote share information to {}".format(party_role))
except Exception as e:
LOGGER.warning("Something unexpected:{}, share a empty information to {}".format(e, party_role))
share_data = self.intersect_ids.mapValues(lambda v: ['null'])
info_share.remote(share_data,
role=party_role,
idx=-1)
else:
raise ValueError(
"'allow_info_share' is true, and 'info_owner' is {}, but can not get header in data, information sharing not done".format(
self.model_param.info_owner))
else:
self.intersect_ids = info_share.get(idx=0)
self.intersect_ids.schema['header'] = [consts.SHARE_INFO_COL_NAME]
LOGGER.info(
"Get share information from {}, header:{}".format(self.model_param.info_owner, self.intersect_ids))
return self.intersect_ids
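    # left-join support: the sample_id_generator shares the ids of its non-intersecting rows
    # so that the partner can pad the result back to the full sample set with NaN-feature instances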
def __sync_join_id(self, data, intersect_data):
LOGGER.debug(f"data count: {data.count()}")
LOGGER.debug(f"intersect_data count: {intersect_data.count()}")
if self.model_param.sample_id_generator == consts.GUEST:
sync_join_id = self.transfer_variable.join_id_from_guest
else:
sync_join_id = self.transfer_variable.join_id_from_host
if self.role == self.model_param.sample_id_generator:
join_data = data.subtractByKey(intersect_data)
# LOGGER.debug(f"join_data count: {join_data.count()}")
if self.model_param.new_sample_id:
if self.model_param.only_output_key:
join_data = join_data.map(lambda k, v: (fate_uuid(), None))
join_id = join_data
else:
join_data = join_data.map(lambda k, v: (fate_uuid(), v))
join_id = join_data.mapValues(lambda v: None)
sync_join_id.remote(join_id)
result_data = intersect_data.union(join_data)
else:
join_id = join_data.map(lambda k, v: (k, None))
result_data = data
if self.model_param.only_output_key:
result_data = data.mapValues(lambda v: None)
sync_join_id.remote(join_id)
else:
join_id = sync_join_id.get(idx=0)
# LOGGER.debug(f"received join_id count: {join_id.count()}")
join_data = join_id
if not self.model_param.only_output_key:
feature_shape = data.first()[1].features.shape[0]
def _generate_nan_instance():
filler = np.empty((feature_shape,))
filler.fill(np.nan)
return filler
join_data = join_id.mapValues(lambda v: Instance(features=_generate_nan_instance()))
result_data = intersect_data.union(join_data)
LOGGER.debug(f"result data count: {result_data.count()}")
return result_data
def callback(self):
meta_info = {"intersect_method": self.intersect_method,
"join_method": self.model_param.join_method}
if self.use_match_id_process:
self.callback_metric(metric_name=self.metric_name,
metric_namespace=self.metric_namespace,
metric_data=[Metric("intersect_count", self.match_id_intersect_num),
Metric("input_match_id_count", self.match_id_num),
Metric("intersect_rate", self.intersect_rate),
Metric("unmatched_count", self.unmatched_num),
Metric("unmatched_rate", self.unmatched_rate),
Metric("intersect_sample_id_count", self.recovered_num)])
else:
self.callback_metric(metric_name=self.metric_name,
metric_namespace=self.metric_namespace,
metric_data=[Metric("intersect_count", self.match_id_intersect_num),
Metric("input_match_id_count", self.match_id_num),
Metric("intersect_rate", self.intersect_rate),
Metric("unmatched_count", self.unmatched_num),
Metric("unmatched_rate", self.unmatched_rate)])
self.tracker.set_metric_meta(metric_namespace=self.metric_namespace,
metric_name=self.metric_name,
metric_meta=MetricMeta(name=self.metric_name,
metric_type=self.metric_type,
extra_metas=meta_info)
)
def callback_cache_meta(self, intersect_meta):
metric_name = f"{self.metric_name}_cache_meta"
self.tracker.set_metric_meta(metric_namespace=self.metric_namespace,
metric_name=metric_name,
metric_meta=MetricMeta(name=f"{self.metric_name}_cache_meta",
metric_type=self.metric_type,
extra_metas=intersect_meta)
)
def fit(self, data):
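        # overall flow: reuse cache when provided; otherwise recover match ids if present,
        # then run cardinality estimation, cache generation or plain intersection,
        # and finally expand ids and report metrics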
if self.component_properties.caches:
LOGGER.info(f"Cache provided, will enter intersect online process.")
return self.intersect_online_process(data, self.component_properties.caches)
self.init_intersect_method()
if data_overview.check_with_inst_id(data):
self.use_match_id_process = True
LOGGER.info(f"use match_id_process")
self.sync_use_match_id()
if self.use_match_id_process:
if len(self.host_party_id_list) > 1 and self.model_param.sample_id_generator != consts.GUEST:
raise ValueError("While multi-host, sample_id_generator should be guest.")
if self.intersect_method == consts.RAW:
if self.model_param.sample_id_generator != self.intersection_obj.join_role:
raise ValueError(f"When using raw intersect with match id process,"
f"'join_role' should be same role as 'sample_id_generator'")
else:
if not self.model_param.sync_intersect_ids:
if self.model_param.sample_id_generator != consts.GUEST:
self.model_param.sample_id_generator = consts.GUEST
LOGGER.warning(f"when not sync_intersect_ids with match id process,"
f"sample_id_generator is set to Guest")
self.proc_obj = MatchIDIntersect(sample_id_generator=self.model_param.sample_id_generator, role=self.role)
self.proc_obj.new_sample_id = self.model_param.new_sample_id
if data_overview.check_with_inst_id(data) or self.model_param.with_sample_id:
self.proc_obj.use_sample_id()
match_data = self.proc_obj.recover(data=data)
self.match_id_num = match_data.count()
if self.intersection_obj.run_cache:
self.cache_output = self.intersection_obj.generate_cache(match_data)
intersect_meta = self.intersection_obj.get_intersect_method_meta()
self.callback_cache_meta(intersect_meta)
return
if self.intersection_obj.cardinality_only:
self.intersection_obj.run_cardinality(match_data)
else:
intersect_data = match_data
if self.model_param.run_preprocess:
intersect_data = self.run_preprocess(match_data)
self.intersect_ids = self.intersection_obj.run_intersect(intersect_data)
if self.intersect_ids:
self.match_id_intersect_num = self.intersect_ids.count()
else:
if self.model_param.join_method == consts.LEFT_JOIN:
raise ValueError(f"Only data with match_id may apply left_join method. Please check input data format")
self.match_id_num = data.count()
if self.intersection_obj.run_cache:
self.cache_output = self.intersection_obj.generate_cache(data)
intersect_meta = self.intersection_obj.get_intersect_method_meta()
# LOGGER.debug(f"callback intersect meta is: {intersect_meta}")
self.callback_cache_meta(intersect_meta)
return
if self.intersection_obj.cardinality_only:
self.intersection_obj.run_cardinality(data)
else:
intersect_data = data
if self.model_param.run_preprocess:
intersect_data = self.run_preprocess(data)
self.intersect_ids = self.intersection_obj.run_intersect(intersect_data)
if self.intersect_ids:
self.match_id_intersect_num = self.intersect_ids.count()
if self.intersection_obj.cardinality_only:
if self.intersection_obj.intersect_num is not None:
# data_count = data.count()
self.match_id_intersect_num = self.intersection_obj.intersect_num
self.intersect_rate = self.match_id_intersect_num / self.match_id_num
self.unmatched_num = self.match_id_num - self.match_id_intersect_num
self.unmatched_rate = 1 - self.intersect_rate
self.set_summary(self.get_model_summary())
self.callback()
return None
if self.use_match_id_process:
if self.model_param.sync_intersect_ids:
self.intersect_ids = self.proc_obj.expand(self.intersect_ids, match_data=match_data)
else:
# self.intersect_ids = match_data
self.intersect_ids = self.proc_obj.expand(self.intersect_ids,
match_data=match_data,
owner_only=True)
if self.intersect_ids:
self.recovered_num = self.intersect_ids.count()
if self.model_param.only_output_key and self.intersect_ids:
self.intersect_ids = self.intersect_ids.mapValues(lambda v: Instance(inst_id=v.inst_id))
# self.intersect_ids.schema = {"match_id_name": data.schema["match_id_name"],
# "sid": data.schema.get("sid")}
self.intersect_ids.schema = data_format_preprocess.DataFormatPreProcess.clean_header(data.schema)
LOGGER.info("Finish intersection")
if self.intersect_ids:
self.intersect_rate = self.match_id_intersect_num / self.match_id_num
self.unmatched_num = self.match_id_num - self.match_id_intersect_num
self.unmatched_rate = 1 - self.intersect_rate
self.set_summary(self.get_model_summary())
self.callback()
result_data = self.intersect_ids
if not self.use_match_id_process and result_data:
if self.intersection_obj.only_output_key:
# result_data.schema = {"sid": data.schema.get("sid")}
result_data.schema = data_format_preprocess.DataFormatPreProcess.clean_header(data.schema)
LOGGER.debug(f"non-match-id & only_output_key, add sid to schema")
else:
result_data = self.intersection_obj.get_value_from_data(result_data, data)
LOGGER.debug(f"not only_output_key, restore instance value")
if self.model_param.join_method == consts.LEFT_JOIN:
result_data = self.__sync_join_id(data, self.intersect_ids)
result_data.schema = self.intersect_ids.schema
return result_data
def check_consistency(self):
pass
def load_intersect_meta(self, intersect_meta):
if self.model_param.intersect_method != intersect_meta.get("intersect_method"):
raise ValueError(f"Current intersect method must match to cache record.")
if self.model_param.intersect_method == consts.RSA:
self.model_param.rsa_params.hash_method = intersect_meta["hash_method"]
self.model_param.rsa_params.final_hash_method = intersect_meta["final_hash_method"]
self.model_param.rsa_params.salt = intersect_meta["salt"]
self.model_param.rsa_params.random_bit = intersect_meta["random_bit"]
elif self.model_param.intersect_method == consts.DH:
self.model_param.dh_params.hash_method = intersect_meta["hash_method"]
self.model_param.dh_params.salt = intersect_meta["salt"]
elif self.model_param.intersect_method == consts.ECDH:
self.model_param.ecdh_params.hash_method = intersect_meta["hash_method"]
self.model_param.ecdh_params.salt = intersect_meta["salt"]
self.model_param.ecdh_params.curve = intersect_meta["curve"]
else:
raise ValueError(f"{self.model_param.intersect_method} does not support cache.")
def make_filter_process(self, data_instances, hash_operator):
raise NotImplementedError("This method should not be called here")
def get_filter_process(self, data_instances, hash_operator):
raise NotImplementedError("This method should not be called here")
def run_preprocess(self, data_instances):
preprocess_hash_operator = Hash(self.model_param.intersect_preprocess_params.preprocess_method, False)
if self.role == self.model_param.intersect_preprocess_params.filter_owner:
data = self.make_filter_process(data_instances, preprocess_hash_operator)
else:
LOGGER.debug(f"before preprocess, data count: {data_instances.count()}")
data = self.get_filter_process(data_instances, preprocess_hash_operator)
LOGGER.debug(f"after preprocess, data count: {data.count()}")
return data
def intersect_online_process(self, data_inst, caches):
# LOGGER.debug(f"caches is: {caches}")
cache_data, cache_meta = list(caches.values())[0]
intersect_meta = list(cache_meta.values())[0]["intersect_meta"]
# LOGGER.debug(f"intersect_meta is: {intersect_meta}")
self.callback_cache_meta(intersect_meta)
self.load_intersect_meta(intersect_meta)
self.init_intersect_method()
self.intersection_obj.load_intersect_key(cache_meta)
if data_overview.check_with_inst_id(data_inst):
self.use_match_id_process = True
LOGGER.info(f"use match_id_process")
self.sync_use_match_id()
intersect_data = data_inst
self.match_id_num = data_inst.count()
if self.use_match_id_process:
if len(self.host_party_id_list) > 1 and self.model_param.sample_id_generator != consts.GUEST:
raise ValueError("While multi-host, sample_id_generator should be guest.")
if self.intersect_method == consts.RAW:
if self.model_param.sample_id_generator != self.intersection_obj.join_role:
raise ValueError(f"When using raw intersect with match id process,"
f"'join_role' should be same role as 'sample_id_generator'")
else:
if not self.model_param.sync_intersect_ids:
if self.model_param.sample_id_generator != consts.GUEST:
self.model_param.sample_id_generator = consts.GUEST
LOGGER.warning(f"when not sync_intersect_ids with match id process,"
f"sample_id_generator is set to Guest")
proc_obj = MatchIDIntersect(sample_id_generator=self.model_param.sample_id_generator, role=self.role)
proc_obj.new_sample_id = self.model_param.new_sample_id
if data_overview.check_with_inst_id(data_inst) or self.model_param.with_sample_id:
proc_obj.use_sample_id()
match_data = proc_obj.recover(data=data_inst)
intersect_data = match_data
self.match_id_num = match_data.count()
if self.role == consts.HOST:
cache_id = cache_meta[str(self.guest_party_id)].get("cache_id")
self.transfer_variable.cache_id.remote(cache_id, role=consts.GUEST, idx=0)
guest_cache_id = self.transfer_variable.cache_id.get(role=consts.GUEST, idx=0)
self.match_id_num = list(cache_data.values())[0].count()
if guest_cache_id != cache_id:
raise ValueError(f"cache_id check failed. cache_id from host & guest must match.")
elif self.role == consts.GUEST:
for i, party_id in enumerate(self.host_party_id_list):
cache_id = cache_meta[str(party_id)].get("cache_id")
self.transfer_variable.cache_id.remote(cache_id,
role=consts.HOST,
idx=i)
host_cache_id = self.transfer_variable.cache_id.get(role=consts.HOST, idx=i)
if host_cache_id != cache_id:
raise ValueError(f"cache_id check failed. cache_id from host & guest must match.")
else:
raise ValueError(f"Role {self.role} cannot run intersection transform.")
self.intersect_ids = self.intersection_obj.run_cache_intersect(intersect_data, cache_data)
self.match_id_intersect_num = self.intersect_ids.count()
if self.use_match_id_process:
if not self.model_param.sync_intersect_ids:
self.intersect_ids = proc_obj.expand(self.intersect_ids,
match_data=match_data,
owner_only=True)
else:
self.intersect_ids = proc_obj.expand(self.intersect_ids, match_data=match_data)
if self.intersect_ids:
self.recovered_num = self.intersect_ids.count()
if self.intersect_ids and self.model_param.only_output_key:
self.intersect_ids = self.intersect_ids.mapValues(lambda v: Instance(inst_id=v.inst_id))
# self.intersect_ids.schema = {"match_id_name": data_inst.schema["match_id_name"],
# "sid": data_inst.schema.get("sid")}
self.intersect_ids.schema = data_format_preprocess.DataFormatPreProcess.clean_header(data_inst.schema)
LOGGER.info("Finish intersection")
if self.intersect_ids:
self.intersect_rate = self.match_id_intersect_num / self.match_id_num
self.unmatched_num = self.match_id_num - self.match_id_intersect_num
self.unmatched_rate = 1 - self.intersect_rate
self.set_summary(self.get_model_summary())
self.callback()
result_data = self.intersect_ids
if not self.use_match_id_process:
if not self.intersection_obj.only_output_key and result_data:
result_data = self.intersection_obj.get_value_from_data(result_data, data_inst)
self.intersect_ids.schema = result_data.schema
LOGGER.debug(f"not only_output_key, restore value called")
if self.intersection_obj.only_output_key and result_data:
# schema = {"sid": data_inst.schema.get("sid")}
schema = data_format_preprocess.DataFormatPreProcess.clean_header(data_inst.schema)
result_data = result_data.mapValues(lambda v: None)
result_data.schema = schema
self.intersect_ids.schema = schema
if self.model_param.join_method == consts.LEFT_JOIN:
result_data = self.__sync_join_id(data_inst, self.intersect_ids)
result_data.schema = self.intersect_ids.schema
return result_data
class IntersectHost(IntersectModelBase):
def __init__(self):
super().__init__()
self.role = consts.HOST
def init_intersect_method(self):
super().init_intersect_method()
self.host_party_id = self.component_properties.local_partyid
if self.intersect_method == consts.RSA:
self.intersection_obj = RsaIntersectionHost()
elif self.intersect_method == consts.RAW:
self.intersection_obj = RawIntersectionHost()
self.intersection_obj.tracker = self.tracker
self.intersection_obj.task_version_id = self.task_version_id
elif self.intersect_method == consts.DH:
self.intersection_obj = DhIntersectionHost()
elif self.intersect_method == consts.ECDH:
self.intersection_obj = EcdhIntersectionHost()
else:
raise ValueError("intersect_method {} is not support yet".format(self.intersect_method))
self.intersection_obj.host_party_id = self.host_party_id
self.intersection_obj.guest_party_id = self.guest_party_id
self.intersection_obj.host_party_id_list = self.host_party_id_list
self.intersection_obj.load_params(self.model_param)
self.model_param = self.intersection_obj.model_param
def sync_use_match_id(self):
self.transfer_variable.use_match_id.remote(self.use_match_id_process, role=consts.GUEST, idx=-1)
LOGGER.info(f"sync use_match_id flag: {self.use_match_id_process} with Guest")
def make_filter_process(self, data_instances, hash_operator):
filter = self.intersection_obj.construct_filter(data_instances,
self.intersect_preprocess_params.false_positive_rate,
self.intersect_preprocess_params.hash_method,
self.intersect_preprocess_params.random_state,
hash_operator,
self.intersect_preprocess_params.preprocess_salt)
self.transfer_variable.intersect_filter_from_host.remote(filter, role=consts.GUEST, idx=0)
LOGGER.debug(f"filter sent to guest")
return data_instances
def get_filter_process(self, data_instances, hash_operator):
filter = self.transfer_variable.intersect_filter_from_guest.get(idx=0)
LOGGER.debug(f"got filter from guest")
filtered_data = data_instances.filter(lambda k, v: filter.check(
hash_operator.compute(k, suffix_salt=self.intersect_preprocess_params.preprocess_salt)))
return filtered_data
class IntersectGuest(IntersectModelBase):
def __init__(self):
super().__init__()
self.role = consts.GUEST
def init_intersect_method(self):
super().init_intersect_method()
if self.intersect_method == consts.RSA:
self.intersection_obj = RsaIntersectionGuest()
elif self.intersect_method == consts.RAW:
self.intersection_obj = RawIntersectionGuest()
self.intersection_obj.tracker = self.tracker
self.intersection_obj.task_version_id = self.task_version_id
elif self.intersect_method == consts.DH:
self.intersection_obj = DhIntersectionGuest()
elif self.intersect_method == consts.ECDH:
self.intersection_obj = EcdhIntersectionGuest()
else:
raise ValueError("intersect_method {} is not support yet".format(self.intersect_method))
self.intersection_obj.guest_party_id = self.guest_party_id
self.intersection_obj.host_party_id_list = self.host_party_id_list
self.intersection_obj.load_params(self.model_param)
def sync_use_match_id(self):
host_use_match_id_flg = self.transfer_variable.use_match_id.get(idx=-1)
LOGGER.info(f"received use_match_id flag from all hosts.")
if any(flg != self.use_match_id_process for flg in host_use_match_id_flg):
raise ValueError(f"Not all parties' input data have match_id, please check.")
def make_filter_process(self, data_instances, hash_operator):
filter = self.intersection_obj.construct_filter(data_instances,
self.intersect_preprocess_params.false_positive_rate,
self.intersect_preprocess_params.hash_method,
self.intersect_preprocess_params.random_state,
hash_operator,
self.intersect_preprocess_params.preprocess_salt)
self.transfer_variable.intersect_filter_from_guest.remote(filter, role=consts.HOST, idx=-1)
LOGGER.debug(f"filter sent to guest")
return data_instances
def get_filter_process(self, data_instances, hash_operator):
filter_list = self.transfer_variable.intersect_filter_from_host.get(idx=-1)
LOGGER.debug(f"got filter from all host")
filtered_data_list = [
data_instances.filter(
lambda k,
v: filter.check(
hash_operator.compute(
k,
suffix_salt=self.intersect_preprocess_params.preprocess_salt))) for filter in filter_list]
filtered_data = self.intersection_obj.get_common_intersection(filtered_data_list, False)
return filtered_data
| 30,728 | 51.618151 | 142 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/base_intersect.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
from federatedml.param.intersect_param import IntersectParam
from federatedml.statistic.intersect.intersect_preprocess import BitArray
from federatedml.transfer_variable.transfer_class.intersection_func_transfer_variable \
import IntersectionFuncTransferVariable
from federatedml.util import LOGGER
class Intersect(object):
def __init__(self):
super().__init__()
self.cache_id = None
self.model_param = IntersectParam()
self.transfer_variable = None
self.cache_transfer_variable = IntersectionFuncTransferVariable().cache_id_from_host
self.filter = None
self.intersect_num = None
self.cache = None
self.model_param_name = "IntersectModelParam"
self.model_meta_name = "IntersectModelMeta"
self.intersect_method = None
self._guest_id = None
self._host_id = None
self._host_id_list = None
def load_params(self, param):
self.model_param = param
self.only_output_key = param.only_output_key
self.sync_intersect_ids = param.sync_intersect_ids
self.cardinality_only = param.cardinality_only
self.sync_cardinality = param.sync_cardinality
self.cardinality_method = param.cardinality_method
self.run_preprocess = param.run_preprocess
self.intersect_preprocess_params = param.intersect_preprocess_params
self.run_cache = param.run_cache
@property
def guest_party_id(self):
return self._guest_id
@guest_party_id.setter
def guest_party_id(self, guest_id):
if not isinstance(guest_id, int):
raise ValueError("party id should be integer, but get {}".format(guest_id))
self._guest_id = guest_id
@property
def host_party_id(self):
return self._host_id
@host_party_id.setter
def host_party_id(self, host_id):
if not isinstance(host_id, int):
raise ValueError("party id should be integer, but get {}".format(host_id))
self._host_id = host_id
@property
def host_party_id_list(self):
return self._host_id_list
@host_party_id_list.setter
def host_party_id_list(self, host_id_list):
if not isinstance(host_id_list, list):
raise ValueError(
"type host_party_id should be list, but get {} with {}".format(type(host_id_list), host_id_list))
self._host_id_list = host_id_list
def get_intersect_method_meta(self):
pass
def get_intersect_key(self, party_id):
pass
def load_intersect_key(self, cache_meta):
pass
def run_intersect(self, data_instances):
raise NotImplementedError("method should not be called here")
def run_cardinality(self, data_instances):
raise NotImplementedError("method should not be called here")
def generate_cache(self, data_instances):
raise NotImplementedError("method should not be called here")
@staticmethod
def extract_cache_list(cache_data, party_list):
if not isinstance(party_list, list):
party_list = [party_list]
cache_list = [cache_data.get(str(party_id)) for party_id in party_list]
if (cache_len := len(cache_list)) != (data_len := len(cache_data.items())):
LOGGER.warning(f"{cache_len} cache sets are given,"
f"but only {data_len} hosts participate in current intersection task.")
return cache_list
def run_cache_intersect(self, data_instances, cache_data):
raise NotImplementedError("method should not be called here")
def set_flowid(self, flowid=0):
if self.transfer_variable is not None:
self.transfer_variable.set_flowid(flowid)
@staticmethod
def get_value_from_data(intersect_ids, data_instances):
if intersect_ids is not None:
intersect_ids = intersect_ids.join(data_instances, lambda i, d: d)
intersect_ids.schema = data_instances.schema
LOGGER.info("obtain intersect data_instances!")
return intersect_ids
@staticmethod
def get_common_intersection(intersect_ids_list: list, keep_encrypt_ids=False):
if len(intersect_ids_list) == 1:
return intersect_ids_list[0]
if keep_encrypt_ids:
def f(v_prev, v): return v_prev + v
else:
def f(v_prev, v): return None
intersect_ids = None
for i, value in enumerate(intersect_ids_list):
if intersect_ids is None:
intersect_ids = value
continue
intersect_ids = intersect_ids.join(value, f)
return intersect_ids
@staticmethod
def extract_intersect_ids(intersect_ids, all_ids, keep_both=False):
if keep_both:
intersect_ids = intersect_ids.join(all_ids, lambda e, h: [e, h])
else:
intersect_ids = intersect_ids.join(all_ids, lambda e, h: h)
return intersect_ids
@staticmethod
def filter_intersect_ids(encrypt_intersect_ids, keep_encrypt_ids=False):
if keep_encrypt_ids:
def f(k, v): return (v, [k])
else:
def f(k, v): return (v, None)
if len(encrypt_intersect_ids) > 1:
raw_intersect_ids = [e.map(f) for e in encrypt_intersect_ids]
intersect_ids = Intersect.get_common_intersection(raw_intersect_ids, keep_encrypt_ids)
else:
intersect_ids = encrypt_intersect_ids[0]
intersect_ids = intersect_ids.map(f)
return intersect_ids
@staticmethod
def map_raw_id_to_encrypt_id(raw_id_data, encrypt_id_data, keep_value=False):
encrypt_id_data_exchange_kv = encrypt_id_data.map(lambda k, v: (v, k))
encrypt_raw_id = raw_id_data.join(encrypt_id_data_exchange_kv, lambda r, e: (e, r))
if keep_value:
encrypt_common_id = encrypt_raw_id.map(lambda k, v: (v[0], v[1]))
else:
encrypt_common_id = encrypt_raw_id.map(lambda k, v: (v[0], None))
return encrypt_common_id
@staticmethod
def map_encrypt_id_to_raw_id(encrypt_id_data, raw_id_data, keep_encrypt_id=True):
"""
Parameters
----------
encrypt_id_data: E(id)
raw_id_data: (E(id), (id, v))
keep_encrypt_id: bool
Returns
-------
        (id, E(id)) if keep_encrypt_id else (id, None)
"""
encrypt_id_raw_id = raw_id_data.join(encrypt_id_data, lambda r, e: r)
if keep_encrypt_id:
raw_id = encrypt_id_raw_id.map(lambda k, v: (v[0], k))
else:
raw_id = encrypt_id_raw_id.map(lambda k, v: (v[0], None))
return raw_id
@staticmethod
def hash(value, hash_operator, salt=''):
h_value = hash_operator.compute(value, suffix_salt=salt)
return h_value
@staticmethod
def insert_key(kv_iterator, filter, hash_operator=None, salt=None):
res_filter = None
for k, _ in kv_iterator:
if hash_operator:
res_filter = filter.insert(hash_operator.compute(k, suffix_salt=salt))
else:
res_filter = filter.insert(k)
return res_filter
@staticmethod
def count_key_in_filter(kv_iterator, filter):
count = 0
for k, _ in kv_iterator:
count += filter.check(k)
return count
@staticmethod
def construct_filter(data, false_positive_rate, hash_method, random_state, hash_operator=None, salt=None):
n = data.count()
m, k = BitArray.get_filter_param(n, false_positive_rate)
filter = BitArray(m, k, hash_method, random_state)
LOGGER.debug(f"filter bit count is: {filter.bit_count}")
LOGGER.debug(f"filter hash func count: {filter.hash_func_count}")
f = functools.partial(Intersect.insert_key, filter=filter, hash_operator=hash_operator, salt=salt)
new_array = data.mapPartitions(f).reduce(lambda x, y: x | y)
LOGGER.debug(f"filter array obtained")
filter.set_array(new_array)
# LOGGER.debug(f"after insert, filter sparsity is: {filter.sparsity}")
return filter
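# --- Editorial sketch (not FATE code): the join helpers above implement
# --- multi-party intersection as repeated key joins, and the map helpers
# --- translate between raw and encrypted id spaces. A dict-based analogue,
# --- assuming plain dicts stand in for FATE Tables, shows the semantics:
if __name__ == "__main__":
    def common_intersection(id_dicts):
        # keep only keys present in every "table", as get_common_intersection does
        keys = set(id_dicts[0])
        for d in id_dicts[1:]:
            keys &= set(d)
        return {k: None for k in keys}

    def encrypt_to_raw(raw_id_data, cipher_ids):
        # raw_id_data: {E(id): (id, v)}; mirrors map_encrypt_id_to_raw_id
        # with keep_encrypt_id=False
        return {raw_id_data[e][0]: None for e in cipher_ids if e in raw_id_data}

    assert sorted(common_intersection([{1: None, 2: None, 4: None},
                                       {2: None, 4: None, 5: None}])) == [2, 4]
    assert encrypt_to_raw({"Ea": ("a", 1), "Eb": ("b", 2)}, ["Eb"]) == {"b": None}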
| 8,751 | 35.619247 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/__init__.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.statistic.intersect.base_intersect import Intersect
from federatedml.statistic.intersect.raw_intersect.raw_intersect_base import RawIntersect
from federatedml.statistic.intersect.raw_intersect.raw_intersect_guest import RawIntersectionGuest
from federatedml.statistic.intersect.raw_intersect.raw_intersect_host import RawIntersectionHost
from federatedml.statistic.intersect.rsa_intersect.rsa_intersect_base import RsaIntersect
from federatedml.statistic.intersect.rsa_intersect.rsa_intersect_guest import RsaIntersectionGuest
from federatedml.statistic.intersect.rsa_intersect.rsa_intersect_host import RsaIntersectionHost
from federatedml.statistic.intersect.dh_intersect.dh_intersect_base import DhIntersect
from federatedml.statistic.intersect.dh_intersect.dh_intersect_guest import DhIntersectionGuest
from federatedml.statistic.intersect.dh_intersect.dh_intersect_host import DhIntersectionHost
from federatedml.statistic.intersect.ecdh_intersect.ecdh_intersect_base import EcdhIntersect
from federatedml.statistic.intersect.ecdh_intersect.ecdh_intersect_guest import EcdhIntersectionGuest
from federatedml.statistic.intersect.ecdh_intersect.ecdh_intersect_host import EcdhIntersectionHost
__all__ = ['Intersect',
'RawIntersect',
'RsaIntersect',
'DhIntersect',
'RsaIntersectionHost',
'RsaIntersectionGuest',
'RawIntersectionHost',
'RawIntersectionGuest',
'DhIntersectionGuest',
'DhIntersectionHost',
'EcdhIntersect',
'EcdhIntersectionGuest',
'EcdhIntersectionHost']
| 2,242 | 46.723404 | 101 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/intersect_preprocess.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gmpy2
import math
import uuid
import numpy as np
from federatedml.secureprotol.hash.hash_factory import Hash
from federatedml.util import consts, LOGGER
SALT_LENGTH = 8
class BitArray(object):
def __init__(self, bit_count, hash_func_count, hash_method, random_state, salt=None):
        self._array = np.zeros((bit_count + 63) // 64, dtype='uint64')
        # round the requested bit count up to a whole number of 64-bit words
        self.bit_count = self._array.size * 64
self.random_state = random_state
# self.hash_encoder = Hash(hash_method, False)
self.hash_method = hash_method
self.hash_func_count = hash_func_count
self.id = str(uuid.uuid4())
self.salt = salt
if salt is None:
self.salt = self._generate_salt()
def _generate_salt(self):
random_state = np.random.RandomState(self.random_state)
def f(n):
return str(n)[2:]
return list(map(f, np.round(random_state.random(self.hash_func_count), SALT_LENGTH)))
@property
def sparsity(self):
set_bit_count = sum(map(gmpy2.popcount, map(int, self._array)))
return 1 - set_bit_count / self.bit_count
def set_array(self, new_array):
self._array = new_array
def get_array(self):
return self._array
def merge_filter(self, other):
if self.bit_count != other.bit_count:
raise ValueError(f"cannot merge filters with different bit count")
self._array |= other._array
def get_ind_set(self, x):
hash_encoder = Hash(self.hash_method, False)
return set(int(hash_encoder.compute(x,
suffix_salt=self.salt[i]),
16) % self.bit_count for i in range(self.hash_func_count))
def insert(self, x):
"""
insert given instance to bit array with hash functions
Parameters
----------
x
Returns
-------
"""
ind_set = self.get_ind_set(x)
for ind in ind_set:
self.set_bit(ind)
return self._array
def insert_ind_set(self, ind_set):
"""
insert given ind collection to bit array with hash functions
Parameters
----------
ind_set
Returns
-------
"""
for ind in ind_set:
self.set_bit(ind)
def check(self, x):
"""
check whether given instance x exists in bit array
Parameters
----------
x
Returns
-------
"""
hash_encoder = Hash(self.hash_method, False)
for i in range(self.hash_func_count):
ind = int(hash_encoder.compute(x, suffix_salt=self.salt[i]), 16) % self.bit_count
if not self.query_bit(ind):
return False
return True
def check_ind_set(self, ind_set):
"""
check whether all bits in given ind set are filled
Parameters
----------
ind_set
Returns
-------
"""
for ind in ind_set:
if not self.query_bit(ind):
return False
return True
def set_bit(self, ind):
"""
set bit at given bit index
Parameters
----------
ind
Returns
-------
"""
pos = ind >> 6
bit_pos = ind & 63
self._array[pos] |= np.uint64(1 << bit_pos)
def query_bit(self, ind):
"""
query bit != 0
Parameters
----------
ind
Returns
-------
"""
pos = ind >> 6
bit_pos = ind & 63
return (self._array[pos] & np.uint64(1 << bit_pos)) != 0
@staticmethod
def get_filter_param(n, p):
"""
Parameters
----------
n: items to store in filter
p: target false positive rate
Returns
-------
"""
# bit count
m = math.ceil(-n * math.log(p) / (math.pow(math.log(2), 2)))
# hash func count
k = round(m / n * math.log(2))
if k < consts.MIN_HASH_FUNC_COUNT:
LOGGER.info(f"computed k value {k} is smaller than min hash func count limit, "
f"set to {consts.MIN_HASH_FUNC_COUNT}")
k = consts.MIN_HASH_FUNC_COUNT
# update bit count so that target fpr = p
m = round(-n * k / math.log(1 - math.pow(p, 1 / k)))
if k > consts.MAX_HASH_FUNC_COUNT:
LOGGER.info(f"computed k value {k} is greater than max hash func count limit, "
f"set to {consts.MAX_HASH_FUNC_COUNT}")
k = consts.MAX_HASH_FUNC_COUNT
# update bit count so that target fpr = p
m = round(-n * k / math.log(1 - math.pow(p, 1 / k)))
return m, k
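# --- Editorial sketch (not FATE code): get_filter_param applies the standard
# --- Bloom-filter sizing formulas m = ceil(-n ln p / (ln 2)^2) and
# --- k = round((m / n) ln 2), then clamps k and re-solves for m. A worked
# --- example, using numpy as imported above:
if __name__ == "__main__":
    m, k = BitArray.get_filter_param(n=100000, p=0.001)
    # for n=1e5 ids at a 0.1% false-positive target this gives roughly
    # m ~ 1.44 million bits (~180 KB) and k = 10 hash functions
    print(f"bit count m={m}, hash functions k={k}")

    # set_bit/query_bit address bit `ind` as word ind >> 6, offset ind & 63
    arr = np.zeros(2, dtype='uint64')
    ind = 70                                 # lives in word 1, bit 6
    arr[ind >> 6] |= np.uint64(1 << (ind & 63))
    assert (arr[ind >> 6] & np.uint64(1 << (ind & 63))) != 0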
| 5,447 | 26.654822 | 93 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/test/intersection_guest_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import uuid
from fate_arch.session import computing_session as session
from federatedml.param.intersect_param import IntersectParam
from federatedml.secureprotol.hash.hash_factory import Hash
class TestRsaIntersectGuest(unittest.TestCase):
def setUp(self):
self.jobid = str(uuid.uuid1())
session.init(self.jobid)
from federatedml.statistic.intersect import RsaIntersectionGuest
from federatedml.statistic.intersect import RsaIntersect
intersect_param = IntersectParam()
self.rsa_operator = RsaIntersectionGuest()
self.rsa_operator.load_params(intersect_param)
self.rsa_op2 = RsaIntersect()
self.rsa_op2.load_params(intersect_param)
def data_to_table(self, data):
return session.parallelize(data, include_key=True, partition=2)
def test_func_map_raw_id_to_encrypt_id(self):
d1 = [("a", 1), ("b", 2), ("c", 3)]
d2 = [(4, "a"), (5, "b"), (6, "c")]
D1 = self.data_to_table(d1)
D2 = self.data_to_table(d2)
res = self.rsa_operator.map_raw_id_to_encrypt_id(D1, D2)
gt = [(4, None), (5, None), (6, None)]
self.assertListEqual(list(res.collect()), gt)
def test_hash(self):
hash_operator = Hash("sha256")
res = str(self.rsa_op2.hash("1", hash_operator))
self.assertEqual(res, "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b")
def tearDown(self):
session.stop()
if __name__ == "__main__":
unittest.main()
| 2,149 | 33.126984 | 97 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/test/intersection_host_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import uuid
from fate_arch.session import computing_session as session
from federatedml.param.intersect_param import IntersectParam
class TestRsaIntersectHost(unittest.TestCase):
def setUp(self):
self.jobid = str(uuid.uuid1())
session.init(self.jobid)
from federatedml.statistic.intersect import RsaIntersectionHost
from federatedml.statistic.intersect import RawIntersectionHost
intersect_param = IntersectParam()
self.rsa_operator = RsaIntersectionHost()
self.rsa_operator.load_params(intersect_param)
self.raw_operator = RawIntersectionHost()
self.raw_operator.load_params(intersect_param)
def data_to_table(self, data):
return session.parallelize(data, include_key=True, partition=2)
def test_func_generate_rsa_key(self):
res = self.rsa_operator.generate_rsa_key(1024)
self.assertEqual(65537, res[0])
def test_get_common_intersection(self):
d1 = [(1, "a"), (2, "b"), (4, "c")]
d2 = [(4, "a"), (5, "b"), (6, "c")]
d3 = [(4, "a"), (5, "b"), (7, "c")]
D1 = self.data_to_table(d1)
D2 = self.data_to_table(d2)
D3 = self.data_to_table(d3)
res = self.raw_operator.get_common_intersection([D1, D2, D3])
gt = [(4, None)]
self.assertListEqual(list(res.collect()), gt)
def tearDown(self):
session.stop()
if __name__ == "__main__":
unittest.main()
| 2,085 | 32.111111 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/dh_intersect/dh_intersect_host.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from federatedml.secureprotol.symmetric_encryption.cryptor_executor import CryptoExecutor
from federatedml.secureprotol.symmetric_encryption.pohlig_hellman_encryption import PohligHellmanCipherKey
from federatedml.statistic.intersect.dh_intersect.dh_intersect_base import DhIntersect
from federatedml.util import consts, LOGGER
class DhIntersectionHost(DhIntersect):
def __init__(self):
super().__init__()
self.role = consts.HOST
self.id_list_local_first = None
def _exchange_id(self, id_cipher, replace_val=True):
if replace_val:
id_cipher = id_cipher.mapValues(lambda v: None)
self.transfer_variable.id_ciphertext_list_exchange_h2g.remote(id_cipher,
role=consts.GUEST,
idx=0)
LOGGER.info("sent id 1st ciphertext to guest")
id_guest = self.transfer_variable.id_ciphertext_list_exchange_g2h.get(idx=0)
LOGGER.info("got id 1st ciphertext from guest")
return id_guest
def _sync_doubly_encrypted_id_list(self, id_list):
self.transfer_variable.doubly_encrypted_id_list.remote(id_list,
role=consts.GUEST,
idx=0)
LOGGER.info("sent doubly encrypted id list to guest")
def get_intersect_ids(self):
first_cipher_intersect_ids = self.transfer_variable.intersect_ids.get(idx=0)
LOGGER.info(f"obtained cipher intersect ids from guest")
intersect_ids = self.map_encrypt_id_to_raw_id(first_cipher_intersect_ids,
self.id_list_local_first,
keep_encrypt_id=False)
return intersect_ids
def get_intersect_doubly_encrypted_id(self, data_instances, keep_key=True):
self._generate_commutative_cipher()
self.commutative_cipher.init()
# 1st ID encrypt: (Eh, (h, Instance))
self.id_list_local_first = self._encrypt_id(data_instances,
self.commutative_cipher,
reserve_original_key=keep_key,
hash_operator=self.hash_operator,
salt=self.salt,
reserve_original_value=keep_key)
LOGGER.info("encrypted local id for the 1st time")
# send (Eh, -1), get (Eg, -1)
id_list_remote_first = self._exchange_id(self.id_list_local_first, keep_key)
# 2nd ID encrypt & send doubly encrypted guest ID list to guest
id_list_remote_second = self._encrypt_id(id_list_remote_first,
self.commutative_cipher,
reserve_original_key=keep_key) # (EEg, Eg)
LOGGER.info("encrypted guest id for the 2nd time")
self._sync_doubly_encrypted_id_list(id_list_remote_second)
def decrypt_intersect_doubly_encrypted_id(self, id_list_intersect_cipher_cipher=None):
"""
if self.cardinality_only:
cardinality = None
if self.sync_cardinality:
cardinality = self.transfer_variable.cardinality.get(cardinality, role=consts.GUEST, idx=0)
LOGGER.info(f"Got intersect cardinality from guest.")
return cardinality
"""
intersect_ids = None
if self.sync_intersect_ids:
intersect_ids = self.get_intersect_ids()
return intersect_ids
def get_intersect_key(self, party_id=None):
cipher_core = self.commutative_cipher.cipher_core
intersect_key = {"mod_base": str(cipher_core.mod_base),
"exponent": str(cipher_core.exponent)}
return intersect_key
def load_intersect_key(self, cache_meta):
intersect_key = cache_meta[str(self.guest_party_id)]["intersect_key"]
mod_base = int(intersect_key["mod_base"])
exponent = int(intersect_key["exponent"])
ph_key = PohligHellmanCipherKey(mod_base, exponent)
self.commutative_cipher = CryptoExecutor(ph_key)
def generate_cache(self, data_instances):
self._generate_commutative_cipher()
self.commutative_cipher.init()
cache_id = str(uuid.uuid4())
self.cache_id = {self.guest_party_id: cache_id}
# id_only.schema = cache_schema
self.cache_transfer_variable.remote(cache_id, role=consts.GUEST, idx=0)
LOGGER.info(f"remote cache_id to guest")
# 1st ID encrypt: (Eh, (h, Instance))
id_list_local_first = self._encrypt_id(data_instances,
self.commutative_cipher,
reserve_original_key=True,
hash_operator=self.hash_operator,
salt=self.salt,
reserve_original_value=True)
LOGGER.info("encrypted local id for the 1st time")
# cache_schema = {"cache_id": cache_id}
# id_list_local_first.schema = cache_schema
id_only = id_list_local_first.mapValues(lambda v: None)
self.transfer_variable.id_ciphertext_list_exchange_h2g.remote(id_only,
role=consts.GUEST,
idx=0)
LOGGER.info("sent id 1st ciphertext list to guest")
cache_data = {self.guest_party_id: id_list_local_first}
cache_meta = {self.guest_party_id: {"cache_id": cache_id,
"intersect_meta": self.get_intersect_method_meta(),
"intersect_key": self.get_intersect_key()}}
return cache_data, cache_meta
def get_intersect_doubly_encrypted_id_from_cache(self, data_instances, cache_data):
id_list_remote_first = self.transfer_variable.id_ciphertext_list_exchange_g2h.get(idx=0)
LOGGER.info("got id 1st ciphertext list from guest")
# 2nd ID encrypt & send doubly encrypted guest ID list to guest
id_list_remote_second = self._encrypt_id(id_list_remote_first,
self.commutative_cipher,
reserve_original_key=True) # (EEg, Eg)
LOGGER.info("encrypted guest id for the 2nd time")
self.id_list_local_first = self.extract_cache_list(cache_data, self.guest_party_id)[0]
self._sync_doubly_encrypted_id_list(id_list_remote_second)
def run_cardinality(self, data_instances):
LOGGER.info(f"run exact_cardinality with DH")
self.get_intersect_doubly_encrypted_id(data_instances, keep_key=True)
if self.sync_cardinality:
self.intersect_num = self.transfer_variable.cardinality.get(idx=0)
LOGGER.info("Got intersect cardinality from guest.")
| 7,896 | 46.860606 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/dh_intersect/dh_intersect_guest.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.symmetric_encryption.cryptor_executor import CryptoExecutor
from federatedml.secureprotol.symmetric_encryption.pohlig_hellman_encryption import PohligHellmanCipherKey
from federatedml.statistic.intersect.dh_intersect.dh_intersect_base import DhIntersect
from federatedml.util import consts, LOGGER
class DhIntersectionGuest(DhIntersect):
def __init__(self):
super().__init__()
self.role = consts.GUEST
self.id_list_local_first = None
self.id_local_first = None
self.id_list_remote_second = None
self.id_list_local_second = None
self.host_count = None
# self.recorded_k_data = None
def _exchange_id(self, id_cipher, replace_val=True):
"""for i, id in enumerate(id_list):
if replace_val:
id_only = id.mapValues(lambda v: None)
else:
id_only = id
self.transfer_variable.id_ciphertext_list_exchange_g2h.remote(id_only,
role=consts.HOST,
idx=i)
LOGGER.info(f"sent id 1st ciphertext list to {i} th host")"""
if replace_val:
id_cipher = id_cipher.mapValues(lambda v: None)
self.transfer_variable.id_ciphertext_list_exchange_g2h.remote(id_cipher,
role=consts.HOST,
idx=-1)
LOGGER.info(f"sent id 1st ciphertext to all host")
id_list_remote = self.transfer_variable.id_ciphertext_list_exchange_h2g.get(idx=-1)
LOGGER.info("got id ciphertext list from all host")
return id_list_remote
def _sync_doubly_encrypted_id_list(self, id_list=None):
id_list_guest = self.transfer_variable.doubly_encrypted_id_list.get(idx=-1)
LOGGER.info("got doubly encrypted id list from all host")
return id_list_guest
def send_intersect_ids(self, intersect_ids):
for i, host_party_id in enumerate(self.host_party_id_list):
remote_intersect_id = intersect_ids.map(lambda k, v: (v[i], None))
self.transfer_variable.intersect_ids.remote(remote_intersect_id,
role=consts.HOST,
idx=i)
LOGGER.info(f"Remote intersect ids to {i}th Host {host_party_id}!")
def get_intersect_doubly_encrypted_id(self, data_instances, keep_key=True):
self._generate_commutative_cipher()
self.commutative_cipher.init()
self.host_count = len(self.host_party_id_list)
LOGGER.info("commutative cipher key generated")
# 1st ID encrypt: # (Eg, -1)
self.id_local_first = self._encrypt_id(data_instances,
self.commutative_cipher,
reserve_original_key=keep_key,
hash_operator=self.hash_operator,
salt=self.salt)
LOGGER.info("encrypted guest id for the 1st time")
id_list_remote_first = self._exchange_id(self.id_local_first, keep_key)
# 2nd ID encrypt & receive doubly encrypted ID list: # (EEh, Eh)
self.id_list_remote_second = [self._encrypt_id(id_list_remote_first[i],
self.commutative_cipher,
reserve_original_key=keep_key)
for i in range(self.host_count)]
LOGGER.info("encrypted remote id for the 2nd time")
# receive doubly encrypted ID list from all host:
self.id_list_local_second = self._sync_doubly_encrypted_id_list() # get (EEg, Eg)
# find intersection per host
id_list_intersect_cipher_cipher = [self.extract_intersect_ids(self.id_list_remote_second[i],
self.id_list_local_second[i],
keep_both=keep_key)
for i in range(self.host_count)] # (EEi, [Eh, Eg])
LOGGER.info("encrypted intersection ids found")
return id_list_intersect_cipher_cipher
def decrypt_intersect_doubly_encrypted_id(self, id_list_intersect_cipher_cipher):
# EEi -> (Eg, Eh)
id_list_intersect_cipher = [ids.map(lambda k, v: (v[1], [v[0]])) for ids in id_list_intersect_cipher_cipher]
intersect_ids = self.get_common_intersection(id_list_intersect_cipher, keep_encrypt_ids=True)
LOGGER.info(f"intersection found")
if self.sync_intersect_ids:
self.send_intersect_ids(intersect_ids)
else:
LOGGER.info("Skip sync intersect ids with Host(s).")
intersect_ids = intersect_ids.join(self.id_local_first, lambda cipher, raw: raw)
intersect_ids = intersect_ids.map(lambda k, v: (v, None))
return intersect_ids
def load_intersect_key(self, cache_meta):
host_party = self.host_party_id_list[0]
intersect_key = cache_meta[str(host_party)]["intersect_key"]
mod_base = int(intersect_key["mod_base"])
exponent = int(intersect_key["exponent"])
for host_party in self.host_party_id_list:
cur_intersect_key = cache_meta[str(host_party)]["intersect_key"]
cur_mod_base = int(cur_intersect_key["mod_base"])
cur_exponent = int(cur_intersect_key["exponent"])
if cur_mod_base != mod_base or cur_exponent != exponent:
raise ValueError("Not all intersect keys from cache match, please check.")
ph_key = PohligHellmanCipherKey(mod_base, exponent)
self.commutative_cipher = CryptoExecutor(ph_key)
def generate_cache(self, data_instances):
self._generate_commutative_cipher()
self.commutative_cipher.init()
LOGGER.info("commutative cipher key generated")
cache_id_list = self.cache_transfer_variable.get(idx=-1)
LOGGER.info(f"got cache_id from all host")
id_list_remote_first = self.transfer_variable.id_ciphertext_list_exchange_h2g.get(idx=-1)
LOGGER.info("Get id ciphertext list from all host")
# 2nd ID encrypt & receive doubly encrypted ID list: # (EEh, Eh)
id_list_remote_second = [self._encrypt_id(id_remote_first,
self.commutative_cipher,
reserve_original_key=True)
for id_remote_first in id_list_remote_first]
LOGGER.info("encrypted remote id for the 2nd time")
cache_data, cache_meta = {}, {}
intersect_meta = self.get_intersect_method_meta()
cipher_core = self.commutative_cipher.cipher_core
intersect_key = {"mod_base": str(cipher_core.mod_base),
"exponent": str(cipher_core.exponent)}
for i, party_id in enumerate(self.host_party_id_list):
meta = {"cache_id": cache_id_list[i],
"intersect_meta": intersect_meta,
"intersect_key": intersect_key}
cache_meta[party_id] = meta
cache_data[party_id] = id_list_remote_second[i]
return cache_data, cache_meta
def get_intersect_doubly_encrypted_id_from_cache(self, data_instances, cache_data):
self.id_local_first = self._encrypt_id(data_instances,
self.commutative_cipher,
reserve_original_key=True,
hash_operator=self.hash_operator,
salt=self.salt)
LOGGER.info("encrypted guest id for the 1st time")
id_only = self.id_local_first.mapValues(lambda v: None)
self.transfer_variable.id_ciphertext_list_exchange_g2h.remote(id_only,
role=consts.HOST,
idx=-1)
LOGGER.info(f"sent id 1st ciphertext list to all host")
# receive doubly encrypted ID list from all host:
self.id_list_local_second = self._sync_doubly_encrypted_id_list() # get (EEg, Eg)
self.host_count = len(self.id_list_local_second)
# find intersection per host
cache_list = self.extract_cache_list(cache_data, self.host_party_id_list)
id_list_intersect_cipher_cipher = [self.extract_intersect_ids(cache_list[i],
self.id_list_local_second[i],
keep_both=True)
for i in range(self.host_count)] # (EEi, [Eh, Eg])
LOGGER.info("encrypted intersection ids found")
self.id_list_remote_second = cache_list
return id_list_intersect_cipher_cipher
def run_cardinality(self, data_instances):
LOGGER.info(f"run cardinality_only with DH")
id_list_intersect_cipher_cipher = self.get_intersect_doubly_encrypted_id(data_instances, keep_key=False)
id_intersect_cipher_cipher = self.filter_intersect_ids(id_list_intersect_cipher_cipher,
keep_encrypt_ids=False)
self.intersect_num = id_intersect_cipher_cipher.count()
if self.sync_cardinality:
self.transfer_variable.cardinality.remote(self.intersect_num, role=consts.HOST, idx=-1)
LOGGER.info("Sent intersect cardinality to host.")
else:
LOGGER.info("Skip sync intersect cardinality with host")
| 10,621 | 50.814634 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/dh_intersect/__init__.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/dh_intersect/dh_intersect_base.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.hash.hash_factory import Hash
from federatedml.secureprotol.symmetric_encryption.cryptor_executor import CryptoExecutor
from federatedml.secureprotol.symmetric_encryption.pohlig_hellman_encryption import PohligHellmanCipherKey
from federatedml.statistic.intersect.base_intersect import Intersect
from federatedml.transfer_variable.transfer_class.dh_intersect_transfer_variable import DhIntersectTransferVariable
from federatedml.util import LOGGER, consts
class DhIntersect(Intersect):
"""
adapted from Secure Information Retrieval Module
"""
def __init__(self):
super().__init__()
self.role = None
self.transfer_variable = DhIntersectTransferVariable()
self.commutative_cipher = None
def load_params(self, param):
super().load_params(param=param)
self.dh_params = param.dh_params
self.hash_operator = Hash(param.dh_params.hash_method)
self.salt = self.dh_params.salt
self.key_length = self.dh_params.key_length
def get_intersect_method_meta(self):
dh_meta = {"intersect_method": consts.DH,
"hash_method": self.dh_params.hash_method,
"salt": self.salt}
return dh_meta
@staticmethod
def _encrypt_id(data_instances, cipher, reserve_original_key=False, hash_operator=None, salt='',
reserve_original_value=False):
"""
Encrypt the key (ID) of input Table
:param cipher: cipher object
        :param data_instances: Table
:param reserve_original_key: (enc_key, ori_key) if reserve_original_key == True, otherwise (enc_key, -1)
:param hash_operator: if provided, use map_hash_encrypt
:param salt: if provided, use for map_hash_encrypt
        :param reserve_original_value:
(enc_key, (ori_key, val)) for reserve_original_key == True and reserve_original_value==True;
(ori_key, (enc_key, val)) for only reserve_original_value == True.
:return:
"""
mode = DhIntersect._get_mode(reserve_original_key, reserve_original_value)
if hash_operator is not None:
return cipher.map_hash_encrypt(data_instances, mode=mode, hash_operator=hash_operator, salt=salt)
return cipher.map_encrypt(data_instances, mode=mode)
@staticmethod
def _get_mode(reserve_original_key=False, reserve_original_value=False):
if reserve_original_key and reserve_original_value:
return 5
if reserve_original_key:
return 4
if reserve_original_value:
return 3
return 1
def _generate_commutative_cipher(self):
self.commutative_cipher = CryptoExecutor(PohligHellmanCipherKey.generate_key(self.key_length))
def _exchange_id(self, id_cipher, replace_val=True):
"""
:param id_cipher: Table in the form (id, 0)
:return:
"""
pass
def _sync_doubly_encrypted_id_list(self, id_list):
"""
host -> guest
:param id_list:
:return:
"""
pass
def get_intersect_doubly_encrypted_id(self, data_instances, keep_key=True):
raise NotImplementedError("This method should not be called here")
def decrypt_intersect_doubly_encrypted_id(self, id_list_intersect_cipher_cipher):
raise NotImplementedError("This method should not be called here")
def get_intersect_doubly_encrypted_id_from_cache(self, data_instances, cache_set):
raise NotImplementedError("This method should not be called here")
def run_intersect(self, data_instances):
LOGGER.info("Start DH Intersection")
id_list_intersect_cipher_cipher = self.get_intersect_doubly_encrypted_id(data_instances)
intersect_ids = self.decrypt_intersect_doubly_encrypted_id(id_list_intersect_cipher_cipher)
return intersect_ids
def run_cache_intersect(self, data_instances, cache_data):
LOGGER.info("Start DH Intersection with cache")
id_list_intersect_cipher_cipher = self.get_intersect_doubly_encrypted_id_from_cache(data_instances, cache_data)
intersect_ids = self.decrypt_intersect_doubly_encrypted_id(id_list_intersect_cipher_cipher)
return intersect_ids
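# --- Editorial note (sketch, not FATE documentation): _get_mode selects the
# --- key/value layout produced by CryptoExecutor's map_encrypt. Following the
# --- _encrypt_id docstring above, the shapes are:
# ---   mode 1: (enc_key, -1)
# ---   mode 3: (ori_key, (enc_key, val))
# ---   mode 4: (enc_key, ori_key)
# ---   mode 5: (enc_key, (ori_key, val))
if __name__ == "__main__":
    assert DhIntersect._get_mode() == 1
    assert DhIntersect._get_mode(reserve_original_value=True) == 3
    assert DhIntersect._get_mode(reserve_original_key=True) == 4
    assert DhIntersect._get_mode(True, True) == 5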
| 4,890 | 40.803419 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/ecdh_intersect/ecdh_intersect_base.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.elliptic_curve_encryption import EllipticCurve
from federatedml.secureprotol.hash.hash_factory import Hash
from federatedml.statistic.intersect.base_intersect import Intersect
from federatedml.transfer_variable.transfer_class.ecdh_intersect_transfer_variable import EcdhIntersectTransferVariable
from federatedml.util import LOGGER, consts
class EcdhIntersect(Intersect):
"""
adapted from Secure Information Retrieval Module
"""
def __init__(self):
super().__init__()
self.role = None
self.transfer_variable = EcdhIntersectTransferVariable()
self.curve_instance = None
def load_params(self, param):
super().load_params(param=param)
self.ecdh_params = param.ecdh_params
self.hash_operator = Hash(param.ecdh_params.hash_method, hex_output=False)
self.salt = self.ecdh_params.salt
self.curve = self.ecdh_params.curve
def get_intersect_method_meta(self):
ecdh_meta = {"intersect_method": consts.ECDH,
"hash_method": self.ecdh_params.hash_method,
"salt": self.salt,
"curve": self.curve}
return ecdh_meta
def init_curve(self, curve_key=None):
self.curve_instance = EllipticCurve(self.curve, curve_key)
@staticmethod
def get_mode(reserve_original_key=False, reserve_original_value=False):
if reserve_original_key and reserve_original_value:
return 5
if reserve_original_key:
return 4
if reserve_original_value:
return 3
return 1
@staticmethod
def _encrypt_id(data_instances, curve_instance, reserve_original_key=False, hash_operator=None, salt='',
reserve_original_value=False):
"""
Encrypt the key (ID) of input Table
:param curve: curve object
:param data_instance: Table
:param reserve_original_key: (enc_key, ori_key) if reserve_original_key == True, otherwise (enc_key, -1)
:param hash_operator: if provided, use map_hash_encrypt
:param salt: if provided, use for map_hash_encrypt
        :param reserve_original_value:
(enc_key, (ori_key, val)) for reserve_original_key == True and reserve_original_value==True;
(ori_key, (enc_key, val)) for only reserve_original_value == True.
:return:
"""
mode = EcdhIntersect.get_mode(reserve_original_key, reserve_original_value)
if hash_operator is not None:
return curve_instance.map_hash_encrypt(data_instances, mode=mode, hash_operator=hash_operator, salt=salt)
return curve_instance.map_encrypt(data_instances, mode=mode)
@staticmethod
def _sign_id(data_instances, curve_instance, reserve_original_key=False, reserve_original_value=False):
"""
Encrypt the key (ID) of input Table
:param curve_instance: curve object
        :param data_instances: Table
:param reserve_original_key: (enc_key, ori_key) if reserve_original_key == True, otherwise (enc_key, -1)
        :param reserve_original_value:
(enc_key, (ori_key, val)) for reserve_original_key == True and reserve_original_value==True;
(ori_key, (enc_key, val)) for only reserve_original_value == True.
:return:
"""
mode = EcdhIntersect.get_mode(reserve_original_key, reserve_original_value)
return curve_instance.map_sign(data_instances, mode=mode)
def _exchange_id(self, id, replace_val=True):
"""
:param id: Table in the form (id, 0)
:return:
"""
pass
def _sync_doubly_encrypted_id(self, id):
"""
host -> guest
:param id:
:return:
"""
pass
def get_intersect_doubly_encrypted_id(self, data_instances, keep_key=True):
raise NotImplementedError("This method should not be called here")
def decrypt_intersect_doubly_encrypted_id(self, id_intersect_cipher_cipher):
raise NotImplementedError("This method should not be called here")
def get_intersect_doubly_encrypted_id_from_cache(self, data_instances, cache_set):
raise NotImplementedError("This method should not be called here")
def run_intersect(self, data_instances):
LOGGER.info("Start ECDH Intersection")
id_intersect_cipher_cipher = self.get_intersect_doubly_encrypted_id(data_instances)
intersect_ids = self.decrypt_intersect_doubly_encrypted_id(id_intersect_cipher_cipher)
return intersect_ids
def run_cache_intersect(self, data_instances, cache_data):
LOGGER.info("Start ECDH Intersection with cache")
id_intersect_cipher_cipher = self.get_intersect_doubly_encrypted_id_from_cache(data_instances, cache_data)
intersect_ids = self.decrypt_intersect_doubly_encrypted_id(id_intersect_cipher_cipher)
return intersect_ids
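# --- Editorial note (comments only): _encrypt_id and _sign_id split the two
# --- protocol passes. The first pass (_encrypt_id) hashes raw ids with
# --- hash_operator + salt before applying the local curve secret, while the
# --- second pass (_sign_id) only re-applies a secret to points the peer has
# --- already produced, so no re-hashing happens. Because the two passes
# --- commute, both parties arrive at identical doubly encrypted values for
# --- common ids, which is what run_intersect compares.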
| 5,559 | 41.121212 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/ecdh_intersect/ecdh_intersect_guest.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.statistic.intersect.ecdh_intersect.ecdh_intersect_base import EcdhIntersect
from federatedml.util import consts, LOGGER
class EcdhIntersectionGuest(EcdhIntersect):
def __init__(self):
super().__init__()
self.role = consts.GUEST
self.id_local_first = None
        self.id_list_remote_second = None
        self.id_list_local_second = None
self.host_count = None
def _exchange_id(self, id_cipher, replace_val=True):
if replace_val:
id_cipher = id_cipher.mapValues(lambda v: None)
self.transfer_variable.id_ciphertext_exchange_g2h.remote(id_cipher,
role=consts.HOST,
idx=-1)
LOGGER.info(f"sent id 1st ciphertext to all host")
id_list_remote = self.transfer_variable.id_ciphertext_exchange_h2g.get(idx=-1)
LOGGER.info("got id ciphertext from all host")
return id_list_remote
def _sync_doubly_encrypted_id(self, id=None):
id_guest = self.transfer_variable.doubly_encrypted_id.get(idx=-1)
LOGGER.info("got doubly encrypted id list from host")
return id_guest
"""
def send_intersect_ids(self, intersect_ids):
remote_intersect_id = intersect_ids.map(lambda k, v: (v, None))
self.transfer_variable.intersect_ids.remote(remote_intersect_id,
role=consts.HOST,
idx=0)
LOGGER.info(f"Remote intersect ids to Host!")
"""
def send_intersect_ids(self, intersect_ids):
for i, host_party_id in enumerate(self.host_party_id_list):
remote_intersect_id = intersect_ids.map(lambda k, v: (v[i], None))
self.transfer_variable.intersect_ids.remote(remote_intersect_id,
role=consts.HOST,
idx=i)
LOGGER.info(f"Remote intersect ids to {i}th Host {host_party_id}!")
def get_intersect_doubly_encrypted_id(self, data_instances, keep_key=True):
self.init_curve()
LOGGER.info(f"curve instance obtained")
# 1st ID encrypt: # (Eg, -1)
self.id_local_first = self._encrypt_id(data_instances,
self.curve_instance,
reserve_original_key=keep_key,
hash_operator=self.hash_operator,
salt=self.salt)
LOGGER.info("encrypted guest id for the 1st time")
id_list_remote_first = self._exchange_id(self.id_local_first, keep_key)
# 2nd ID encrypt & receive doubly encrypted ID list: # (EEh, Eh)
self.id_list_remote_second = [self._sign_id(id_remote_first,
self.curve_instance,
reserve_original_key=keep_key)
for id_remote_first in id_list_remote_first]
LOGGER.info("encrypted remote id for the 2nd time")
# receive doubly encrypted ID list from all host:
self.id_list_local_second = self._sync_doubly_encrypted_id() # get (EEg, Eg)
        # find intersection per host: (EEi, [Eh, Eg])
id_list_intersect_cipher_cipher = [self.extract_intersect_ids(remote_cipher,
local_cipher,
keep_both=keep_key)
for remote_cipher, local_cipher in zip(self.id_list_remote_second,
self.id_list_local_second)]
LOGGER.info("encrypted intersection ids found")
return id_list_intersect_cipher_cipher
def decrypt_intersect_doubly_encrypted_id(self, id_intersect_cipher_cipher):
# EEi -> (Eg, Eh)
id_list_intersect_cipher = [ids.map(lambda k, v: (v[1], [v[0]])) for ids in id_intersect_cipher_cipher]
intersect_ids = self.get_common_intersection(id_list_intersect_cipher, keep_encrypt_ids=True)
LOGGER.info(f"intersection found")
if self.sync_intersect_ids:
self.send_intersect_ids(intersect_ids)
else:
LOGGER.info("Skip sync intersect ids with Host(s).")
intersect_ids = intersect_ids.join(self.id_local_first, lambda cipher, raw: raw)
intersect_ids = intersect_ids.map(lambda k, v: (v, None))
return intersect_ids
def get_intersect_key(self, party_id):
intersect_key = {"curve_key": self.curve_instance.get_curve_key().decode("latin1")}
return intersect_key
def load_intersect_key(self, cache_meta):
host_party = self.host_party_id_list[0]
intersect_key = cache_meta[str(host_party)]["intersect_key"]
for host_party in self.host_party_id_list:
cur_intersect_key = cache_meta[str(host_party)]["intersect_key"]
            if cur_intersect_key != intersect_key:
                raise ValueError("Not all intersect keys from cache match, please check.")
curve_key = intersect_key["curve_key"].encode("latin1")
self.init_curve(curve_key)
def generate_cache(self, data_instances):
self.init_curve()
LOGGER.info(f"curve instance obtained")
cache_id_list = self.cache_transfer_variable.get(idx=-1)
LOGGER.info(f"got cache_id from all host")
id_list_remote_first = self.transfer_variable.id_ciphertext_exchange_h2g.get(idx=-1)
LOGGER.info("Get id ciphertext list from all host")
# 2nd ID encrypt & receive doubly encrypted ID list: # (EEh, Eh)
id_remote_second = [self._sign_id(id_remote_first,
self.curve_instance,
reserve_original_key=True)
for id_remote_first in id_list_remote_first]
LOGGER.info("encrypted remote id for the 2nd time")
cache_data, cache_meta = {}, {}
intersect_meta = self.get_intersect_method_meta()
for i, party_id in enumerate(self.host_party_id_list):
meta = {"cache_id": cache_id_list[i],
"intersect_meta": intersect_meta,
"intersect_key": self.get_intersect_key(party_id)}
cache_meta[party_id] = meta
cache_data[party_id] = id_remote_second[i]
return cache_data, cache_meta
def get_intersect_doubly_encrypted_id_from_cache(self, data_instances, cache_data):
self.id_local_first = self._encrypt_id(data_instances,
self.curve_instance,
reserve_original_key=True,
hash_operator=self.hash_operator,
salt=self.salt)
LOGGER.info("encrypted guest id for the 1st time")
id_only = self.id_local_first.mapValues(lambda v: None)
self.transfer_variable.id_ciphertext_exchange_g2h.remote(id_only,
role=consts.HOST,
idx=-1)
LOGGER.info(f"sent id 1st ciphertext to host")
# receive doubly encrypted ID from all hosts:
self.id_list_local_second = self._sync_doubly_encrypted_id() # get (EEg, Eg)
self.host_count = len(self.id_list_local_second)
        # find intersection: (EEi, [Eh, Eg])
cache_host_list = self.extract_cache_list(cache_data, self.host_party_id_list)
id_list_intersect_cipher_cipher = [self.extract_intersect_ids(cache_host_list[i],
self.id_list_local_second[i],
keep_both=True)
for i in range(self.host_count)]
LOGGER.info("encrypted intersection ids found")
        self.id_list_remote_second = cache_host_list
return id_list_intersect_cipher_cipher
def run_cardinality(self, data_instances):
LOGGER.info(f"run cardinality_only with ECDH")
# EEg, Eg
id_list_intersect_cipher_cipher = self.get_intersect_doubly_encrypted_id(data_instances,
keep_key=False)
# Eg
id_intersect_cipher_cipher = self.filter_intersect_ids(id_list_intersect_cipher_cipher)
self.intersect_num = id_intersect_cipher_cipher.count()
if self.sync_cardinality:
self.transfer_variable.cardinality.remote(self.intersect_num, role=consts.HOST, idx=-1)
LOGGER.info("Sent intersect cardinality to host.")
else:
LOGGER.info("Skip sync intersect cardinality with host")
| 9,766 | 48.831633 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/ecdh_intersect/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/ecdh_intersect/ecdh_intersect_host.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from federatedml.statistic.intersect.ecdh_intersect.ecdh_intersect_base import EcdhIntersect
from federatedml.util import consts, LOGGER
class EcdhIntersectionHost(EcdhIntersect):
def __init__(self):
super().__init__()
self.role = consts.HOST
self.id_local_first = None
def _exchange_id(self, id, replace_val=True):
if replace_val:
id_only = id.mapValues(lambda v: None)
else:
id_only = id
self.transfer_variable.id_ciphertext_exchange_h2g.remote(id_only,
role=consts.GUEST,
idx=0)
LOGGER.info("sent id 1st ciphertext list to guest")
id_guest = self.transfer_variable.id_ciphertext_exchange_g2h.get(idx=0)
LOGGER.info("got id 1st ciphertext list from guest")
return id_guest
def _sync_doubly_encrypted_id(self, id):
self.transfer_variable.doubly_encrypted_id.remote(id,
role=consts.GUEST,
idx=0)
LOGGER.info("sent doubly encrypted id list to guest")
def get_intersect_ids(self):
first_cipher_intersect_ids = self.transfer_variable.intersect_ids.get(idx=0)
LOGGER.info(f"obtained cipher intersect ids from guest")
intersect_ids = self.map_encrypt_id_to_raw_id(first_cipher_intersect_ids,
self.id_local_first,
keep_encrypt_id=False)
return intersect_ids
def get_intersect_doubly_encrypted_id(self, data_instances, keep_key=True):
self.init_curve()
LOGGER.info(f"curve instance obtained")
# 1st ID encrypt: (Eh, (h, Instance))
self.id_local_first = self._encrypt_id(data_instances,
self.curve_instance,
reserve_original_key=keep_key,
hash_operator=self.hash_operator,
salt=self.salt,
reserve_original_value=keep_key)
LOGGER.info("encrypted local id for the 1st time")
# send (Eh, -1), get (Eg, -1)
id_remote_first = self._exchange_id(self.id_local_first, keep_key)
# 2nd ID encrypt & send doubly encrypted guest ID list to guest
id_remote_second = self._sign_id(id_remote_first,
self.curve_instance,
reserve_original_key=keep_key) # (EEg, Eg)
LOGGER.info("encrypted guest id for the 2nd time")
self._sync_doubly_encrypted_id(id_remote_second)
def decrypt_intersect_doubly_encrypted_id(self, id_intersect_cipher_cipher=None):
intersect_ids = None
if self.sync_intersect_ids:
intersect_ids = self.get_intersect_ids()
return intersect_ids
def get_intersect_key(self, party_id=None):
intersect_key = {"curve_key": self.curve_instance.get_curve_key().decode("latin1")}
return intersect_key
def load_intersect_key(self, cache_meta):
intersect_key = cache_meta[str(self.guest_party_id)]["intersect_key"]
curve_key = intersect_key["curve_key"].encode("latin1")
self.init_curve(curve_key)
def generate_cache(self, data_instances):
self.init_curve()
LOGGER.info(f"curve instance obtained")
cache_id = str(uuid.uuid4())
self.cache_id = {self.guest_party_id: cache_id}
self.cache_transfer_variable.remote(cache_id, role=consts.GUEST, idx=0)
LOGGER.info(f"remote cache_id to guest")
# 1st ID encrypt: (Eh, (h, Instance))
id_local_first = self._encrypt_id(data_instances,
self.curve_instance,
reserve_original_key=True,
hash_operator=self.hash_operator,
salt=self.salt,
reserve_original_value=True)
LOGGER.info("encrypted local id for the 1st time")
id_only = id_local_first.mapValues(lambda v: None)
self.transfer_variable.id_ciphertext_exchange_h2g.remote(id_only,
role=consts.GUEST,
idx=0)
LOGGER.info("sent id 1st ciphertext list to guest")
cache_data = {self.guest_party_id: id_local_first}
cache_meta = {self.guest_party_id: {"cache_id": cache_id,
"intersect_meta": self.get_intersect_method_meta(),
"intersect_key": self.get_intersect_key()}}
return cache_data, cache_meta
def get_intersect_doubly_encrypted_id_from_cache(self, data_instances, cache_data):
id_remote_first = self.transfer_variable.id_ciphertext_exchange_g2h.get(idx=0)
LOGGER.info("got id 1st ciphertext from guest")
# 2nd ID encrypt & send doubly encrypted guest ID to guest
id_remote_second = self._sign_id(id_remote_first,
self.curve_instance,
reserve_original_key=True) # (EEg, Eg)
LOGGER.info("encrypted guest id for the 2nd time")
self.id_local_first = self.extract_cache_list(cache_data, self.guest_party_id)[0]
self._sync_doubly_encrypted_id(id_remote_second)
def run_cardinality(self, data_instances):
LOGGER.info(f"run exact_cardinality with DH")
self.get_intersect_doubly_encrypted_id(data_instances, keep_key=True)
if self.sync_cardinality:
self.intersect_num = self.transfer_variable.cardinality.get(idx=0)
LOGGER.info("Got intersect cardinality from guest.")
| 6,749 | 45.875 | 95 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/raw_intersect/raw_intersect_guest.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.statistic.intersect.raw_intersect.raw_intersect_base import RawIntersect
from federatedml.util import consts, LOGGER
class RawIntersectionGuest(RawIntersect):
def __init__(self):
super().__init__()
self.role = consts.GUEST
def run_intersect(self, data_instances):
LOGGER.info("Start raw intersection")
if self.join_role == consts.HOST:
intersect_ids = self.intersect_send_id(data_instances)
elif self.join_role == consts.GUEST:
intersect_ids = self.intersect_join_id(data_instances)
else:
raise ValueError("Unknown intersect join role, please check the configure of guest")
return intersect_ids
| 1,334 | 35.081081 | 96 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/raw_intersect/raw_intersect_host.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.statistic.intersect.raw_intersect.raw_intersect_base import RawIntersect
from federatedml.util import consts, LOGGER
class RawIntersectionHost(RawIntersect):
def __init__(self):
super().__init__()
self.role = consts.HOST
def run_intersect(self, data_instances):
LOGGER.info("Start raw intersection")
if self.join_role == consts.GUEST:
intersect_ids = self.intersect_send_id(data_instances)
elif self.join_role == consts.HOST:
intersect_ids = self.intersect_join_id(data_instances)
else:
raise ValueError("Unknown intersect join role, please check job configuration")
return intersect_ids
| 1,327 | 34.891892 | 91 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/raw_intersect/raw_intersect_base.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.hash.hash_factory import Hash
from federatedml.statistic.intersect import Intersect
from federatedml.transfer_variable.transfer_class.raw_intersect_transfer_variable import RawIntersectTransferVariable
from federatedml.util import consts, LOGGER
class RawIntersect(Intersect):
def __init__(self):
super().__init__()
self.role = None
self.transfer_variable = RawIntersectTransferVariable()
self.task_version_id = None
self.tracker = None
def load_params(self, param):
# self.only_output_key = param.only_output_key
# self.sync_intersect_ids = param.sync_intersect_ids
super().load_params(param=param)
self.raw_params = param.raw_params
self.use_hash = self.raw_params.use_hash
self.hash_method = self.raw_params.hash_method
self.base64 = self.raw_params.base64
self.salt = self.raw_params.salt
self.join_role = self.raw_params.join_role
self.hash_operator = Hash(self.hash_method, self.base64)
def intersect_send_id(self, data_instances):
sid_hash_pair = None
if self.use_hash and self.hash_method != "none":
sid_hash_pair = data_instances.map(
lambda k, v: (Intersect.hash(k, self.hash_operator, self.salt), k))
data_sid = sid_hash_pair.mapValues(lambda v: None)
else:
data_sid = data_instances.mapValues(lambda v: None)
LOGGER.info("Send id role is {}".format(self.role))
if self.role == consts.GUEST:
send_ids_federation = self.transfer_variable.send_ids_guest
recv_role = consts.HOST
elif self.role == consts.HOST:
send_ids_federation = self.transfer_variable.send_ids_host
recv_role = consts.GUEST
else:
raise ValueError("Unknown intersect role, please check the code")
send_ids_federation.remote(data_sid,
role=recv_role,
idx=-1)
LOGGER.info("Remote data_sid to role-join")
intersect_ids = None
if self.sync_intersect_ids:
if self.role == consts.HOST:
intersect_ids_federation = self.transfer_variable.intersect_ids_guest
elif self.role == consts.GUEST:
intersect_ids_federation = self.transfer_variable.intersect_ids_host
else:
raise ValueError("Unknown intersect role, please check the code")
recv_intersect_ids_list = intersect_ids_federation.get(idx=-1)
LOGGER.info("Get intersect ids from role-join!")
ids_list_size = len(recv_intersect_ids_list)
LOGGER.info("recv_intersect_ids_list's size is {}".format(ids_list_size))
recv_intersect_ids = self.get_common_intersection(recv_intersect_ids_list)
if self.role == consts.GUEST and len(self.host_party_id_list) > 1:
LOGGER.info(f"raw intersect send role is guest, "
f"and has {self.host_party_id_list} hosts, remote the final intersect_ids to hosts")
self.transfer_variable.sync_intersect_ids_multi_hosts.remote(recv_intersect_ids,
role=consts.HOST,
idx=-1)
if sid_hash_pair and recv_intersect_ids is not None:
hash_intersect_ids_map = recv_intersect_ids.join(sid_hash_pair, lambda r, s: s)
intersect_ids = hash_intersect_ids_map.map(lambda k, v: (v, None))
else:
intersect_ids = recv_intersect_ids
else:
LOGGER.info("Not Get intersect ids from role-join!")
return intersect_ids
def intersect_join_id(self, data_instances):
LOGGER.info("Join id role is {}".format(self.role))
sid_hash_pair = None
if self.use_hash and self.hash_method != "none":
sid_hash_pair = data_instances.map(
lambda k, v: (Intersect.hash(k, self.hash_operator, self.salt), k))
data_sid = sid_hash_pair.mapValues(lambda v: None)
else:
data_sid = data_instances.mapValues(lambda v: None)
if self.role == consts.HOST:
send_ids_federation = self.transfer_variable.send_ids_guest
elif self.role == consts.GUEST:
send_ids_federation = self.transfer_variable.send_ids_host
else:
raise ValueError("Unknown intersect role, please check the code")
recv_ids_list = send_ids_federation.get(idx=-1)
ids_list_size = len(recv_ids_list)
LOGGER.info("Get ids_list from role-send, ids_list size is {}".format(len(recv_ids_list)))
if ids_list_size == 1:
hash_intersect_ids = recv_ids_list[0].join(data_sid, lambda i, d: None)
elif ids_list_size > 1:
hash_intersect_ids_list = []
for ids in recv_ids_list:
hash_intersect_ids_list.append(ids.join(data_sid, lambda i, d: None))
hash_intersect_ids = self.get_common_intersection(hash_intersect_ids_list)
else:
hash_intersect_ids = None
LOGGER.info("Finish intersect_ids computing")
if self.sync_intersect_ids:
if self.role == consts.GUEST:
intersect_ids_federation = self.transfer_variable.intersect_ids_guest
send_role = consts.HOST
elif self.role == consts.HOST:
intersect_ids_federation = self.transfer_variable.intersect_ids_host
send_role = consts.GUEST
else:
raise ValueError("Unknown intersect role, please check the code")
intersect_ids_federation.remote(hash_intersect_ids,
role=send_role,
idx=-1)
LOGGER.info("Remote intersect ids to role-send")
if self.role == consts.HOST and len(self.host_party_id_list) > 1:
LOGGER.info(f"raw intersect join role is host,"
f"and has {self.host_party_id_list} hosts, get the final intersect_ids from guest")
hash_intersect_ids = self.transfer_variable.sync_intersect_ids_multi_hosts.get(idx=0)
if sid_hash_pair:
hash_intersect_ids_map = hash_intersect_ids.join(sid_hash_pair, lambda r, s: s)
intersect_ids = hash_intersect_ids_map.map(lambda k, v: (v, None))
else:
intersect_ids = hash_intersect_ids
return intersect_ids
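# --- Hedged standalone sketch (illustration only, not FATE code): the raw
# intersection above reduces to "hash both sides' sample ids with a shared
# salt, then join on the digests". This toy version uses plain dicts instead of
# FATE Tables and hashlib.sha256 in place of the configurable Hash operator.
import hashlib

def _toy_digest(sid, salt=""):
    return hashlib.sha256((str(sid) + salt).encode()).hexdigest()

def _toy_raw_intersect(send_ids, join_ids, salt="42"):
    sent = {_toy_digest(s, salt): s for s in send_ids}   # digest -> original id
    joined = {_toy_digest(s, salt) for s in join_ids} & set(sent)
    return sorted(sent[d] for d in joined)               # map digests back

assert _toy_raw_intersect(["a", "b", "c"], ["b", "c", "d"]) == ["b", "c"]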
| 7,318 | 43.357576 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/raw_intersect/__init__.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/rsa_intersect/rsa_intersect_base.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from federatedml.param.intersect_param import DEFAULT_RANDOM_BIT
from federatedml.secureprotol import gmpy_math
from federatedml.secureprotol.encrypt import RsaEncrypt
from federatedml.secureprotol.hash.hash_factory import Hash
from federatedml.statistic.intersect import Intersect
from federatedml.transfer_variable.transfer_class.rsa_intersect_transfer_variable import RsaIntersectTransferVariable
from federatedml.util import consts, LOGGER
class RsaIntersect(Intersect):
def __init__(self):
super().__init__()
# self.intersect_cache_param = intersect_params.intersect_cache_param
self.rcv_e = None
self.rcv_n = None
self.e = None
self.d = None
self.n = None
self.p = None
self.q = None
self.cp = None
self.cq = None
# self.r = None
self.transfer_variable = RsaIntersectTransferVariable()
self.role = None
def load_params(self, param):
super().load_params(param=param)
self.rsa_params = param.rsa_params
self.random_bit = self.rsa_params.random_bit
self.split_calculation = self.rsa_params.split_calculation
self.random_base_fraction = self.rsa_params.random_base_fraction
self.first_hash_operator = Hash(self.rsa_params.hash_method, False)
self.final_hash_operator = Hash(self.rsa_params.final_hash_method, False)
self.salt = self.rsa_params.salt
def get_intersect_method_meta(self):
rsa_meta = {"intersect_method": consts.RSA,
"hash_method": self.rsa_params.hash_method,
"final_hash_method": self.rsa_params.final_hash_method,
"salt": self.salt,
"random_bit": self.random_bit}
return rsa_meta
@staticmethod
def extend_pair(v1, v2):
return v1 + v2
@staticmethod
def pubkey_id_process(data, fraction, random_bit, rsa_e, rsa_n, hash_operator=None, salt=''):
if fraction and fraction <= consts.MAX_BASE_FRACTION:
LOGGER.debug(f"fraction value: {fraction} provided, use fraction in pubkey id process")
count = max(round(data.count() * max(fraction, consts.MIN_BASE_FRACTION)), 1)
def group_kv(kv_iterator):
res = []
for k, v in kv_iterator:
if hash_operator is not None:
v = (k, v)
k = int(Intersect.hash(k, hash_operator, salt), 16)
res.append((k % count, [(k, v)]))
return res
reduced_pair_group = data.mapReducePartitions(group_kv, RsaIntersect.extend_pair)
def pubkey_id_generate(k, pair):
r = random.SystemRandom().getrandbits(random_bit)
r_e = gmpy_math.powmod(r, rsa_e, rsa_n)
for hash_sid, v in pair:
processed_id = r_e * hash_sid % rsa_n
yield processed_id, (v[0], r)
return reduced_pair_group.flatMap(pubkey_id_generate)
else:
LOGGER.debug(f"fraction not provided or invalid, fraction value: {fraction}.")
return data.map(lambda k, v: RsaIntersect.pubkey_id_process_per(k, v, random_bit, rsa_e, rsa_n,
hash_operator, salt))
@staticmethod
def generate_rsa_key(rsa_bit=1024):
LOGGER.info(f"Generate {rsa_bit}-bit RSA key.")
encrypt_operator = RsaEncrypt()
encrypt_operator.generate_key(rsa_bit)
return encrypt_operator.get_key_pair()
def generate_protocol_key(self):
if self.role == consts.HOST:
self.e, self.d, self.n, self.p, self.q = self.generate_rsa_key(self.rsa_params.key_length)
self.cp, self.cq = gmpy_math.crt_coefficient(self.p, self.q)
else:
e, d, n, p, q, cp, cq = [], [], [], [], [], [], []
for i in range(len(self.host_party_id_list)):
e_i, d_i, n_i, p_i, q_i = self.generate_rsa_key(self.rsa_params.key_length)
cp_i, cq_i = gmpy_math.crt_coefficient(p_i, q_i)
e.append(e_i)
d.append(d_i)
n.append(n_i)
p.append(p_i)
q.append(q_i)
cp.append(cp_i)
cq.append(cq_i)
self.e = e
self.d = d
self.n = n
self.p = p
self.q = q
self.cp = cp
self.cq = cq
@staticmethod
def pubkey_id_process_per(hash_sid, v, random_bit, rsa_e, rsa_n, hash_operator=None, salt=''):
r = random.SystemRandom().getrandbits(random_bit)
if hash_operator:
processed_id = gmpy_math.powmod(r, rsa_e, rsa_n) * \
int(Intersect.hash(hash_sid, hash_operator, salt), 16) % rsa_n
return processed_id, (hash_sid, r)
else:
processed_id = gmpy_math.powmod(r, rsa_e, rsa_n) * hash_sid % rsa_n
return processed_id, (v[0], r)
@staticmethod
def prvkey_id_process(
hash_sid,
v,
rsa_d,
rsa_n,
rsa_p,
rsa_q,
cp,
cq,
final_hash_operator,
salt,
first_hash_operator=None):
if first_hash_operator:
processed_id = Intersect.hash(gmpy_math.powmod_crt(int(Intersect.hash(
hash_sid, first_hash_operator, salt), 16), rsa_d, rsa_n, rsa_p, rsa_q, cp, cq), final_hash_operator, salt)
return processed_id, hash_sid
else:
processed_id = Intersect.hash(gmpy_math.powmod_crt(hash_sid, rsa_d, rsa_n, rsa_p, rsa_q, cp, cq),
final_hash_operator,
salt)
return processed_id, v[0]
def cal_prvkey_ids_process_pair(self, data_instances, d, n, p, q, cp, cq, first_hash_operator=None):
return data_instances.map(
lambda k, v: self.prvkey_id_process(k, v, d, n, p, q, cp, cq,
self.final_hash_operator,
self.rsa_params.salt,
first_hash_operator)
)
@staticmethod
def sign_id(hash_sid, rsa_d, rsa_n, rsa_p, rsa_q, cp, cq):
return gmpy_math.powmod_crt(hash_sid, rsa_d, rsa_n, rsa_p, rsa_q, cp, cq)
def split_calculation_process(self, data_instances):
raise NotImplementedError("This method should not be called here")
def unified_calculation_process(self, data_instances):
raise NotImplementedError("This method should not be called here")
def cache_unified_calculation_process(self, data_instances, cache_set):
raise NotImplementedError("This method should not be called here")
def run_intersect(self, data_instances):
LOGGER.info("Start RSA Intersection")
if self.split_calculation:
# H(k), (k, v)
hash_data_instances = data_instances.map(
lambda k, v: (int(Intersect.hash(k, self.first_hash_operator, self.salt), 16), (k, v)))
intersect_ids = self.split_calculation_process(hash_data_instances)
else:
intersect_ids = self.unified_calculation_process(data_instances)
if intersect_ids is not None:
intersect_ids = intersect_ids.mapValues(lambda v: None)
return intersect_ids
def run_cache_intersect(self, data_instances, cache_data):
LOGGER.info("Start RSA Intersection with cache")
if self.split_calculation:
LOGGER.warning(f"split_calculation not applicable to cache-enabled RSA intersection.")
intersect_ids = self.cache_unified_calculation_process(data_instances, cache_data)
if intersect_ids is not None:
intersect_ids = intersect_ids.mapValues(lambda v: None)
return intersect_ids
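# --- Hedged numeric sketch (illustration only, not FATE code) of the blinding
# identity this protocol relies on: for an RSA key (e, d, n), signing a blinded
# value (r**e * h mod n) and then dividing out r recovers h**d, so the guest
# obtains the host's signature on h without revealing h. Requires Python 3.8+
# for the modular inverse pow(r, -1, n).
def _toy_blind_rsa_demo():
    e, d, n = 17, 2753, 61 * 53        # tiny textbook key: p=61, q=53
    h = 1234                           # stand-in for int(hash(sid), 16) % n
    r = 99                             # blinding factor, gcd(r, n) == 1
    blinded = pow(r, e, n) * h % n     # what the guest sends
    signed = pow(blinded, d, n)        # host applies its private exponent
    unblinded = signed * pow(r, -1, n) % n   # guest divides out r (mod n)
    assert unblinded == pow(h, d, n)
_toy_blind_rsa_demo()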
| 8,652 | 41.416667 | 122 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/rsa_intersect/rsa_intersect_guest.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gmpy2
from federatedml.statistic.intersect.rsa_intersect.rsa_intersect_base import RsaIntersect
from federatedml.util import consts, LOGGER
class RsaIntersectionGuest(RsaIntersect):
def __init__(self):
super().__init__()
self.role = consts.GUEST
def get_host_prvkey_ids(self):
host_prvkey_ids_list = self.transfer_variable.host_prvkey_ids.get(idx=-1)
LOGGER.info("Get host_prvkey_ids from all host")
return host_prvkey_ids_list
def get_host_filter(self):
host_filter_list = self.transfer_variable.host_filter.get(idx=-1)
LOGGER.info("Get host_filter from all host")
return host_filter_list
def get_host_pubkey_ids(self):
host_pubkey_ids_list = self.transfer_variable.host_pubkey_ids.get(idx=-1)
LOGGER.info("Get host_pubkey_ids from all host")
return host_pubkey_ids_list
def sign_host_ids(self, host_pubkey_ids_list):
# Process(signs) hosts' ids
guest_sign_host_ids_list = [host_pubkey_ids.map(lambda k, v:
(k, self.sign_id(k,
self.d[i],
self.n[i],
self.p[i],
self.q[i],
self.cp[i],
self.cq[i])))
for i, host_pubkey_ids in enumerate(host_pubkey_ids_list)]
LOGGER.info("Sign host_pubkey_ids with guest prv_keys")
return guest_sign_host_ids_list
def send_intersect_ids(self, encrypt_intersect_ids_list, intersect_ids):
if len(self.host_party_id_list) > 1:
for i, host_party_id in enumerate(self.host_party_id_list):
remote_intersect_id = intersect_ids.map(lambda k, v: (v[i], None))
self.transfer_variable.intersect_ids.remote(remote_intersect_id,
role=consts.HOST,
idx=i)
LOGGER.info(f"Remote intersect ids to Host {host_party_id}!")
else:
remote_intersect_id = encrypt_intersect_ids_list[0].mapValues(lambda v: None)
self.transfer_variable.intersect_ids.remote(remote_intersect_id,
role=consts.HOST,
idx=0)
LOGGER.info(f"Remote intersect ids to Host!")
def get_host_intersect_ids(self, guest_prvkey_ids_list):
encrypt_intersect_ids_list = self.transfer_variable.host_intersect_ids.get(idx=-1)
LOGGER.info("Get intersect ids from Host")
intersect_ids_pair_list = [self.extract_intersect_ids(ids,
guest_prvkey_ids_list[i]) for i, ids in
enumerate(encrypt_intersect_ids_list)]
intersect_ids = self.filter_intersect_ids(intersect_ids_pair_list, keep_encrypt_ids=True)
return intersect_ids
def split_calculation_process(self, data_instances):
LOGGER.info("RSA intersect using split calculation.")
# split data
sid_hash_odd = data_instances.filter(lambda k, v: k & 1)
sid_hash_even = data_instances.filter(lambda k, v: not k & 1)
# LOGGER.debug(f"sid_hash_odd count: {sid_hash_odd.count()},"
# f"odd fraction: {sid_hash_odd.count()/data_instances.count()}")
# generate pub keys for even ids
self.generate_protocol_key()
LOGGER.info("Generate guest protocol key!")
# send public key e & n to all host
for i, host_party_id in enumerate(self.host_party_id_list):
guest_public_key = {"e": self.e[i], "n": self.n[i]}
self.transfer_variable.guest_pubkey.remote(guest_public_key,
role=consts.HOST,
idx=i)
LOGGER.info(f"Remote public key to Host {host_party_id}.")
# receive host pub keys for odd ids
host_public_keys = self.transfer_variable.host_pubkey.get(idx=-1)
# LOGGER.debug("Get host_public_key:{} from Host".format(host_public_keys))
LOGGER.info(f"Get host_public_key from Host")
self.rcv_e = [int(public_key["e"]) for public_key in host_public_keys]
self.rcv_n = [int(public_key["n"]) for public_key in host_public_keys]
# encrypt own odd ids with pub keys from host
pubkey_ids_process_list = [self.pubkey_id_process(sid_hash_odd,
fraction=self.random_base_fraction,
random_bit=self.random_bit,
rsa_e=self.rcv_e[i],
rsa_n=self.rcv_n[i]) for i in range(len(self.rcv_e))]
LOGGER.info(f"Perform pubkey_ids_process")
for i, guest_id in enumerate(pubkey_ids_process_list):
mask_guest_id = guest_id.mapValues(lambda v: None)
self.transfer_variable.guest_pubkey_ids.remote(mask_guest_id,
role=consts.HOST,
idx=i)
LOGGER.info(f"Remote guest_pubkey_ids to Host {i}")
# encrypt & send prvkey encrypted guest even ids to host
prvkey_ids_process_pair_list = []
for i, host_party_id in enumerate(self.host_party_id_list):
prvkey_ids_process_pair = self.cal_prvkey_ids_process_pair(sid_hash_even,
self.d[i],
self.n[i],
self.p[i],
self.q[i],
self.cp[i],
self.cq[i])
prvkey_ids_process = prvkey_ids_process_pair.mapValues(lambda v: None)
self.transfer_variable.guest_prvkey_ids.remote(prvkey_ids_process,
role=consts.HOST,
idx=i)
prvkey_ids_process_pair_list.append(prvkey_ids_process_pair)
LOGGER.info(f"Remote guest_prvkey_ids to host {host_party_id}")
# get & sign host pub key encrypted even ids
host_pubkey_ids_list = self.get_host_pubkey_ids()
guest_sign_host_ids_list = self.sign_host_ids(host_pubkey_ids_list)
# send signed host even ids
for i, host_party_id in enumerate(self.host_party_id_list):
self.transfer_variable.guest_sign_host_ids.remote(guest_sign_host_ids_list[i],
role=consts.HOST,
idx=i)
LOGGER.info(f"Remote guest_sign_host_ids to Host {host_party_id}.")
# get prvkey encrypted odd ids from host
host_prvkey_ids_list = self.get_host_prvkey_ids()
# Recv host signed odd ids
# table(guest_pubkey_id, host signed odd ids)
recv_host_sign_guest_ids_list = self.transfer_variable.host_sign_guest_ids.get(idx=-1)
LOGGER.info("Get host_sign_guest_ids from Host")
# table(r^e % n *hash(sid), sid, hash(guest_ids_process/r))
# g[0]=(r^e % n *hash(sid), sid), g[1]=random bits r
host_sign_guest_ids_list = [v.join(recv_host_sign_guest_ids_list[i],
lambda g, r: (g[0], RsaIntersectionGuest.hash(gmpy2.divm(int(r),
int(g[1]),
self.rcv_n[i]),
self.final_hash_operator,
self.rsa_params.salt)))
for i, v in enumerate(pubkey_ids_process_list)]
# table(hash(guest_ids_process/r), sid)
sid_host_sign_guest_ids_list = [g.map(lambda k, v: (v[1], v[0])) for g in host_sign_guest_ids_list]
# get intersect odd ids
# intersect table(hash(guest_ids_process/r), sid)
encrypt_intersect_odd_ids_list = [v.join(host_prvkey_ids_list[i], lambda sid, h: sid) for i, v in
enumerate(sid_host_sign_guest_ids_list)]
intersect_odd_ids = self.filter_intersect_ids(encrypt_intersect_odd_ids_list, keep_encrypt_ids=True)
intersect_even_ids = self.get_host_intersect_ids(prvkey_ids_process_pair_list)
intersect_ids = intersect_odd_ids.union(intersect_even_ids)
if self.sync_intersect_ids:
self.send_intersect_ids(encrypt_intersect_odd_ids_list, intersect_odd_ids)
else:
LOGGER.info("Skip sync intersect ids with Host(s).")
return intersect_ids
def unified_calculation_process(self, data_instances):
LOGGER.info("RSA intersect using unified calculation.")
# receives public key e & n
public_keys = self.transfer_variable.host_pubkey.get(idx=-1)
# LOGGER.debug(f"Get RSA host_public_key:{public_keys} from Host")
LOGGER.info(f"Get RSA host_public_key from Host")
self.rcv_e = [int(public_key["e"]) for public_key in public_keys]
self.rcv_n = [int(public_key["n"]) for public_key in public_keys]
pubkey_ids_process_list = [self.pubkey_id_process(data_instances,
fraction=self.random_base_fraction,
random_bit=self.random_bit,
rsa_e=self.rcv_e[i],
rsa_n=self.rcv_n[i],
hash_operator=self.first_hash_operator,
salt=self.salt) for i in range(len(self.rcv_e))]
LOGGER.info(f"Finish pubkey_ids_process")
for i, guest_id in enumerate(pubkey_ids_process_list):
mask_guest_id = guest_id.mapValues(lambda v: None)
self.transfer_variable.guest_pubkey_ids.remote(mask_guest_id,
role=consts.HOST,
idx=i)
LOGGER.info("Remote guest_pubkey_ids to Host {}".format(i))
host_prvkey_ids_list = self.get_host_prvkey_ids()
LOGGER.info("Get host_prvkey_ids")
# Recv signed guest ids
# table(r^e % n *hash(sid), guest_id_process)
recv_host_sign_guest_ids_list = self.transfer_variable.host_sign_guest_ids.get(idx=-1)
LOGGER.info("Get host_sign_guest_ids from Host")
# table(r^e % n *hash(sid), sid, hash(guest_ids_process/r))
# g[0]=(r^e % n *hash(sid), sid), g[1]=random bits r
host_sign_guest_ids_list = [v.join(recv_host_sign_guest_ids_list[i],
lambda g, r: (g[0], RsaIntersectionGuest.hash(gmpy2.divm(int(r),
int(g[1]),
self.rcv_n[i]),
self.final_hash_operator,
self.rsa_params.salt)))
for i, v in enumerate(pubkey_ids_process_list)]
# table(hash(guest_ids_process/r), sid)
sid_host_sign_guest_ids_list = [g.map(lambda k, v: (v[1], v[0])) for g in host_sign_guest_ids_list]
# intersect table(hash(guest_ids_process/r), sid)
encrypt_intersect_ids_list = [v.join(host_prvkey_ids_list[i], lambda sid, h: sid) for i, v in
enumerate(sid_host_sign_guest_ids_list)]
intersect_ids = self.filter_intersect_ids(encrypt_intersect_ids_list, keep_encrypt_ids=True)
if self.sync_intersect_ids:
self.send_intersect_ids(encrypt_intersect_ids_list, intersect_ids)
else:
LOGGER.info("Skip sync intersect ids with Host(s).")
return intersect_ids
def get_intersect_key(self, party_id):
idx = self.host_party_id_list.index(party_id)
intersect_key = {"rcv_n": str(self.rcv_n[idx]),
"rcv_e": str(self.rcv_e[idx])}
return intersect_key
def load_intersect_key(self, cache_meta):
self.rcv_e, self.rcv_n = [], []
for host_party in self.host_party_id_list:
intersect_key = cache_meta[str(host_party)]["intersect_key"]
self.rcv_e.append(int(intersect_key["rcv_e"]))
self.rcv_n.append(int(intersect_key["rcv_n"]))
def run_cardinality(self, data_instances):
LOGGER.info(f"run cardinality_only with RSA")
# receives public key e & n
public_keys = self.transfer_variable.host_pubkey.get(idx=-1)
LOGGER.info(f"Get RSA host_public_key from Host")
self.rcv_e = [int(public_key["e"]) for public_key in public_keys]
self.rcv_n = [int(public_key["n"]) for public_key in public_keys]
pubkey_ids_process_list = [self.pubkey_id_process(data_instances,
fraction=self.random_base_fraction,
random_bit=self.random_bit,
rsa_e=self.rcv_e[i],
rsa_n=self.rcv_n[i],
hash_operator=self.first_hash_operator,
salt=self.salt) for i in range(len(self.rcv_e))]
LOGGER.info(f"Finish pubkey_ids_process")
for i, guest_id in enumerate(pubkey_ids_process_list):
mask_guest_id = guest_id.mapValues(lambda v: None)
self.transfer_variable.guest_pubkey_ids.remote(mask_guest_id,
role=consts.HOST,
idx=i)
LOGGER.info("Remote guest_pubkey_ids to Host {}".format(i))
host_filter_list = self.get_host_filter()
LOGGER.info("Get host_filter_list")
# Recv signed guest ids
# table(r^e % n *hash(sid), guest_id_process)
recv_host_sign_guest_ids_list = self.transfer_variable.host_sign_guest_ids.get(idx=-1)
LOGGER.info("Get host_sign_guest_ids from Host")
# table(r^e % n *hash(sid), sid, hash(guest_ids_process/r))
# g[0]=(r^e % n *hash(sid), sid), g[1]=random bits r
host_sign_guest_ids_list = [v.join(recv_host_sign_guest_ids_list[i],
lambda g, r: (g[0], RsaIntersectionGuest.hash(gmpy2.divm(int(r),
int(g[1]),
self.rcv_n[i]),
self.final_hash_operator,
self.rsa_params.salt)))
for i, v in enumerate(pubkey_ids_process_list)]
# table(hash(guest_ids_process/r), sid)
# sid_host_sign_guest_ids_list = [g.map(lambda k, v: (v[1], v[0])) for g in host_sign_guest_ids_list]
# filter ids
intersect_ids_list = [host_sign_guest_ids_list[i].filter(lambda k, v: host_filter_list[i].check(v[1]))
for i in range(len(self.host_party_id_list))]
intersect_ids_list = [ids.map(lambda k, v: (v[0], None)) for ids in intersect_ids_list]
intersect_ids = self.get_common_intersection(intersect_ids_list)
self.intersect_num = intersect_ids.count()
if self.sync_cardinality:
self.transfer_variable.cardinality.remote(self.intersect_num, role=consts.HOST, idx=-1)
LOGGER.info("Sent intersect cardinality to host.")
else:
LOGGER.info("Skip sync intersect cardinality with host(s)")
def generate_cache(self, data_instances):
LOGGER.info("Run RSA intersect cache")
# receives public key e & n
public_keys = self.transfer_variable.host_pubkey.get(idx=-1)
# LOGGER.debug(f"Get RSA host_public_key:{public_keys} from Host")
LOGGER.info(f"Get RSA host_public_key from Host")
self.rcv_e = [int(public_key["e"]) for public_key in public_keys]
self.rcv_n = [int(public_key["n"]) for public_key in public_keys]
cache_id_list = self.cache_transfer_variable.get(idx=-1)
LOGGER.info(f"Get cache_id from all host")
host_prvkey_ids_list = self.get_host_prvkey_ids()
LOGGER.info("Get host_prvkey_ids")
cache_data, cache_meta = {}, {}
intersect_meta = self.get_intersect_method_meta()
for i, party_id in enumerate(self.host_party_id_list):
meta = {"cache_id": cache_id_list[i],
"intersect_meta": intersect_meta,
"intersect_key": self.get_intersect_key(party_id)
}
cache_meta[party_id] = meta
cache_data[party_id] = host_prvkey_ids_list[i]
return cache_data, cache_meta
def cache_unified_calculation_process(self, data_instances, cache_data):
LOGGER.info("RSA intersect using cache.")
pubkey_ids_process_list = [self.pubkey_id_process(data_instances,
fraction=self.random_base_fraction,
random_bit=self.random_bit,
rsa_e=self.rcv_e[i],
rsa_n=self.rcv_n[i],
hash_operator=self.first_hash_operator,
salt=self.salt) for i in range(len(self.rcv_e))]
LOGGER.info(f"Finish pubkey_ids_process")
for i, guest_id in enumerate(pubkey_ids_process_list):
mask_guest_id = guest_id.mapValues(lambda v: None)
self.transfer_variable.guest_pubkey_ids.remote(mask_guest_id,
role=consts.HOST,
idx=i)
LOGGER.info("Remote guest_pubkey_ids to Host {}".format(i))
# Recv signed guest ids
# table(r^e % n *hash(sid), guest_id_process)
recv_host_sign_guest_ids_list = self.transfer_variable.host_sign_guest_ids.get(idx=-1)
LOGGER.info("Get host_sign_guest_ids from Host")
# table(r^e % n *hash(sid), sid, hash(guest_ids_process/r))
# g[0]=(r^e % n *hash(sid), sid), g[1]=random bits r
host_sign_guest_ids_list = [v.join(recv_host_sign_guest_ids_list[i],
lambda g, r: (g[0], RsaIntersectionGuest.hash(gmpy2.divm(int(r),
int(g[1]),
self.rcv_n[i]),
self.final_hash_operator,
self.rsa_params.salt)))
for i, v in enumerate(pubkey_ids_process_list)]
# table(hash(guest_ids_process/r), sid)
sid_host_sign_guest_ids_list = [g.map(lambda k, v: (v[1], v[0])) for g in host_sign_guest_ids_list]
# intersect table(hash(guest_ids_process/r), sid)
host_prvkey_ids_list = self.extract_cache_list(cache_data, self.host_party_id_list)
encrypt_intersect_ids_list = [v.join(host_prvkey_ids_list[i], lambda sid, h: sid) for i, v in
enumerate(sid_host_sign_guest_ids_list)]
intersect_ids = self.filter_intersect_ids(encrypt_intersect_ids_list, keep_encrypt_ids=True)
if self.sync_intersect_ids:
self.send_intersect_ids(encrypt_intersect_ids_list, intersect_ids)
else:
LOGGER.info("Skip sync intersect ids with Host(s).")
return intersect_ids
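# --- Hedged sketch (assumption: gmpy2.divm(a, b, n) computes a * b**-1 mod n,
# i.e. the unblinding step "signed / r" used above). The pure-Python equivalent
# below makes the final join condition explicit: both sides end up comparing
# final_hash(h**d mod n), which is why the guest can join its unblinded values
# against host_prvkey_ids.
import hashlib

def _toy_final_hash(x):
    return hashlib.sha256(str(x).encode()).hexdigest()

_e, _d, _n = 17, 2753, 3233            # tiny textbook RSA key
_h, _r = 1234, 99
_signed = pow(pow(_r, _e, _n) * _h % _n, _d, _n)              # host-signed blinded id
_guest_key = _toy_final_hash(_signed * pow(_r, -1, _n) % _n)  # guest unblinds, hashes
_host_key = _toy_final_hash(pow(_h, _d, _n))                  # host signs directly
assert _guest_key == _host_key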
| 22,292 | 55.153652 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/rsa_intersect/rsa_intersect_host.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gmpy2
import uuid
from federatedml.statistic.intersect.rsa_intersect.rsa_intersect_base import RsaIntersect
from federatedml.util import consts, LOGGER
class RsaIntersectionHost(RsaIntersect):
def __init__(self):
super().__init__()
self.role = consts.HOST
def split_calculation_process(self, data_instances):
LOGGER.info("RSA intersect using split calculation.")
# split data
sid_hash_odd = data_instances.filter(lambda k, v: k & 1)
sid_hash_even = data_instances.filter(lambda k, v: not k & 1)
# LOGGER.debug(f"sid_hash_odd count: {sid_hash_odd.count()},"
# f"odd fraction: {sid_hash_odd.count()/data_instances.count()}")
# generate rsa keys
# self.e, self.d, self.n = self.generate_protocol_key()
self.generate_protocol_key()
LOGGER.info("Generate host protocol key!")
public_key = {"e": self.e, "n": self.n}
# sends public key e & n to guest
self.transfer_variable.host_pubkey.remote(public_key,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote public key to Guest.")
# generate ri for even ids
# count = sid_hash_even.count()
# self.r = self.generate_r_base(self.random_bit, count, self.random_base_fraction)
# LOGGER.info(f"Generate {len(self.r)} r values.")
# receive guest key for even ids
guest_public_key = self.transfer_variable.guest_pubkey.get(idx=0)
# LOGGER.debug("Get guest_public_key:{} from Guest".format(guest_public_key))
LOGGER.info(f"Get guest_public_key from Guest")
self.rcv_e = int(guest_public_key["e"])
self.rcv_n = int(guest_public_key["n"])
# encrypt & send guest pubkey-encrypted odd ids
pubkey_ids_process = self.pubkey_id_process(sid_hash_even,
fraction=self.random_base_fraction,
random_bit=self.random_bit,
rsa_e=self.rcv_e,
rsa_n=self.rcv_n)
LOGGER.info(f"Finish pubkey_ids_process")
mask_host_id = pubkey_ids_process.mapValues(lambda v: None)
self.transfer_variable.host_pubkey_ids.remote(mask_host_id,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_pubkey_ids to Guest")
# encrypt & send prvkey-encrypted host odd ids to guest
prvkey_ids_process_pair = self.cal_prvkey_ids_process_pair(sid_hash_odd,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq)
prvkey_ids_process = prvkey_ids_process_pair.mapValues(lambda v: None)
self.transfer_variable.host_prvkey_ids.remote(prvkey_ids_process,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_prvkey_ids to Guest.")
# get & sign guest pubkey-encrypted odd ids
guest_pubkey_ids = self.transfer_variable.guest_pubkey_ids.get(idx=0)
LOGGER.info(f"Get guest_pubkey_ids from guest")
host_sign_guest_ids = guest_pubkey_ids.map(lambda k, v: (k, self.sign_id(k,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq)))
LOGGER.debug(f"host sign guest_pubkey_ids")
# send signed guest odd ids
self.transfer_variable.host_sign_guest_ids.remote(host_sign_guest_ids,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_sign_guest_ids_process to Guest.")
# recv guest privkey-encrypted even ids
guest_prvkey_ids = self.transfer_variable.guest_prvkey_ids.get(idx=0)
LOGGER.info("Get guest_prvkey_ids")
# receive guest-signed host even ids
recv_guest_sign_host_ids = self.transfer_variable.guest_sign_host_ids.get(idx=0)
LOGGER.info(f"Get guest_sign_host_ids from Guest.")
guest_sign_host_ids = pubkey_ids_process.join(recv_guest_sign_host_ids,
lambda g, r: (g[0],
RsaIntersectionHost.hash(gmpy2.divm(int(r),
int(g[1]),
self.rcv_n),
self.final_hash_operator,
self.rsa_params.salt)))
sid_guest_sign_host_ids = guest_sign_host_ids.map(lambda k, v: (v[1], v[0]))
encrypt_intersect_even_ids = sid_guest_sign_host_ids.join(guest_prvkey_ids, lambda sid, h: sid)
# filter & send intersect even ids
intersect_even_ids = self.filter_intersect_ids([encrypt_intersect_even_ids])
remote_intersect_even_ids = encrypt_intersect_even_ids.mapValues(lambda v: None)
self.transfer_variable.host_intersect_ids.remote(remote_intersect_even_ids, role=consts.GUEST, idx=0)
LOGGER.info(f"Remote host intersect ids to Guest")
# recv intersect ids
intersect_ids = None
if self.sync_intersect_ids:
encrypt_intersect_odd_ids = self.transfer_variable.intersect_ids.get(idx=0)
intersect_odd_ids_pair = encrypt_intersect_odd_ids.join(prvkey_ids_process_pair, lambda e, h: h)
intersect_odd_ids = intersect_odd_ids_pair.map(lambda k, v: (v, None))
intersect_ids = intersect_odd_ids.union(intersect_even_ids)
LOGGER.info("Get intersect ids from Guest")
return intersect_ids
def unified_calculation_process(self, data_instances):
LOGGER.info("RSA intersect using unified calculation.")
# generate rsa keys
# self.e, self.d, self.n = self.generate_protocol_key()
self.generate_protocol_key()
LOGGER.info("Generate protocol key!")
public_key = {"e": self.e, "n": self.n}
# sends public key e & n to guest
self.transfer_variable.host_pubkey.remote(public_key,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote public key to Guest.")
# hash host ids
prvkey_ids_process_pair = self.cal_prvkey_ids_process_pair(data_instances,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq,
self.first_hash_operator)
prvkey_ids_process = prvkey_ids_process_pair.mapValues(lambda v: None)
self.transfer_variable.host_prvkey_ids.remote(prvkey_ids_process,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_ids_process to Guest.")
# Recv guest ids
guest_pubkey_ids = self.transfer_variable.guest_pubkey_ids.get(idx=0)
LOGGER.info("Get guest_pubkey_ids from guest")
# Process(signs) guest ids and return to guest
host_sign_guest_ids = guest_pubkey_ids.map(lambda k, v: (k, self.sign_id(k,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq)))
self.transfer_variable.host_sign_guest_ids.remote(host_sign_guest_ids,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_sign_guest_ids_process to Guest.")
# recv intersect ids
intersect_ids = None
if self.sync_intersect_ids:
encrypt_intersect_ids = self.transfer_variable.intersect_ids.get(idx=0)
intersect_ids_pair = encrypt_intersect_ids.join(prvkey_ids_process_pair, lambda e, h: h)
intersect_ids = intersect_ids_pair.map(lambda k, v: (v, None))
LOGGER.info("Get intersect ids from Guest")
return intersect_ids
def get_intersect_key(self, party_id=None):
intersect_key = {"e": str(self.e),
"d": str(self.d),
"n": str(self.n),
"p": str(self.p),
"q": str(self.q),
"cp": str(self.cp),
"cq": str(self.cq)}
return intersect_key
def load_intersect_key(self, cache_meta):
intersect_key = cache_meta[str(self.guest_party_id)]["intersect_key"]
self.e = int(intersect_key["e"])
self.d = int(intersect_key["d"])
self.n = int(intersect_key["n"])
self.p = int(intersect_key["p"])
self.q = int(intersect_key["q"])
self.cp = int(intersect_key["cp"])
self.cq = int(intersect_key["cq"])
def run_cardinality(self, data_instances):
LOGGER.info(f"run cardinality_only with RSA")
# generate rsa keys
self.generate_protocol_key()
LOGGER.info("Generate protocol key!")
public_key = {"e": self.e, "n": self.n}
# sends public key e & n to guest
self.transfer_variable.host_pubkey.remote(public_key,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote public key to Guest.")
# hash host ids
prvkey_ids_process_pair = self.cal_prvkey_ids_process_pair(data_instances,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq,
self.first_hash_operator)
filter = self.construct_filter(prvkey_ids_process_pair,
false_positive_rate=self.intersect_preprocess_params.false_positive_rate,
hash_method=self.intersect_preprocess_params.hash_method,
random_state=self.intersect_preprocess_params.random_state)
self.filter = filter
self.transfer_variable.host_filter.remote(filter,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_filter to Guest.")
# Recv guest ids
guest_pubkey_ids = self.transfer_variable.guest_pubkey_ids.get(idx=0)
LOGGER.info("Get guest_pubkey_ids from guest")
# Process(signs) guest ids and return to guest
host_sign_guest_ids = guest_pubkey_ids.map(lambda k, v: (k, self.sign_id(k,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq)))
self.transfer_variable.host_sign_guest_ids.remote(host_sign_guest_ids,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_sign_guest_ids_process to Guest.")
if self.sync_cardinality:
self.intersect_num = self.transfer_variable.cardinality.get(idx=0)
LOGGER.info("Got intersect cardinality from guest.")
def generate_cache(self, data_instances):
LOGGER.info("Run RSA intersect cache.")
# generate rsa keys
# self.e, self.d, self.n = self.generate_protocol_key()
self.generate_protocol_key()
LOGGER.info("Generate protocol key!")
public_key = {"e": self.e, "n": self.n}
# sends public key e & n to guest
self.transfer_variable.host_pubkey.remote(public_key,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote public key to Guest.")
# hash host ids
prvkey_ids_process_pair = self.cal_prvkey_ids_process_pair(data_instances,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq,
self.first_hash_operator)
prvkey_ids_process = prvkey_ids_process_pair.mapValues(lambda v: None)
cache_id = str(uuid.uuid4())
# self.cache_id = {self.guest_party_id: cache_id}
# cache_schema = {"cache_id": cache_id}
# self.cache = prvkey_ids_process_pair
# prvkey_ids_process.schema = cache_schema
self.cache_transfer_variable.remote(cache_id, role=consts.GUEST, idx=0)
LOGGER.info(f"remote cache_id to guest")
self.transfer_variable.host_prvkey_ids.remote(prvkey_ids_process,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_ids_process to Guest.")
# prvkey_ids_process_pair.schema = cache_schema
cache_data = {self.guest_party_id: prvkey_ids_process_pair}
cache_meta = {self.guest_party_id: {"cache_id": cache_id,
"intersect_meta": self.get_intersect_method_meta(),
"intersect_key": self.get_intersect_key()}}
return cache_data, cache_meta
def cache_unified_calculation_process(self, data_instances, cache_data):
LOGGER.info("RSA intersect using cache.")
cache = self.extract_cache_list(cache_data, self.guest_party_id)[0]
# Recv guest ids
guest_pubkey_ids = self.transfer_variable.guest_pubkey_ids.get(idx=0)
LOGGER.info("Get guest_pubkey_ids from guest")
# Process(signs) guest ids and return to guest
host_sign_guest_ids = guest_pubkey_ids.map(lambda k, v: (k, self.sign_id(k,
self.d,
self.n,
self.p,
self.q,
self.cp,
self.cq)))
self.transfer_variable.host_sign_guest_ids.remote(host_sign_guest_ids,
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_sign_guest_ids_process to Guest.")
# recv intersect ids
intersect_ids = None
if self.sync_intersect_ids:
encrypt_intersect_ids = self.transfer_variable.intersect_ids.get(idx=0)
intersect_ids_pair = encrypt_intersect_ids.join(cache, lambda e, h: h)
intersect_ids = intersect_ids_pair.map(lambda k, v: (v, None))
LOGGER.info("Get intersect ids from Guest")
return intersect_ids
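# --- Hedged sketch of the CRT trick assumed to be behind gmpy_math.powmod_crt:
# computing the signature mod p and mod q separately and recombining (Garner's
# formula) matches pow(x, d, p*q) for x coprime to p and q, at a fraction of
# the cost for real key sizes.
def _toy_powmod_crt(x, d, p, q):
    sp = pow(x % p, d % (p - 1), p)    # signature mod p
    sq = pow(x % q, d % (q - 1), q)    # signature mod q
    q_inv = pow(q, -1, p)              # recombination coefficient
    return sq + ((q_inv * (sp - sq)) % p) * q

assert _toy_powmod_crt(1234, 2753, 61, 53) == pow(1234, 2753, 61 * 53)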
| 18,839 | 53.450867 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/statistic/intersect/rsa_intersect/__init__.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/feature/sparse_vector.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# Sparse Feature
# =============================================================================
class SparseVector(object):
"""
Sparse storage data format of federatedml
Parameters
----------
sparse_vec : dict, records (index, data) kv tuples
shape : the real feature shape of data
"""
def __init__(self, indices=None, data=None, shape=0):
self.sparse_vec = dict(zip(indices or [], data or []))  # tolerate the default None arguments
self.shape = shape
def get_data(self, pos, default_val=None):
return self.sparse_vec.get(pos, default_val)
def count_non_zeros(self):
return len(self.sparse_vec)
def count_zeros(self):
return self.shape - len(self.sparse_vec)
def get_shape(self):
return self.shape
def set_shape(self, shape):
self.shape = shape
def get_all_data(self):
for idx, data in self.sparse_vec.items():
yield idx, data
def get_sparse_vector(self):
return self.sparse_vec
def set_sparse_vector(self, sparse_vec):
self.sparse_vec = sparse_vec
@staticmethod
def is_sparse_vector():
return True
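# --- Minimal usage sketch of the class above: only the non-default slots are
# stored, everything else falls back to default_val.
vec = SparseVector(indices=[0, 3], data=[1.5, -2.0], shape=5)
assert vec.get_data(3) == -2.0
assert vec.get_data(1, default_val=0) == 0          # unset position
assert vec.count_non_zeros() == 2 and vec.count_zeros() == 3
assert dict(vec.get_all_data()) == {0: 1.5, 3: -2.0}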
| 2,047 | 27.054795 | 80 |
py
|
FATE
|
FATE-master/python/federatedml/feature/fate_element_type.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class NoneType(object):
def __eq__(self, obj):
return isinstance(obj, NoneType)
def __hash__(self):
return hash("FATENoneType")
| 818 | 29.333333 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/feature/imputer.py
|
import copy
import functools
import numpy as np
from federatedml.feature.fate_element_type import NoneType
from federatedml.feature.instance import Instance
from federatedml.statistic import data_overview
from federatedml.statistic.data_overview import get_header
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.util import LOGGER
from federatedml.util import consts
class Imputer(object):
"""
This class provides basic strategies for value replacement. It can be used for missing value imputation or outlier replacement.
You can use statistics such as the mean, median or max of each column to fill missing values or replace outliers.
"""
def __init__(self, missing_value_list=None):
"""
Parameters
----------
missing_value_list: list, the values to be replaced. Default None; if None, it will be set to a list of blank, none, null and na,
which are treated as missing values. Otherwise the imputer performs outlier replacement, and missing_value_list holds the outlier values.
"""
if missing_value_list is None:
self.missing_value_list = ['', 'none', 'null', 'na', 'None', np.nan]
else:
self.missing_value_list = missing_value_list
self.abnormal_value_list = copy.deepcopy(self.missing_value_list)
for i, v in enumerate(self.missing_value_list):
if v != v:
self.missing_value_list[i] = np.nan
self.abnormal_value_list[i] = NoneType()
self.abnormal_value_set = set(self.abnormal_value_list)
self.support_replace_method = ['min', 'max', 'mean', 'median', 'designated']
self.support_output_format = {
'str': str,
'float': float,
'int': int,
'origin': None
}
self.support_replace_area = {
'min': 'col',
'max': 'col',
'mean': 'col',
'median': 'col',
'designated': 'col'
}
self.cols_fit_impute_rate = []
self.cols_transform_impute_rate = []
self.cols_replace_method = []
self.skip_cols = []
def get_missing_value_list(self):
return self.missing_value_list
def get_cols_replace_method(self):
return self.cols_replace_method
def get_skip_cols(self):
return self.skip_cols
def get_impute_rate(self, mode="fit"):
if mode == "fit":
return list(self.cols_fit_impute_rate)
elif mode == "transform":
return list(self.cols_transform_impute_rate)
else:
raise ValueError("Unknown mode of {}".format(mode))
@staticmethod
def replace_missing_value_with_cols_transform_value_format(data, transform_list, missing_value_list,
output_format, skip_cols):
_data = copy.deepcopy(data)
replace_cols_index_list = []
if isinstance(_data, Instance):
for i, v in enumerate(_data.features):
if v in missing_value_list and i not in skip_cols:
_data.features[i] = output_format(transform_list[i])
replace_cols_index_list.append(i)
else:
_data.features[i] = output_format(v)  # Instance values live in .features
else:
for i, v in enumerate(_data):
if str(v) in missing_value_list and i not in skip_cols:
_data[i] = output_format(transform_list[i])
replace_cols_index_list.append(i)
else:
_data[i] = output_format(v)
return _data, replace_cols_index_list
@staticmethod
def replace_missing_value_with_cols_transform_value(data, transform_list, missing_value_list, skip_cols):
_data = copy.deepcopy(data)
replace_cols_index_list = []
if isinstance(_data, Instance):
new_features = []
for i, v in enumerate(_data.features):
if v in missing_value_list and i not in skip_cols:
# _data.features[i] = transform_list[i]
new_features.append(transform_list[i])
replace_cols_index_list.append(i)
else:
new_features.append(v)
if replace_cols_index_list:
# new features array will have lowest compatible dtype
_data.features = np.array(new_features)
else:
for i, v in enumerate(_data):
if str(v) in missing_value_list and i not in skip_cols:
_data[i] = str(transform_list[i])
replace_cols_index_list.append(i)
return _data, replace_cols_index_list
@staticmethod
def replace_missing_value_with_replace_value_format(data, replace_value, missing_value_list, output_format):
_data = copy.deepcopy(data)
replace_cols_index_list = []
if isinstance(_data, Instance):
for i, v in enumerate(_data.features):
if v in missing_value_list:
_data.features[i] = replace_value
replace_cols_index_list.append(i)
else:
_data.features[i] = output_format(v)  # Instance values live in .features
else:
for i, v in enumerate(_data):
if str(v) in missing_value_list:
_data[i] = output_format(replace_value)
replace_cols_index_list.append(i)
else:
_data[i] = output_format(_data[i])
return _data, replace_cols_index_list
@staticmethod
def replace_missing_value_with_replace_value(data, replace_value, missing_value_list):
_data = copy.deepcopy(data)
replace_cols_index_list = []
if isinstance(_data, Instance):
new_features = []
for i, v in enumerate(_data.features):
if v in missing_value_list:
# _data.features[i] = replace_value
new_features.append(replace_value)
replace_cols_index_list.append(i)
else:
new_features.append(v)
if replace_cols_index_list:
# make sure new features array has lowest compatible dtype
_data.features = np.array(new_features)
else:
for i, v in enumerate(_data):
if str(v) in missing_value_list:
_data[i] = str(replace_value)
replace_cols_index_list.append(i)
return _data, replace_cols_index_list
@staticmethod
def __get_cols_transform_method(data, replace_method, col_replace_method):
header = get_header(data)
if col_replace_method:
replace_method_per_col = {col_name: col_replace_method.get(col_name, replace_method) for col_name in header}
else:
replace_method_per_col = {col_name: replace_method for col_name in header}
skip_cols = [v for v in header if replace_method_per_col[v] is None]
return replace_method_per_col, skip_cols
def __get_cols_transform_value(self, data, replace_method, replace_value=None):
"""
Parameters
----------
data: input data
replace_method: dictionary of (column name, replace_method_name) pairs
Returns
-------
list of transform value for each column, length equal to feature count of input data
"""
summary_obj = MultivariateStatisticalSummary(data, -1, abnormal_list=self.abnormal_value_list)
header = get_header(data)
cols_transform_value = {}
if isinstance(replace_value, list):
if len(replace_value) != len(header):
raise ValueError(
f"replace value {replace_value} length does not match with header {header}, please check.")
for i, feature in enumerate(header):
if replace_method[feature] is None:
transform_value = 0
elif replace_method[feature] == consts.MIN:
transform_value = summary_obj.get_min()[feature]
elif replace_method[feature] == consts.MAX:
transform_value = summary_obj.get_max()[feature]
elif replace_method[feature] == consts.MEAN:
transform_value = summary_obj.get_mean()[feature]
elif replace_method[feature] == consts.MEDIAN:
transform_value = summary_obj.get_median()[feature]
elif replace_method[feature] == consts.DESIGNATED:
if isinstance(replace_value, list):
transform_value = replace_value[i]
else:
transform_value = replace_value
LOGGER.debug(f"replace value for feature {feature} is: {transform_value}")
else:
raise ValueError("Unknown replace method:{}".format(replace_method))
cols_transform_value[feature] = transform_value
LOGGER.debug(f"cols_transform value is: {cols_transform_value}")
cols_transform_value = [cols_transform_value[key] for key in header]
# cols_transform_value = {i: round(cols_transform_value[key], 6) for i, key in enumerate(header)}
LOGGER.debug(f"cols_transform value is: {cols_transform_value}")
return cols_transform_value
@staticmethod
def _transform_nan(instance):
feature_shape = instance.features.shape[0]
new_features = []
for i in range(feature_shape):
if instance.features[i] != instance.features[i]:
new_features.append(NoneType())
else:
new_features.append(instance.features[i])
new_instance = copy.deepcopy(instance)
new_instance.features = np.array(new_features)
return new_instance
def __fit_replace(self, data, replace_method, replace_value=None, output_format=None,
col_replace_method=None):
replace_method_per_col, skip_cols = self.__get_cols_transform_method(data, replace_method, col_replace_method)
schema = data.schema
if isinstance(data.first()[1], Instance):
data = data.mapValues(lambda v: Imputer._transform_nan(v))
data.schema = schema
cols_transform_value = self.__get_cols_transform_value(data, replace_method_per_col,
replace_value=replace_value)
self.skip_cols = skip_cols
skip_cols = [get_header(data).index(v) for v in skip_cols]
if output_format is not None:
f = functools.partial(Imputer.replace_missing_value_with_cols_transform_value_format,
transform_list=cols_transform_value, missing_value_list=self.abnormal_value_set,
output_format=output_format, skip_cols=set(skip_cols))
else:
f = functools.partial(Imputer.replace_missing_value_with_cols_transform_value,
transform_list=cols_transform_value, missing_value_list=self.abnormal_value_set,
skip_cols=set(skip_cols))
transform_data = data.mapValues(f)
self.cols_replace_method = replace_method_per_col
LOGGER.info(
"finish replace missing value with cols transform value, replace method is {}".format(replace_method))
return transform_data, cols_transform_value
def __transform_replace(self, data, transform_value, replace_area, output_format, skip_cols):
skip_cols = [get_header(data).index(v) for v in skip_cols]
schema = data.schema
if isinstance(data.first()[1], Instance):
data = data.mapValues(lambda v: Imputer._transform_nan(v))
data.schema = schema
if replace_area == 'all':
if output_format is not None:
f = functools.partial(Imputer.replace_missing_value_with_replace_value_format,
replace_value=transform_value, missing_value_list=self.abnormal_value_set,
output_format=output_format)
else:
f = functools.partial(Imputer.replace_missing_value_with_replace_value,
replace_value=transform_value, missing_value_list=self.abnormal_value_set)
elif replace_area == 'col':
if output_format is not None:
f = functools.partial(Imputer.replace_missing_value_with_cols_transform_value_format,
transform_list=transform_value, missing_value_list=self.abnormal_value_set,
output_format=output_format,
skip_cols=set(skip_cols))
else:
f = functools.partial(Imputer.replace_missing_value_with_cols_transform_value,
transform_list=transform_value, missing_value_list=self.abnormal_value_set,
skip_cols=set(skip_cols))
else:
raise ValueError("Unknown replace area {} in Imputer".format(replace_area))
return data.mapValues(f)
@staticmethod
def __get_impute_number(some_data):
impute_num_list = None
data_size = None
for line in some_data:
processed_data = line[1][0]
index_list = line[1][1]
if not data_size:
if isinstance(processed_data, Instance):
data_size = data_overview.get_instance_shape(processed_data)
else:
data_size = len(processed_data)
# data_size + 1, the last element of impute_num_list used to count the number of "some_data"
impute_num_list = [0 for _ in range(data_size + 1)]
impute_num_list[data_size] += 1
for index in index_list:
impute_num_list[index] += 1
return np.array(impute_num_list)
def __get_impute_rate_from_replace_data(self, data):
impute_number_statics = data.applyPartitions(self.__get_impute_number).reduce(lambda x, y: x + y)
cols_impute_rate = impute_number_statics[:-1] / impute_number_statics[-1]
return cols_impute_rate
def fit(self, data, replace_method=None, replace_value=None, output_format=consts.ORIGIN,
col_replace_method=None):
"""
Apply imputer for input data
Parameters
----------
data: Table, each data's value should be list
replace_method: str, the strategy of imputer, like min, max, mean or designated and so on. Default None
replace_value: str, if replace_method is designated, you should assign the replace_value, which will be used to replace the values found in missing_value_list
output_format: str, the output data format. The output data can be 'str', 'int', 'float'. Default origin, the original format as input data
col_replace_method: dict of (col_name, replace_method), any col_name not included will take replace_method
Returns
----------
fit_data: data_instance, data after imputation
cols_transform_value: list, the replace value in each column
"""
if output_format not in self.support_output_format:
raise ValueError("Unsupport output_format:{}".format(output_format))
output_format = self.support_output_format[output_format]
if isinstance(replace_method, str):
replace_method = replace_method.lower()
if replace_method not in self.support_replace_method:
raise ValueError("Unknown replace method:{}".format(replace_method))
elif replace_method is None and col_replace_method is None:
if isinstance(data.first()[1], Instance):
replace_value = 0
else:
replace_value = '0'
elif replace_method is None and col_replace_method is not None:
LOGGER.debug(f"perform computation on selected cols only: {col_replace_method}")
else:
raise ValueError("parameter replace_method should be str or None only")
if isinstance(col_replace_method, dict):
for col_name, method in col_replace_method.items():
method = method.lower()
if method not in self.support_replace_method:
raise ValueError("Unknown replace method:{}".format(method))
col_replace_method[col_name] = method
process_data, cols_transform_value = self.__fit_replace(data, replace_method, replace_value, output_format,
col_replace_method=col_replace_method)
self.cols_fit_impute_rate = self.__get_impute_rate_from_replace_data(process_data)
process_data = process_data.mapValues(lambda v: v[0])
process_data.schema = data.schema
return process_data, cols_transform_value
def transform(self, data, transform_value, output_format=consts.ORIGIN, skip_cols=None):
"""
Transform input data using Imputer with fit results
Parameters
----------
data: Table, each data's value should be list
transform_value:
output_format: str, the output data format. The output data can be 'str', 'int', 'float'. Default origin, the original format as input data
Returns
----------
transform_data: data_instance, data after transform
"""
if output_format not in self.support_output_format:
raise ValueError("Unsupport output_format:{}".format(output_format))
output_format = self.support_output_format[output_format]
skip_cols = [] if skip_cols is None else skip_cols
# Now all of replace_method is "col", remain replace_area temporarily
# replace_area = self.support_replace_area[replace_method]
replace_area = "col"
process_data = self.__transform_replace(data, transform_value, replace_area, output_format, skip_cols)
self.cols_transform_impute_rate = self.__get_impute_rate_from_replace_data(process_data)
process_data = process_data.mapValues(lambda v: v[0])
process_data.schema = data.schema
return process_data
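# --- Hedged standalone sketch (numpy instead of FATE Tables) of what fit()
# with replace_method="mean" computes per column: one transform value from the
# non-missing entries, then row-wise replacement of the missing slots.
import numpy as np

def _toy_mean_impute(rows, missing=("", "na", None)):
    arr = np.array([[np.nan if v in missing else float(v) for v in row]
                    for row in rows], dtype=float)
    col_means = np.nanmean(arr, axis=0)          # per-column transform values
    return np.where(np.isnan(arr), col_means, arr), col_means

_filled, _means = _toy_mean_impute([[1, ""], [3, 4], ["na", 8]])
assert list(_means) == [2.0, 6.0]
assert _filled[0][1] == 6.0 and _filled[2][0] == 2.0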
| 18,463 | 44.590123 | 158 |
py
|
FATE
|
FATE-master/python/federatedml/feature/sampler.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import math
import random
from sklearn.utils import resample
from fate_arch.session import computing_session as session
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.model_base import ModelBase
from federatedml.param.sample_param import SampleParam
from federatedml.transfer_variable.transfer_class.sample_transfer_variable import SampleTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.schema_check import assert_schema_consistent
from fate_arch.common.base_utils import fate_uuid
class RandomSampler(object):
"""
Random Sampling Method
Parameters
----------
fraction : None or float, sampling ratio, default: 0.1
random_state: int, RandomState instance or None, optional, default: None
method: str, supported "upsample", "downsample" only in this version, default: "downsample"
"""
def __init__(self, fraction=0.1, random_state=None, method="downsample"):
self.fraction = fraction
self.random_state = random_state
self.method = method
self.tracker = None
self._summary_buf = {}
def set_tracker(self, tracker):
self.tracker = tracker
def sample(self, data_inst, sample_ids=None):
"""
Interface to call random sample method
Parameters
----------
data_inst : Table
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
otherwise, it will be sample transform process, which means use the samples_ids to generate data
Returns
-------
new_data_inst: Table
the output sample data, same format with input
sample_ids: list, return only if sample_ids is None
"""
if sample_ids is None:
new_data_inst, sample_ids = self.__sample(data_inst)
return new_data_inst, sample_ids
else:
new_data_inst = self.__sample(data_inst, sample_ids)
return new_data_inst
def __sample(self, data_inst, sample_ids=None):
"""
Random sample method: a line's occurrence probability is decided by fraction;
supports both down sampling and up sampling.
For down sampling, fraction should be a float in [0, 1];
for up sampling, fraction should be a float ratio larger than 1.0.
Parameters
----------
data_inst : Table
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be the sample transform process, which means using the sample_ids to generate data
Returns
-------
new_data_inst: Table
the output sample data, same format with input
sample_ids: list, return only if sample_ids is None
"""
LOGGER.info("start to run random sampling")
return_sample_ids = False
if self.method == "downsample":
if sample_ids is None:
return_sample_ids = True
idset = [key for key, value in data_inst.mapValues(lambda val: None).collect()]
if self.fraction < 0 or self.fraction > 1:
raise ValueError("sapmle fractions should be a numeric number between 0 and 1inclusive")
sample_num = max(1, int(self.fraction * len(idset)))
sample_ids = resample(idset,
replace=False,
n_samples=sample_num,
random_state=self.random_state)
sample_dtable = session.parallelize(zip(sample_ids, range(len(sample_ids))),
include_key=True,
partition=data_inst.partitions)
new_data_inst = data_inst.join(sample_dtable, lambda v1, v2: v1)
callback(self.tracker, "random", [Metric("count", new_data_inst.count())], summary_dict=self._summary_buf)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
elif self.method == "upsample":
data_set = list(data_inst.collect())
idset = [key for (key, value) in data_set]
id_maps = dict(zip(idset, range(len(idset))))
if sample_ids is None:
return_sample_ids = True
if self.fraction <= 0:
raise ValueError("sample fractions should be a numeric number large than 0")
sample_num = int(self.fraction * len(idset))
sample_ids = resample(idset,
replace=True,
n_samples=sample_num,
random_state=self.random_state)
new_data = []
for i in range(len(sample_ids)):
index = id_maps[sample_ids[i]]
new_data.append((i, data_set[index][1]))
new_data_inst = session.parallelize(new_data,
include_key=True,
partition=data_inst.partitions)
callback(self.tracker, "random", [Metric("count", new_data_inst.count())], summary_dict=self._summary_buf)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
else:
raise ValueError("random sampler not support method {} yet".format(self.method))
def get_summary(self):
return self._summary_buf
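# A minimal usage sketch for RandomSampler (illustrative only: `data_inst` is
# assumed to be a Table of instances in an initialized FATE computing session,
# and `tracker` a job tracker, since sampling reports metrics through it):
#
#     sampler = RandomSampler(fraction=0.5, random_state=42, method="downsample")
#     sampler.set_tracker(tracker)
#     sampled, sample_ids = sampler.sample(data_inst)    # draw a fresh sample
#     replayed = sampler.sample(data_inst, sample_ids)   # replay the same ids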
class StratifiedSampler(object):
"""
Stratified Sampling Method
Parameters
----------
fractions : None or list of (category, sample ratio) tuple,
sampling ratios of each category, default: None
e.g.
        [(0, 0.5), (1, 0.1)] in down-sampling, [(1, 1.5), (0, 1.8)] in up-sampling, where 0/1 are the observed categories.
random_state: int, RandomState instance or None, optional, default: None
method: str, supported "upsample", "downsample", default: "downsample"
"""
def __init__(self, fractions=None, random_state=None, method="downsample"):
self.fractions = fractions
self.label_mapping = {}
self.labels = []
if fractions:
for (label, frac) in fractions:
self.label_mapping[label] = len(self.labels)
self.labels.append(label)
# self.label_mapping = [label for (label, frac) in fractions]
self.random_state = random_state
self.method = method
self.tracker = None
self._summary_buf = {}
def set_tracker(self, tracker):
self.tracker = tracker
def sample(self, data_inst, sample_ids=None):
"""
Interface to call stratified sample method
Parameters
----------
data_inst : Table
The input data
sample_ids : None or list
            if None, will sample data according to the class instance's sample parameters,
            otherwise, it will be the sample transform process, which means using the sample_ids to generate data
Returns
-------
new_data_inst: Table
the output sample data, same format with input
sample_ids: list, return only if sample_ids is None
"""
if sample_ids is None:
new_data_inst, sample_ids = self.__sample(data_inst)
return new_data_inst, sample_ids
else:
new_data_inst = self.__sample(data_inst, sample_ids)
return new_data_inst
def __sample(self, data_inst, sample_ids=None):
"""
        Stratified sample method: each instance's probability of being kept is decided by the fraction of its label's category.
        Input should be a Table whose values are instance objects with labels.
        To use this method, a list of (category, ratio) tuples should be given, and its length
        must equal the number of distinct labels.
        Supports both down-sampling and up-sampling:
        if down-sampling, each ratio should be a float in [0, 1];
        otherwise, each ratio should be a float no less than 1.0
Parameters
----------
data_inst : Table
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be the sample transform process, which means using the sample_ids to generate data
Returns
-------
new_data_inst: Table
            the output sample data, same format with input
sample_ids: list, return only if sample_ids is None
"""
LOGGER.info("start to run stratified sampling")
return_sample_ids = False
if self.method == "downsample":
if sample_ids is None:
idset = [[] for i in range(len(self.fractions))]
for label, fraction in self.fractions:
if fraction < 0 or fraction > 1:
raise ValueError("sapmle fractions should be a numeric number between 0 and 1inclusive")
return_sample_ids = True
for key, inst in data_inst.collect():
label = inst.label
if label not in self.label_mapping:
raise ValueError("label not specify sample rate! check it please")
idset[self.label_mapping[label]].append(key)
sample_ids = []
callback_sample_metrics = []
callback_original_metrics = []
for i in range(len(idset)):
label_name = self.labels[i]
callback_original_metrics.append(Metric(label_name, len(idset[i])))
if idset[i]:
sample_num = max(1, int(self.fractions[i][1] * len(idset[i])))
_sample_ids = resample(idset[i],
replace=False,
n_samples=sample_num,
random_state=self.random_state)
sample_ids.extend(_sample_ids)
callback_sample_metrics.append(Metric(label_name, len(_sample_ids)))
else:
callback_sample_metrics.append(Metric(label_name, 0))
random.shuffle(sample_ids)
callback(
self.tracker,
"stratified",
callback_sample_metrics,
callback_original_metrics,
self._summary_buf)
sample_dtable = session.parallelize(zip(sample_ids, range(len(sample_ids))),
include_key=True,
partition=data_inst.partitions)
new_data_inst = data_inst.join(sample_dtable, lambda v1, v2: v1)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
elif self.method == "upsample":
data_set = list(data_inst.collect())
ids = [key for (key, inst) in data_set]
id_maps = dict(zip(ids, range(len(ids))))
return_sample_ids = False
if sample_ids is None:
idset = [[] for i in range(len(self.fractions))]
for label, fraction in self.fractions:
if fraction <= 0:
raise ValueError("sapmle fractions should be a numeric number greater than 0")
for key, inst in data_set:
label = inst.label
if label not in self.label_mapping:
raise ValueError("label not specify sample rate! check it please")
idset[self.label_mapping[label]].append(key)
return_sample_ids = True
sample_ids = []
callback_sample_metrics = []
callback_original_metrics = []
for i in range(len(idset)):
label_name = self.labels[i]
callback_original_metrics.append(Metric(label_name, len(idset[i])))
if idset[i]:
sample_num = max(1, int(self.fractions[i][1] * len(idset[i])))
_sample_ids = resample(idset[i],
replace=True,
n_samples=sample_num,
random_state=self.random_state)
sample_ids.extend(_sample_ids)
callback_sample_metrics.append(Metric(label_name, len(_sample_ids)))
else:
callback_sample_metrics.append(Metric(label_name, 0))
random.shuffle(sample_ids)
callback(
self.tracker,
"stratified",
callback_sample_metrics,
callback_original_metrics,
self._summary_buf)
new_data = []
for i in range(len(sample_ids)):
index = id_maps[sample_ids[i]]
new_data.append((i, data_set[index][1]))
new_data_inst = session.parallelize(new_data,
include_key=True,
partition=data_inst.partitions)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
else:
raise ValueError("Stratified sampler not support method {} yet".format(self.method))
def get_summary(self):
return self._summary_buf
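# A minimal usage sketch for StratifiedSampler (illustrative only: the labels
# 0/1 and the per-label ratios are made up, and `data_inst`/`tracker` are
# assumed as in the RandomSampler sketch above):
#
#     sampler = StratifiedSampler(fractions=[(0, 0.3), (1, 1.0)],
#                                 random_state=42, method="downsample")
#     sampler.set_tracker(tracker)
#     sampled, sample_ids = sampler.sample(data_inst)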
class ExactSampler(object):
"""
Exact Sampling Method
Parameters
----------
"""
def __init__(self):
self.tracker = None
self._summary_buf = {}
def set_tracker(self, tracker):
self.tracker = tracker
def get_sample_ids(self, data_inst):
original_sample_count = data_inst.count()
non_zero_data_inst = data_inst.filter(lambda k, v: v.weight > consts.FLOAT_ZERO)
        non_zero_sample_count = non_zero_data_inst.count()
        if original_sample_count != non_zero_sample_count:
            sample_diff = original_sample_count - non_zero_sample_count
            LOGGER.warning(f"{sample_diff} zero-weighted sample(s) encountered, will be discarded in final result.")
def __generate_new_ids(v):
if v.inst_id is None:
raise ValueError(f"To sample with `exact_by_weight` mode, instances must have match id."
f"Please check.")
new_key_num = math.ceil(v.weight)
new_sample_id_list = [fate_uuid() for _ in range(new_key_num)]
return new_sample_id_list
sample_ids = non_zero_data_inst.mapValues(lambda v: __generate_new_ids(v))
return sample_ids
def sample(self, data_inst, sample_ids=None):
"""
        Interface to call exact sample method
Parameters
----------
data_inst : Table
The input data
sample_ids : Table
            use the sample_ids to generate data
Returns
-------
new_data_inst: Table
the output sample data, same format with input
"""
LOGGER.info("start to generate exact sampling result")
new_data_inst = self.__sample(data_inst, sample_ids)
return new_data_inst
def __sample(self, data_inst, sample_ids):
"""
        Exact sample method, duplicate samples by their corresponding weights:
        if weight <= 0, discard the sample;
        otherwise, duplicate ceil(weight) copies of the sample
Parameters
----------
data_inst : Table
The input data
sample_ids : Table
            use the sample_ids to generate data
Returns
-------
new_data_inst: Table
            the output sample data, same format with input
"""
sample_ids_map = data_inst.join(sample_ids, lambda v, ids: (v, ids))
def __sample_new_id(k, v_id_map):
v, id_map = v_id_map
return [(new_id, v) for new_id in id_map]
new_data_inst = sample_ids_map.flatMap(functools.partial(__sample_new_id))
data_count = new_data_inst.count()
if data_count is None:
data_count = 0
LOGGER.warning(f"All data instances discarded. Please check weight.")
callback(self.tracker, "exact_by_weight", [Metric("count", data_count)], summary_dict=self._summary_buf)
return new_data_inst
def get_summary(self):
return self._summary_buf
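# A minimal usage sketch for ExactSampler (illustrative only: every instance is
# assumed to carry a match id and a positive weight, with `data_inst`/`tracker`
# assumed as in the sketches above):
#
#     sampler = ExactSampler()
#     sampler.set_tracker(tracker)
#     sample_ids = sampler.get_sample_ids(data_inst)     # ceil(weight) new ids per instance
#     duplicated = sampler.sample(data_inst, sample_ids)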
class Sampler(ModelBase):
"""
Sampling Object
Parameters
----------
sample_param : object, self-define sample parameters,
define in federatedml.param.sample_param
"""
def __init__(self):
super(Sampler, self).__init__()
self.task_type = None
# self.task_role = None
self.flowid = 0
self.model_param = SampleParam()
def _init_model(self, sample_param):
if sample_param.mode == "random":
self.sampler = RandomSampler(sample_param.fractions,
sample_param.random_state,
sample_param.method)
self.sampler.set_tracker(self.tracker)
elif sample_param.mode == "stratified":
self.sampler = StratifiedSampler(sample_param.fractions,
sample_param.random_state,
sample_param.method)
self.sampler.set_tracker(self.tracker)
elif sample_param.mode == "exact_by_weight":
self.sampler = ExactSampler()
self.sampler.set_tracker(self.tracker)
else:
raise ValueError("{} sampler not support yet".format(sample_param.mde))
self.task_type = sample_param.task_type
def _init_role(self, component_parameters):
self.task_role = component_parameters["local"]["role"]
def sample(self, data_inst, sample_ids=None):
"""
Entry to use sample method
Parameters
----------
data_inst : Table
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be the sample transform process, which means using the sample_ids to generate data
Returns
-------
sample_data: Table
the output sample data, same format with input
"""
ori_schema = data_inst.schema
sample_data = self.sampler.sample(data_inst, sample_ids)
self.set_summary(self.sampler.get_summary())
try:
if len(sample_data) == 2:
sample_data[0].schema = ori_schema
except BaseException:
sample_data.schema = ori_schema
return sample_data
def set_flowid(self, flowid="samole"):
self.flowid = flowid
def sync_sample_ids(self, sample_ids):
transfer_inst = SampleTransferVariable()
transfer_inst.sample_ids.remote(sample_ids,
role="host",
suffix=(self.flowid,))
def recv_sample_ids(self):
transfer_inst = SampleTransferVariable()
sample_ids = transfer_inst.sample_ids.get(idx=0,
suffix=(self.flowid,))
return sample_ids
def run_sample(self, data_inst, task_type, task_role):
"""
Sample running entry
Parameters
----------
data_inst : Table
The input data
task_type : "homo" or "hetero"
if task_type is "homo", it will sample standalone
if task_type is "heterl": then sampling will be done in one side, after that
the side sync the sample ids to another side to generated the same sample result
task_role: "guest" or "host":
only consider this parameter when task_type is "hetero"
if task_role is "guest", it will firstly sample ids, and sync it to "host"
to generate data instances with sample ids
if task_role is "host": it will firstly get the sample ids result of "guest",
then generate sample data by the receiving ids
Returns
-------
sample_data_inst: Table
the output sample data, same format with input
"""
LOGGER.info("begin to run sampling process")
if task_type not in [consts.HOMO, consts.HETERO]:
raise ValueError("{} task type not support yet".format(task_type))
if task_type == consts.HOMO:
return self.sample(data_inst)[0]
elif task_type == consts.HETERO:
if task_role == consts.GUEST:
if self.model_param.mode == "exact_by_weight":
LOGGER.info("start to run exact sampling")
sample_ids = self.sampler.get_sample_ids(data_inst)
self.sync_sample_ids(sample_ids)
sample_data_inst = self.sample(data_inst, sample_ids)
else:
sample_data_inst, sample_ids = self.sample(data_inst)
self.sync_sample_ids(sample_ids)
elif task_role == consts.HOST:
sample_ids = self.recv_sample_ids()
sample_data_inst = self.sample(data_inst, sample_ids)
else:
raise ValueError("{} role not support yet".format(task_role))
return sample_data_inst
@assert_schema_consistent
def fit(self, data_inst):
return self.run_sample(data_inst, self.task_type, self.role)
def transform(self, data_inst):
return self.run_sample(data_inst, self.task_type, self.role)
def check_consistency(self):
pass
def save_data(self):
return self.data_output
def callback(tracker, method, callback_metrics, other_metrics=None, summary_dict=None):
LOGGER.debug("callback: method is {}".format(method))
if method == "random":
tracker.log_metric_data("sample_count",
"random",
callback_metrics)
tracker.set_metric_meta("sample_count",
"random",
MetricMeta(name="sample_count",
metric_type="SAMPLE_TEXT"))
summary_dict["sample_count"] = callback_metrics[0].value
elif method == "stratified":
LOGGER.debug(
"callback: name {}, namespace {}, metrics_data {}".format("sample_count", "stratified", callback_metrics))
tracker.log_metric_data("sample_count",
"stratified",
callback_metrics)
tracker.set_metric_meta("sample_count",
"stratified",
MetricMeta(name="sample_count",
metric_type="SAMPLE_TABLE"))
tracker.log_metric_data("original_count",
"stratified",
other_metrics)
tracker.set_metric_meta("original_count",
"stratified",
MetricMeta(name="original_count",
metric_type="SAMPLE_TABLE"))
summary_dict["sample_count"] = {}
for sample_metric in callback_metrics:
summary_dict["sample_count"][sample_metric.key] = sample_metric.value
summary_dict["original_count"] = {}
for sample_metric in other_metrics:
summary_dict["original_count"][sample_metric.key] = sample_metric.value
else:
LOGGER.debug(
f"callback: metrics_data {callback_metrics}, summary dict: {summary_dict}")
tracker.log_metric_data("sample_count",
"exact_by_weight",
callback_metrics)
tracker.set_metric_meta("sample_count",
"exact_by_weight",
MetricMeta(name="sample_count",
metric_type="SAMPLE_TEXT"))
summary_dict["sample_count"] = callback_metrics[0].value
| 25,786 | 34.716066 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/feature/column_expand.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from federatedml.model_base import ModelBase
from federatedml.param.column_expand_param import ColumnExpandParam
from federatedml.protobuf.generated import column_expand_meta_pb2, column_expand_param_pb2
from federatedml.util import consts, LOGGER, data_format_preprocess
DELIMITER = ","
class FeatureGenerator(object):
def __init__(self, method, append_header, fill_value):
self.method = method
self.append_header = append_header
self.fill_value = fill_value
self.append_value = self._get_append_value()
self.generator = self._get_generator()
def _get_append_value(self):
if len(self.fill_value) == 0:
return
if len(self.fill_value) == 1:
fill_value = str(self.fill_value[0])
new_features = [fill_value] * len(self.append_header)
append_value = DELIMITER.join(new_features)
else:
append_value = DELIMITER.join([str(v) for v in self.fill_value])
return append_value
def _get_generator(self):
while True:
yield self.append_value
def generate(self):
return next(self.generator)
class ColumnExpand(ModelBase):
def __init__(self):
super(ColumnExpand, self).__init__()
self.model_param = ColumnExpandParam()
self.need_run = None
self.append_header = None
self.method = None
self.fill_value = None
self.summary_obj = None
self.header = None
self.new_feature_generator = None
self.model_param_name = 'ColumnExpandParam'
self.model_meta_name = 'ColumnExpandMeta'
def _init_model(self, params):
self.model_param = params
self.need_run = params.need_run
self.append_header = params.append_header
self.method = params.method
self.fill_value = params.fill_value
self.new_feature_generator = FeatureGenerator(params.method,
params.append_header,
params.fill_value)
@staticmethod
def _append_feature(entry, append_value):
# empty content
if entry is None or len(entry) == 0:
new_entry = append_value
else:
new_entry = entry + DELIMITER + append_value
return new_entry
def _append_column(self, data):
append_value = self.new_feature_generator.generate()
new_data = data.mapValues(lambda v: ColumnExpand._append_feature(v, append_value))
new_schema = copy.deepcopy(data.schema)
header = new_schema.get("header", "")
new_schema = data_format_preprocess.DataFormatPreProcess.extend_header(new_schema, self.append_header)
if len(header) == 0:
if new_schema.get("sid", None) is not None:
new_schema["sid"] = new_schema.get("sid").strip()
if new_schema.get("meta"):
anonymous_header = new_schema.get("anonymous_header", [])
new_anonymous_header = self.anonymous_generator.extend_columns(anonymous_header,
self.append_header)
new_schema["anonymous_header"] = new_anonymous_header
new_data.schema = new_schema
new_header = new_schema.get("header")
return new_data, new_header
def _get_meta(self):
meta = column_expand_meta_pb2.ColumnExpandMeta(
append_header=self.append_header,
method=self.method,
fill_value=[str(v) for v in self.fill_value],
need_run=self.need_run
)
return meta
def _get_param(self):
param = column_expand_param_pb2.ColumnExpandParam(header=self.header)
return param
def export_model(self):
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
self.model_output = result
return result
def load_model(self, model_dict):
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
param_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
self.append_header = list(meta_obj.append_header)
self.method = meta_obj.method
self.fill_value = list(meta_obj.fill_value)
self.need_run = meta_obj.need_run
self.new_feature_generator = FeatureGenerator(self.method,
self.append_header,
self.fill_value)
self.header = param_obj.header
return
def fit(self, data):
LOGGER.info(f"Enter Column Expand fit")
# return original value if no append header provided
if self.method == consts.MANUAL and len(self.append_header) == 0:
LOGGER.info(f"Finish Column Expand fit. Original data returned.")
self.header = data.schema["header"]
return data
new_data, self.header = self._append_column(data)
LOGGER.info(f"Finish Column Expand fit")
return new_data
def transform(self, data):
LOGGER.info(f"Enter Column Expand transform")
if self.method == consts.MANUAL and len(self.append_header) == 0:
LOGGER.info(f"Finish Column Expand transform. Original data returned.")
return data
new_data, self.header = self._append_column(data)
LOGGER.info(f"Finish Column Expand transform")
return new_data
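if __name__ == "__main__":
    # Minimal self-check sketch for FeatureGenerator (not part of the FATE
    # pipeline; the header names and fill values below are made up).
    # A single fill value is broadcast to every appended column:
    gen = FeatureGenerator(method="manual", append_header=["x3", "x4"], fill_value=[0])
    assert gen.generate() == "0,0"
    # One fill value per appended column is joined in order:
    gen = FeatureGenerator(method="manual", append_header=["x3", "x4"], fill_value=[1, 2])
    assert gen.generate() == "1,2"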
| 6,268 | 36.76506 | 110 |
py
|
FATE
|
FATE-master/python/federatedml/feature/scale.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.model_base import MetricMeta
from federatedml.feature.feature_scale.min_max_scale import MinMaxScale
from federatedml.feature.feature_scale.standard_scale import StandardScale
from federatedml.model_base import ModelBase
from federatedml.param.scale_param import ScaleParam
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.util.schema_check import assert_schema_consistent
class Scale(ModelBase):
"""
    The Scale class is used for data scaling. MinMaxScale and StandardScale are supported now.
"""
def __init__(self):
super().__init__()
self.model_name = None
self.model_param_name = 'ScaleParam'
self.model_meta_name = 'ScaleMeta'
self.model_param = ScaleParam()
self.scale_param_obj = None
self.scale_obj = None
self.header = None
self.column_max_value = None
self.column_min_value = None
self.mean = None
self.std = None
self.scale_column_idx = None
def fit(self, data):
"""
Apply scale for input data
Parameters
----------
data: data_instance, input data
Returns
----------
        fit_data: data_instance, data after scale
"""
LOGGER.info("Start scale data fit ...")
if self.model_param.method == consts.MINMAXSCALE:
self.scale_obj = MinMaxScale(self.model_param)
elif self.model_param.method == consts.STANDARDSCALE:
self.scale_obj = StandardScale(self.model_param)
else:
LOGGER.warning("Scale method is {}, do nothing and return!".format(self.model_param.method))
if self.scale_obj:
fit_data = self.scale_obj.fit(data)
fit_data.schema = data.schema
self.callback_meta(metric_name="scale", metric_namespace="train",
metric_meta=MetricMeta(name="scale", metric_type="SCALE",
extra_metas={"method": self.model_param.method}))
LOGGER.info("start to get model summary ...")
self.set_summary(self.scale_obj.get_model_summary())
LOGGER.info("Finish getting model summary.")
else:
fit_data = data
LOGGER.info("End fit data ...")
return fit_data
@assert_io_num_rows_equal
@assert_schema_consistent
def transform(self, data, fit_config=None):
"""
Transform input data using scale with fit results
Parameters
----------
data: data_instance, input data
fit_config: list, the fit results information of scale
Returns
----------
transform_data:data_instance, data after transform
"""
LOGGER.info("Start scale data transform ...")
if self.model_param.method == consts.MINMAXSCALE:
self.scale_obj = MinMaxScale(self.model_param)
elif self.model_param.method == consts.STANDARDSCALE:
self.scale_obj = StandardScale(self.model_param)
self.scale_obj.set_param(self.mean, self.std)
else:
LOGGER.info("DataTransform method is {}, do nothing and return!".format(self.model_param.method))
if self.scale_obj:
self.scale_obj.header = self.header
self.scale_obj.scale_column_idx = self.scale_column_idx
self.scale_obj.set_column_range(self.column_max_value, self.column_min_value)
transform_data = self.scale_obj.transform(data)
transform_data.schema = data.schema
self.callback_meta(metric_name="scale", metric_namespace="train",
metric_meta=MetricMeta(name="scale", metric_type="SCALE",
extra_metas={"method": self.model_param.method}))
else:
transform_data = data
LOGGER.info("End transform data.")
return transform_data
def load_model(self, model_dict):
model_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
self.header = list(model_obj.header)
self.need_run = meta_obj.need_run
self.model_param.method = meta_obj.method
shape = len(self.header)
self.column_max_value = [0 for _ in range(shape)]
self.column_min_value = [0 for _ in range(shape)]
self.mean = [0 for _ in range(shape)]
self.std = [1 for _ in range(shape)]
self.scale_column_idx = []
scale_param_dict = dict(model_obj.col_scale_param)
header_index_mapping = dict(zip(self.header, range(len(self.header))))
for key, column_scale_param in scale_param_dict.items():
# index = self.header.index(key)
index = header_index_mapping[key]
self.scale_column_idx.append(index)
self.column_max_value[index] = column_scale_param.column_upper
self.column_min_value[index] = column_scale_param.column_lower
self.mean[index] = column_scale_param.mean
self.std[index] = column_scale_param.std
self.scale_column_idx.sort()
def export_model(self):
if not self.scale_obj:
if self.model_param.method == consts.MINMAXSCALE:
self.scale_obj = MinMaxScale(self.model_param)
else:
self.scale_obj = StandardScale(self.model_param)
return self.scale_obj.export_model(self.need_run)
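# A minimal usage sketch for Scale (illustrative only: `scale_param` is assumed
# to be a configured ScaleParam and `train_data` a Table of instances):
#
#     scaler = Scale()
#     scaler._init_model(scale_param)
#     scaled_train = scaler.fit(train_data)
#
# At predict time a fresh Scale instance typically restores the fitted state
# through load_model(...) before calling transform(test_data).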
| 6,346 | 37.466667 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_imputation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.model_base import ModelBase
from federatedml.feature.imputer import Imputer
from federatedml.protobuf.generated.feature_imputation_meta_pb2 import FeatureImputationMeta, FeatureImputerMeta
from federatedml.protobuf.generated.feature_imputation_param_pb2 import FeatureImputationParam, FeatureImputerParam
from federatedml.statistic.data_overview import get_header
from federatedml.util import LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
class FeatureImputation(ModelBase):
def __init__(self):
super(FeatureImputation, self).__init__()
self.summary_obj = None
self.missing_impute_rate = None
self.skip_cols = []
self.cols_replace_method = None
self.header = None
from federatedml.param.feature_imputation_param import FeatureImputationParam
self.model_param = FeatureImputationParam()
self.model_param_name = 'FeatureImputationParam'
self.model_meta_name = 'FeatureImputationMeta'
def _init_model(self, model_param):
self.missing_fill_method = model_param.missing_fill_method
self.col_missing_fill_method = model_param.col_missing_fill_method
self.default_value = model_param.default_value
self.missing_impute = model_param.missing_impute
def get_summary(self):
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
missing_summary["skip_cols"] = self.skip_cols
return missing_summary
def load_model(self, model_dict):
param_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
self.header = param_obj.header
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value, self.skip_cols = load_feature_imputer_model(self.header,
"Imputer",
meta_obj.imputer_meta,
param_obj.imputer_param)
def save_model(self):
meta_obj, param_obj = save_feature_imputer_model(missing_fill=True,
missing_replace_method=self.missing_fill_method,
cols_replace_method=self.cols_replace_method,
missing_impute=self.missing_impute,
missing_fill_value=self.default_value,
missing_replace_rate=self.missing_impute_rate,
header=self.header,
skip_cols=self.skip_cols)
return meta_obj, param_obj
def export_model(self):
missing_imputer_meta, missing_imputer_param = self.save_model()
meta_obj = FeatureImputationMeta(need_run=self.need_run,
imputer_meta=missing_imputer_meta)
param_obj = FeatureImputationParam(header=self.header,
imputer_param=missing_imputer_param)
model_dict = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
return model_dict
@assert_io_num_rows_equal
def fit(self, data):
LOGGER.info(f"Enter Feature Imputation fit")
imputer_processor = Imputer(self.missing_impute)
self.header = get_header(data)
if self.col_missing_fill_method:
for k in self.col_missing_fill_method.keys():
if k not in self.header:
raise ValueError(f"{k} not found in data header. Please check col_missing_fill_method keys.")
imputed_data, self.default_value = imputer_processor.fit(data,
replace_method=self.missing_fill_method,
replace_value=self.default_value,
col_replace_method=self.col_missing_fill_method)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
self.missing_impute_rate = imputer_processor.get_impute_rate("fit")
# self.header = get_header(imputed_data)
self.cols_replace_method = imputer_processor.cols_replace_method
self.skip_cols = imputer_processor.get_skip_cols()
self.set_summary(self.get_summary())
return imputed_data
@assert_io_num_rows_equal
def transform(self, data):
LOGGER.info(f"Enter Feature Imputation transform")
imputer_processor = Imputer(self.missing_impute)
imputed_data = imputer_processor.transform(data,
transform_value=self.default_value,
skip_cols=self.skip_cols)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
self.missing_impute_rate = imputer_processor.get_impute_rate("transform")
return imputed_data
def save_feature_imputer_model(missing_fill=False,
missing_replace_method=None,
cols_replace_method=None,
missing_impute=None,
missing_fill_value=None,
missing_replace_rate=None,
header=None,
skip_cols=None):
model_meta = FeatureImputerMeta()
model_param = FeatureImputerParam()
model_meta.is_imputer = missing_fill
if missing_fill:
if missing_replace_method and cols_replace_method is None:
model_meta.strategy = missing_replace_method
if missing_impute is not None:
model_meta.missing_value.extend(map(str, missing_impute))
model_meta.missing_value_type.extend([type(v).__name__ for v in missing_impute])
if missing_fill_value is not None and header is not None:
fill_header = [col for col in header if col not in skip_cols]
feature_value_dict = dict(zip(fill_header, map(str, missing_fill_value)))
model_param.missing_replace_value.update(feature_value_dict)
missing_fill_value_type = [type(v).__name__ for v in missing_fill_value]
feature_value_type_dict = dict(zip(fill_header, missing_fill_value_type))
model_param.missing_replace_value_type.update(feature_value_type_dict)
if missing_replace_rate is not None:
missing_replace_rate_dict = dict(zip(header, missing_replace_rate))
model_param.missing_value_ratio.update(missing_replace_rate_dict)
if cols_replace_method is not None:
cols_replace_method = {k: str(v) for k, v in cols_replace_method.items()}
# model_param.cols_replace_method.update(cols_replace_method)
else:
filled_cols = set(header) - set(skip_cols)
cols_replace_method = {k: str(missing_replace_method) for k in filled_cols}
model_param.cols_replace_method.update(cols_replace_method)
model_param.skip_cols.extend(skip_cols)
return model_meta, model_param
def load_value_to_type(value, value_type):
if value is None:
loaded_value = None
elif value_type in ["int", "int64", "long", "float", "float64", "double"]:
loaded_value = getattr(np, value_type)(value)
elif value_type in ["str", "_str"]:
loaded_value = str(value)
elif value_type.lower() in ["none", "nonetype"]:
loaded_value = None
else:
raise ValueError(f"unknown value type: {value_type}")
return loaded_value
def load_feature_imputer_model(header=None,
model_name="Imputer",
model_meta=None,
model_param=None):
missing_fill = model_meta.is_imputer
missing_replace_method = model_meta.strategy
missing_value = list(model_meta.missing_value)
missing_value_type = list(model_meta.missing_value_type)
missing_fill_value = model_param.missing_replace_value
missing_fill_value_type = model_param.missing_replace_value_type
skip_cols = list(model_param.skip_cols)
if missing_fill:
if not missing_replace_method:
missing_replace_method = None
if not missing_value:
missing_value = None
else:
missing_value = [load_value_to_type(missing_value[i],
missing_value_type[i]) for i in range(len(missing_value))]
if missing_fill_value:
missing_fill_value = [load_value_to_type(missing_fill_value.get(head),
missing_fill_value_type.get(head)) for head in header]
else:
missing_fill_value = None
else:
missing_replace_method = None
missing_value = None
missing_fill_value = None
return missing_fill, missing_replace_method, missing_value, missing_fill_value, skip_cols
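if __name__ == "__main__":
    # Minimal self-check sketch for load_value_to_type (illustrative only):
    # serialized strings are cast back to the recorded python/numpy types.
    assert load_value_to_type("3", "int64") == 3
    assert load_value_to_type("0.5", "float64") == 0.5
    assert load_value_to_type("NA", "str") == "NA"
    assert load_value_to_type(None, "NoneType") is None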
| 10,431 | 46.20362 | 121 |
py
|
FATE
|
FATE-master/python/federatedml/feature/one_hot_encoder.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import math
import numpy as np
from federatedml.model_base import ModelBase
from federatedml.param.onehot_encoder_param import OneHotEncoderParam
from federatedml.protobuf.generated import onehot_param_pb2, onehot_meta_pb2
from federatedml.statistic.data_overview import get_header
from federatedml.util import LOGGER
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
MODEL_PARAM_NAME = 'OneHotParam'
MODEL_META_NAME = 'OneHotMeta'
MODEL_NAME = 'OneHotEncoder'
class OneHotInnerParam(object):
def __init__(self):
self.col_name_maps = {}
self.header = []
self.transform_indexes = []
self.transform_names = []
self.result_header = []
self.transform_index_set = set()
def set_header(self, header):
self.header = header
for idx, col_name in enumerate(self.header):
self.col_name_maps[col_name] = idx
    def set_result_header(self, result_header: list):
self.result_header = result_header.copy()
def set_transform_all(self):
self.transform_indexes = [i for i in range(len(self.header))]
self.transform_names = self.header
self.transform_index_set = set(self.transform_indexes)
def add_transform_indexes(self, transform_indexes):
if transform_indexes is None:
return
for idx in transform_indexes:
if idx >= len(self.header):
LOGGER.warning("Adding a index that out of header's bound")
continue
if idx not in self.transform_index_set:
self.transform_indexes.append(idx)
self.transform_index_set.add(idx)
self.transform_names.append(self.header[idx])
def add_transform_names(self, transform_names):
if transform_names is None:
return
for col_name in transform_names:
idx = self.col_name_maps.get(col_name)
if idx is None:
LOGGER.warning("Adding a col_name that is not exist in header")
continue
if idx not in self.transform_index_set:
self.transform_indexes.append(idx)
self.transform_index_set.add(idx)
self.transform_names.append(self.header[idx])
class TransferPair(object):
def __init__(self, name):
self.name = name
self._values = set()
self._transformed_headers = {}
self._ordered_header = None
def add_value(self, value):
if value in self._values:
return
self._values.add(value)
if len(self._values) > consts.ONE_HOT_LIMIT:
raise ValueError(f"Input data should not have more than {consts.ONE_HOT_LIMIT} "
f"possible value when doing one-hot encode")
# self._transformed_headers[value] = self.__encode_new_header(value)
# LOGGER.debug(f"transformed_header: {self._transformed_headers}")
@property
def values(self):
if self._ordered_header is None:
return list(self._values)
if len(self._ordered_header) != len(self._values):
raise ValueError("Indicated order header length is not equal to value set,"
f" ordered_header: {self._ordered_header}, values: {self._values}")
return self._ordered_header
def set_ordered_header(self, ordered_header):
self._ordered_header = ordered_header
@property
def transformed_headers(self):
return [self._transformed_headers[x] for x in self.values]
def query_name_by_value(self, value):
return self._transformed_headers.get(value, None)
def encode_new_headers(self):
for value in self._values:
self._transformed_headers[value] = "_".join(map(str, [self.name, value]))
def __encode_new_header(self, value):
return '_'.join([str(x) for x in [self.name, value]])
class OneHotEncoder(ModelBase):
def __init__(self):
super(OneHotEncoder, self).__init__()
self.col_maps = {}
self.schema = {}
self.output_data = None
self.model_param = OneHotEncoderParam()
self.inner_param: OneHotInnerParam = None
def _init_model(self, model_param):
self.model_param = model_param
# self.cols_index = model_param.cols
def _abnormal_detection(self, data_instances):
"""
Make sure input data_instances is valid.
"""
abnormal_detection.empty_table_detection(data_instances)
abnormal_detection.empty_feature_detection(data_instances)
self.check_schema_content(data_instances.schema)
def fit(self, data_instances):
self._init_params(data_instances)
self._abnormal_detection(data_instances)
f1 = functools.partial(self.record_new_header,
inner_param=self.inner_param)
self.col_maps = data_instances.applyPartitions(f1).reduce(self.merge_col_maps)
LOGGER.debug("Before set_schema in fit, schema is : {}, header: {}".format(self.schema,
self.inner_param.header))
for col_name, pair_obj in self.col_maps.items():
pair_obj.encode_new_headers()
self._transform_schema()
data_instances = self.transform(data_instances)
LOGGER.debug("After transform in fit, schema is : {}, header: {}".format(self.schema,
self.inner_param.header))
return data_instances
@assert_io_num_rows_equal
def transform(self, data_instances):
self._init_params(data_instances)
LOGGER.debug("In OneHot transform, ori_header: {}, transfered_header: {}".format(
self.inner_param.header, self.inner_param.result_header
))
# one_data = data_instances.first()[1].features
# LOGGER.debug("Before transform, data is : {}".format(one_data))
f = functools.partial(self.transfer_one_instance,
col_maps=self.col_maps,
header=self.inner_param.header,
result_header=self.inner_param.result_header,
result_header_index_mapping=dict(zip(self.inner_param.result_header,
range(len(self.inner_param.result_header)))))
new_data = data_instances.mapValues(f)
self.set_schema(new_data)
self.add_summary('transferred_dimension', len(self.inner_param.result_header))
LOGGER.debug(f"Final summary: {self.summary()}")
# one_data = new_data.first()[1].features
# LOGGER.debug("transfered data is : {}".format(one_data))
return new_data
def _transform_schema(self):
header = self.inner_param.header.copy()
LOGGER.debug("[Result][OneHotEncoder]Before one-hot, "
"data_instances schema is : {}".format(self.inner_param.header))
result_header = []
for col_name in header:
if col_name not in self.col_maps:
result_header.append(col_name)
continue
pair_obj = self.col_maps[col_name]
new_headers = pair_obj.transformed_headers
result_header.extend(new_headers)
self.inner_param.set_result_header(result_header)
LOGGER.debug("[Result][OneHotEncoder]After one-hot, data_instances schema is :"
" {}".format(header))
def _init_params(self, data_instances):
if len(self.schema) == 0:
self.schema = data_instances.schema
if self.inner_param is not None:
return
self.inner_param = OneHotInnerParam()
# self.schema = data_instances.schema
LOGGER.debug("In _init_params, schema is : {}".format(self.schema))
header = get_header(data_instances)
self.add_summary("original_dimension", len(header))
self.inner_param.set_header(header)
if self.model_param.transform_col_indexes == -1:
self.inner_param.set_transform_all()
else:
self.inner_param.add_transform_indexes(self.model_param.transform_col_indexes)
self.inner_param.add_transform_names(self.model_param.transform_col_names)
@staticmethod
def record_new_header(data, inner_param: OneHotInnerParam):
"""
Generate a new schema based on data value. Each new value will generate a new header.
Returns
-------
        col_maps: a dict whose keys are original column names and whose values are
            TransferPair objects recording the observed values, e.g. value 1 of
            column "x1" will later be encoded as the new header "x1_1"
"""
col_maps = {}
for col_name in inner_param.transform_names:
col_maps[col_name] = TransferPair(col_name)
# col_idx_name_pairs = list(zip(inner_param.transform_indexes, inner_param.transform_names))
for _, instance in data:
feature = instance.features
for col_idx, col_name in zip(inner_param.transform_indexes, inner_param.transform_names):
pair_obj = col_maps.get(col_name)
feature_value = feature[col_idx]
if not isinstance(feature_value, str):
feature_value = math.ceil(feature_value)
if feature_value != feature[col_idx]:
raise ValueError("Onehot input data support integer or string only")
pair_obj.add_value(feature_value)
return col_maps
@staticmethod
def encode_new_header(col_name, feature_value):
return '_'.join([str(x) for x in [col_name, feature_value]])
@staticmethod
def merge_col_maps(col_map1, col_map2):
if col_map1 is None and col_map2 is None:
return None
if col_map1 is None:
return col_map2
if col_map2 is None:
return col_map1
for col_name, pair_obj in col_map2.items():
if col_name not in col_map1:
col_map1[col_name] = pair_obj
continue
else:
col_1_obj = col_map1[col_name]
for value in pair_obj.values:
col_1_obj.add_value(value)
return col_map1
@staticmethod
def transfer_one_instance(instance, col_maps, header, result_header, result_header_index_mapping):
new_inst = instance.copy(exclusive_attr={"features"})
feature = instance.features
# _transformed_value = {}
new_feature = [0] * len(result_header)
for idx, col_name in enumerate(header):
value = feature[idx]
if col_name in result_header_index_mapping:
result_idx = result_header_index_mapping.get(col_name)
new_feature[result_idx] = value
# _transformed_value[col_name] = value
else:
pair_obj = col_maps.get(col_name, None)
if not pair_obj:
continue
new_col_name = pair_obj.query_name_by_value(value)
if new_col_name is None:
continue
result_idx = result_header_index_mapping.get(new_col_name)
new_feature[result_idx] = 1
# _transformed_value[new_col_name] = 1
feature_array = np.array(new_feature)
new_inst.features = feature_array
return new_inst
def set_schema(self, data_instance):
derived_header = dict()
for col_name, pair_obj in self.col_maps.items():
derived_header[col_name] = pair_obj.transformed_headers
self.schema["anonymous_header"] = self.anonymous_generator.generate_derived_header(
self.schema["header"],
self.schema["anonymous_header"],
derived_header)
self.schema['header'] = self.inner_param.result_header
data_instance.schema = self.schema
def _get_meta(self):
meta_protobuf_obj = onehot_meta_pb2.OneHotMeta(transform_col_names=self.inner_param.transform_names,
header=self.inner_param.header,
need_run=self.need_run)
return meta_protobuf_obj
def _get_param(self):
pb_dict = {}
for col_name, pair_obj in self.col_maps.items():
values = [str(x) for x in pair_obj.values]
value_dict_obj = onehot_param_pb2.ColsMap(values=values,
transformed_headers=pair_obj.transformed_headers)
pb_dict[col_name] = value_dict_obj
result_obj = onehot_param_pb2.OneHotParam(col_map=pb_dict,
result_header=self.inner_param.result_header)
return result_obj
def export_model(self):
if self.model_output is not None:
LOGGER.debug("Model output is : {}".format(self.model_output))
return self.model_output
if self.inner_param is None:
self.inner_param = OneHotInnerParam()
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
MODEL_META_NAME: meta_obj,
MODEL_PARAM_NAME: param_obj
}
return result
def load_model(self, model_dict):
self._parse_need_run(model_dict, MODEL_META_NAME)
model_param = list(model_dict.get('model').values())[0].get(MODEL_PARAM_NAME)
model_meta = list(model_dict.get('model').values())[0].get(MODEL_META_NAME)
self.model_output = {
MODEL_META_NAME: model_meta,
MODEL_PARAM_NAME: model_param
}
self.inner_param = OneHotInnerParam()
self.inner_param.set_header(list(model_meta.header))
self.inner_param.add_transform_names(list(model_meta.transform_col_names))
col_maps = dict(model_param.col_map)
self.col_maps = {}
for col_name, cols_map_obj in col_maps.items():
if col_name not in self.col_maps:
self.col_maps[col_name] = TransferPair(col_name)
pair_obj = self.col_maps[col_name]
for feature_value in list(cols_map_obj.values):
try:
feature_value = eval(feature_value)
except NameError:
pass
pair_obj.add_value(feature_value)
pair_obj.encode_new_headers()
self.inner_param.set_result_header(list(model_param.result_header))
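if __name__ == "__main__":
    # Minimal self-check sketch for TransferPair (illustrative only; the column
    # name "x1" and its values are made up). Each observed value of a column is
    # encoded into a derived one-hot header "<column>_<value>".
    pair = TransferPair("x1")
    for value in (0, 1):
        pair.add_value(value)
    pair.encode_new_headers()
    assert sorted(pair.transformed_headers) == ["x1_0", "x1_1"]
    assert pair.query_name_by_value(0) == "x1_0"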
| 15,474 | 38.27665 | 112 |
py
|
FATE
|
FATE-master/python/federatedml/feature/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from federatedml.feature.data_transform import DataTransform
# from federatedml.feature.sampler import RandomSampler
# from federatedml.feature.sampler import StratifiedSampler
# from federatedml.feature.sampler import Sampler
# from federatedml.feature.instance import Instance
# from federatedml.feature.sparse_vector import SparseVector
# from federatedml.feature.imputer import ImputerProcess
# from federatedml.feature.quantile import Quantile
# from federatedml.feature.imputer import ImputerProcess
# from federatedml.feature.min_max_scale import MinMaxScale
# from federatedml.feature.standard_scale import StandardScale
#
#
# __all__ = ['Instance', 'Quantile', "SparseVector",
# "MinMaxScale", "ImputerProcess", "StandardScale",
# "DataTransform", "RandomSampler",
# "StratifiedSampler", "Sampler",]
| 1,461 | 42 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/feature/instance.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
import copy
class Instance(object):
"""
Instance object use in all algorithm module
Parameters
----------
    inst_id : int, the id of the instance, a reserved field in this version
    weight : float, the weight of the instance
    features : object, ndarray or SparseVector object in this version
    label : None or float, data label
"""
def __init__(self, inst_id=None, weight=None, features=None, label=None):
self.inst_id = inst_id
self.weight = weight
self.features = features
self.label = label
def set_weight(self, weight=1.0):
self.weight = weight
def set_label(self, label=1):
self.label = label
def set_feature(self, features):
self.features = features
def copy(self, exclusive_attr=None):
keywords = {"inst_id", "weight", "features", "label"}
if exclusive_attr:
keywords -= set(exclusive_attr)
copy_obj = Instance()
        for key in keywords:
            # attributes listed in exclusive_attr were already removed from
            # keywords above, so copy the remaining ones directly (checking
            # membership in a possibly-None exclusive_attr would raise TypeError)
            attr = getattr(self, key)
            setattr(copy_obj, key, attr)
return copy_obj
@property
def with_inst_id(self):
return self.inst_id is not None
@staticmethod
def is_instance():
return True
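if __name__ == "__main__":
    # Minimal self-check sketch for Instance (illustrative only; the feature
    # values are made up).
    inst = Instance(inst_id=1, weight=1.0, features=[0.1, 0.2], label=1)
    partial_copy = inst.copy(exclusive_attr=["label"])
    assert partial_copy.features == [0.1, 0.2]
    assert partial_copy.label is None   # excluded attribute keeps its default
    assert inst.with_inst_id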
| 2,139 | 26.792208 | 80 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/filter_base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import random
from federatedml.feature.feature_selection.selection_properties import SelectionProperties
from federatedml.util import LOGGER
class BaseFilterMethod(object):
"""
Use for filter columns
"""
def __init__(self, filter_param):
self.selection_properties: SelectionProperties = None
self.use_anonymous = False
self._parse_filter_param(filter_param)
@property
def feature_values(self):
return self.selection_properties.feature_values
def set_use_anonymous(self):
self.use_anonymous = True
def fit(self, data_instances, suffix):
"""
Filter data_instances for the specified columns
Parameters
----------
data_instances : Table,
Input data
suffix : string,
Use for transfer_variables
Returns
-------
        A list of indexes of the columns left.
"""
raise NotImplementedError("Should not call this function directly")
def _parse_filter_param(self, filter_param):
raise NotImplementedError("Should not call this function directly")
def set_selection_properties(self, selection_properties):
# LOGGER.debug(f"In set_selection_properties, header: {selection_properties.header}")
self.selection_properties = selection_properties
def _keep_one_feature(self, pick_high=True, selection_properties=None, feature_values=None):
"""
Make sure at least one feature can be left after filtering.
Parameters
----------
pick_high: bool
            Decides which feature to keep when none is left after filtering:
            True keeps the one with the highest value, False the lowest.
"""
if selection_properties is None:
selection_properties = self.selection_properties
if feature_values is None:
feature_values = self.feature_values
if len(selection_properties.left_col_indexes) > 0:
return
LOGGER.info("All features has been filtered, keep one without satisfying all the conditions")
# LOGGER.debug("feature values: {}, select_col_names: {}, left_col_names: {}".format(
# self.feature_values, self.selection_properties.select_col_names, self.selection_properties.left_col_names
# ))
# random pick one
if len(feature_values) == 0:
left_col_name = random.choice(selection_properties.select_col_names)
else:
result = sorted(feature_values.items(), key=operator.itemgetter(1), reverse=pick_high)
left_col_name = result[0][0]
# LOGGER.debug("feature values: {}, left_col_name: {}".format(self.feature_values, left_col_name))
selection_properties.add_left_col_name(left_col_name)
def set_statics_obj(self, statics_obj):
# Re-write if needed
pass
def set_transfer_variable(self, transfer_variable):
# Re-write if needed
pass
def set_binning_obj(self, binning_model):
# Re-write if needed
pass
def set_component_properties(self, cpp):
# Re-write if needed
pass
def set_iso_model(self, model):
pass
def get_meta_obj(self):
raise NotImplementedError("Should not call this function directly")
| 4,006 | 30.551181 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/outlier_filter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod
from federatedml.param.feature_selection_param import OutlierColsSelectionParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.statistic.statics import MultivariateStatisticalSummary
class OutlierFilter(BaseFilterMethod):
"""
    Filter out the columns whose value at the given percentile reaches the upper threshold.
"""
def __init__(self, filter_param: OutlierColsSelectionParam):
super().__init__(filter_param)
self.statics_obj = None
def _parse_filter_param(self, filter_param: OutlierColsSelectionParam):
self.percentile = filter_param.percentile
self.upper_threshold = filter_param.upper_threshold
def set_statics_obj(self, statics_obj):
self.statics_obj = statics_obj
def fit(self, data_instances, suffix):
if self.statics_obj is None:
self.statics_obj = MultivariateStatisticalSummary(data_instances)
quantile_points = self.statics_obj.get_quantile_point(self.percentile)
for col_name in self.selection_properties.select_col_names:
quantile_value = quantile_points.get(col_name)
if quantile_value < self.upper_threshold:
self.selection_properties.add_left_col_name(col_name)
self.selection_properties.add_feature_value(col_name, quantile_value)
self._keep_one_feature(pick_high=True)
return self
# def get_meta_obj(self, meta_dicts):
# result = feature_selection_meta_pb2.OutlierColsSelectionMeta(percentile=self.percentile,
# upper_threshold=self.upper_threshold)
# meta_dicts['outlier_meta'] = result
# return meta_dicts
def get_meta_obj(self):
result = feature_selection_meta_pb2.FilterMeta()
return result
| 2,578 | 39.296875 | 108 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/filter_factory.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from federatedml.feature.feature_selection.correlation_filter import CorrelationFilter
from federatedml.feature.feature_selection.iso_model_filter import IsoModelFilter, FederatedIsoModelFilter
from federatedml.feature.feature_selection.iv_filter import IVFilter
from federatedml.feature.feature_selection.manually_filter import ManuallyFilter
from federatedml.feature.feature_selection.percentage_value_filter import PercentageValueFilter
from federatedml.param import feature_selection_param
from federatedml.param.feature_selection_param import FeatureSelectionParam
from federatedml.util import LOGGER
from federatedml.util import consts
def _obtain_single_param(input_param, idx):
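    # Multi-filter params store one value per filter in each attribute; slice out
    # the idx-th entry so a single filter instance sees scalar settings.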
this_param = copy.deepcopy(input_param)
for attr, value in input_param.__dict__.items():
if value is not None:
value = value[idx]
setattr(this_param, attr, value)
this_param.check()
return this_param
def get_filter(filter_name, model_param: FeatureSelectionParam, role=consts.GUEST, model=None, idx=0):
LOGGER.debug(f"Getting filter name: {filter_name}")
if filter_name == consts.UNIQUE_VALUE:
unique_param = model_param.unique_param
new_param = feature_selection_param.CommonFilterParam(
metrics=consts.STANDARD_DEVIATION,
filter_type='threshold',
take_high=True,
threshold=unique_param.eps
)
new_param.check()
iso_model = model.isometric_models.get(consts.STATISTIC_MODEL)
if iso_model is None:
raise ValueError("Missing statistic model to use unique filter")
return IsoModelFilter(new_param, iso_model)
elif filter_name == consts.IV_VALUE_THRES:
iv_value_param = model_param.iv_value_param
iv_param = feature_selection_param.IVFilterParam(
filter_type='threshold',
threshold=iv_value_param.value_threshold,
host_thresholds=iv_value_param.host_thresholds,
select_federated=not iv_value_param.local_only
)
iv_param.check()
iso_model = model.isometric_models.get(consts.BINNING_MODEL)
if iso_model is None:
raise ValueError("Missing binning model to use iv filter")
return IVFilter(iv_param, iso_model,
role=role, cpp=model.component_properties)
elif filter_name == consts.IV_PERCENTILE:
iv_percentile_param = model_param.iv_percentile_param
iv_param = feature_selection_param.IVFilterParam(
filter_type='top_percentile',
threshold=iv_percentile_param.percentile_threshold,
select_federated=not iv_percentile_param.local_only
)
iv_param.check()
iso_model = model.isometric_models.get(consts.BINNING_MODEL)
if iso_model is None:
raise ValueError("Missing binning model to use iv filter")
return IVFilter(iv_param, iso_model,
role=role, cpp=model.component_properties)
elif filter_name == consts.IV_TOP_K:
iv_top_k_param = model_param.iv_top_k_param
iv_param = feature_selection_param.IVFilterParam(
filter_type='top_k',
threshold=iv_top_k_param.k,
select_federated=not iv_top_k_param.local_only
)
iv_param.check()
iso_model = model.isometric_models.get(consts.BINNING_MODEL)
if iso_model is None:
raise ValueError("Missing binning model to use iv filter")
return IVFilter(iv_param, iso_model,
role=role, cpp=model.component_properties)
elif filter_name == consts.COEFFICIENT_OF_VARIATION_VALUE_THRES:
variance_coe_param = model_param.variance_coe_param
coe_param = feature_selection_param.CommonFilterParam(
metrics=consts.COEFFICIENT_OF_VARIATION,
filter_type='threshold',
take_high=True,
threshold=variance_coe_param.value_threshold
)
coe_param.check()
iso_model = model.isometric_models.get(consts.STATISTIC_MODEL)
if iso_model is None:
raise ValueError("Missing statistic model to use coef_of_var filter")
return IsoModelFilter(coe_param, iso_model)
elif filter_name == consts.OUTLIER_COLS:
outlier_param = model_param.outlier_param
new_param = feature_selection_param.CommonFilterParam(
metrics=str(int(outlier_param.percentile * 100)) + "%",
filter_type='threshold',
take_high=False,
threshold=outlier_param.upper_threshold
)
new_param.check()
iso_model = model.isometric_models.get(consts.STATISTIC_MODEL)
if iso_model is None:
raise ValueError("Missing statistic model to use outlier filter")
return IsoModelFilter(new_param, iso_model)
# outlier_param = model_param.outlier_param
# return OutlierFilter(outlier_param)
elif filter_name == consts.MANUALLY_FILTER:
manually_param = model_param.manually_param
filter = ManuallyFilter(manually_param)
if model_param.use_anonymous:
filter.set_use_anonymous()
return filter
elif filter_name == consts.PERCENTAGE_VALUE:
percentage_value_param = model_param.percentage_value_param
return PercentageValueFilter(percentage_value_param)
elif filter_name == consts.IV_FILTER:
iv_param = model_param.iv_param
this_param = _obtain_single_param(iv_param, idx)
iso_model = model.isometric_models.get(consts.BINNING_MODEL)
if iso_model is None:
raise ValueError("Missing iv model to use iv filter")
return IVFilter(this_param, iso_model,
role=role, cpp=model.component_properties)
elif filter_name == consts.HETERO_SBT_FILTER:
sbt_param = model_param.sbt_param
this_param = _obtain_single_param(sbt_param, idx)
iso_model = model.isometric_models.get(consts.HETERO_SBT)
if iso_model is None:
raise ValueError("Missing sbt model for use sbt filter")
return IsoModelFilter(this_param, iso_model)
elif filter_name == consts.HETERO_FAST_SBT_FILTER:
sbt_param = model_param.sbt_param
this_param = _obtain_single_param(sbt_param, idx)
if consts.HETERO_FAST_SBT_LAYERED in model.isometric_models and \
consts.HETERO_FAST_SBT_MIX in model.isometric_models:
raise ValueError("Should not provide layered and mixed fast sbt model simultaneously")
elif consts.HETERO_FAST_SBT_LAYERED in model.isometric_models:
iso_model = model.isometric_models.get(consts.HETERO_FAST_SBT_LAYERED)
return IsoModelFilter(this_param, iso_model)
elif consts.HETERO_FAST_SBT_MIX in model.isometric_models:
iso_model = model.isometric_models.get(consts.HETERO_FAST_SBT_MIX)
return IsoModelFilter(this_param, iso_model)
else:
raise ValueError("Missing Fast sbt model")
elif filter_name == consts.HOMO_SBT_FILTER:
sbt_param = model_param.sbt_param
this_param = _obtain_single_param(sbt_param, idx)
iso_model = model.isometric_models.get(consts.HOMO_SBT)
if iso_model is None:
raise ValueError("Missing sbt model to use sbt filter")
return IsoModelFilter(this_param, iso_model)
elif filter_name == consts.STATISTIC_FILTER:
statistic_param = model_param.statistic_param
this_param = _obtain_single_param(statistic_param, idx)
iso_model = model.isometric_models.get(consts.STATISTIC_MODEL)
if iso_model is None:
raise ValueError("Missing statistic model to use statistic filter")
return IsoModelFilter(this_param, iso_model)
elif filter_name == consts.PSI_FILTER:
psi_param = model_param.psi_param
this_param = _obtain_single_param(psi_param, idx)
iso_model = model.isometric_models.get(consts.PSI)
if iso_model is None:
raise ValueError("Missing psi model to use psi filter")
return IsoModelFilter(this_param, iso_model)
elif filter_name == consts.VIF_FILTER:
vif_param = model_param.vif_param
this_param = _obtain_single_param(vif_param, idx)
iso_model = model.isometric_models.get("HeteroPearson")
if iso_model is None:
raise ValueError("Missing Hetero Pearson model to use VIF filter")
return IsoModelFilter(this_param, iso_model)
elif filter_name == consts.CORRELATION_FILTER:
correlation_param = model_param.correlation_param
if correlation_param.sort_metric == 'iv':
external_model = model.isometric_models.get(consts.BINNING_MODEL)
if external_model is None:
raise ValueError("Missing binning model to use correlation filter")
else:
raise ValueError(f"sort_metric: {correlation_param.sort_metric} is not supported")
        iso_model = model.isometric_models.get("HeteroPearson")
        if iso_model is None:
            raise ValueError("Missing Hetero Pearson model to use correlation filter")
        correlation_model = iso_model.get_metric_info(consts.PEARSON)
return CorrelationFilter(correlation_param, external_model=external_model,
correlation_model=correlation_model, role=role,
cpp=model.component_properties)
else:
raise ValueError("filter method: {} does not exist".format(filter_name))
| 10,258 | 44.595556 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/selection_properties.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from federatedml.protobuf.generated import feature_selection_param_pb2
from federatedml.util import LOGGER
class SelectionProperties(object):
def __init__(self):
self.header = []
self.anonymous_header = []
self.anonymous_col_name_maps = {}
self.col_name_maps = {}
self.last_left_col_indexes = []
self.select_col_indexes = []
self.select_col_names = []
# self.anonymous_select_col_names = []
self.left_col_indexes_added = set()
self.left_col_indexes = []
self.left_col_names = []
# self.anonymous_left_col_names = []
self.feature_values = {}
def load_properties_with_new_header(self, header, feature_values, left_cols_obj, new_header_dict):
self.set_header(list(new_header_dict.values()))
self.set_last_left_col_indexes([header.index(i) for i in left_cols_obj.original_cols])
self.add_select_col_names([new_header_dict.get(col) for col in left_cols_obj.original_cols])
for col_name, _ in feature_values.items():
self.add_feature_value(new_header_dict.get(col_name), feature_values.get(col_name))
left_cols_dict = dict(left_cols_obj.left_cols)
# LOGGER.info(f"left_cols_dict: {left_cols_dict}")
for col_name, _ in left_cols_dict.items():
if left_cols_dict.get(col_name):
self.add_left_col_name(new_header_dict.get(col_name))
# LOGGER.info(f"select properties all left cols names: {self.all_left_col_names}")
return self
def load_properties(self, header, feature_values, left_cols_obj):
self.set_header(header)
self.set_last_left_col_indexes([header.index(i) for i in left_cols_obj.original_cols])
self.add_select_col_names(left_cols_obj.original_cols)
for col_name, _ in feature_values.items():
self.add_feature_value(col_name, feature_values[col_name])
left_cols_dict = dict(left_cols_obj.left_cols)
for col_name, _ in left_cols_dict.items():
if left_cols_dict[col_name]:
self.add_left_col_name(col_name)
return self
def set_header(self, header):
self.header = header
for idx, col_name in enumerate(self.header):
self.col_name_maps[col_name] = idx
def set_anonymous_header(self, anonymous_header):
self.anonymous_header = anonymous_header
if self.anonymous_header:
for idx, col_name in enumerate(self.anonymous_header):
self.anonymous_col_name_maps[col_name] = idx
def set_last_left_col_indexes(self, left_cols):
self.last_left_col_indexes = left_cols.copy()
def set_select_all_cols(self):
self.select_col_indexes = [i for i in range(len(self.header))]
self.select_col_names = self.header
# self.anonymous_select_col_names = self.anonymous_header
def add_select_col_indexes(self, select_col_indexes):
last_left_col_indexes = set(self.last_left_col_indexes)
added_select_col_index = set(self.select_col_indexes)
for idx in select_col_indexes:
if idx >= len(self.header):
LOGGER.warning("Adding an index out of header's bound")
continue
if idx not in last_left_col_indexes:
continue
if idx not in added_select_col_index:
self.select_col_indexes.append(idx)
self.select_col_names.append(self.header[idx])
# self.anonymous_select_col_names.append(self.anonymous_header[idx])
added_select_col_index.add(idx)
def add_select_col_names(self, select_col_names):
last_left_col_indexes = set(self.last_left_col_indexes)
added_select_col_indexes = set(self.select_col_indexes)
for col_name in select_col_names:
idx = self.col_name_maps.get(col_name)
if idx is None:
LOGGER.warning("Adding a col_name that does not exist in header")
continue
if idx not in last_left_col_indexes:
continue
if idx not in added_select_col_indexes:
self.select_col_indexes.append(idx)
self.select_col_names.append(col_name)
# self.anonymous_select_col_names.append(self.anonymous_header[idx])
added_select_col_indexes.add(idx)
def add_left_col_name(self, left_col_name):
idx = self.col_name_maps.get(left_col_name)
if idx is None:
LOGGER.warning("Adding a col_name that does not exist in header")
return
if idx not in self.left_col_indexes_added:
self.left_col_indexes.append(idx)
self.left_col_indexes_added.add(idx)
self.left_col_names.append(left_col_name)
# self.anonymous_left_col_names.append(self.anonymous_header[idx])
def add_feature_value(self, col_name, feature_value):
self.feature_values[col_name] = feature_value
@property
def all_left_col_indexes(self):
result = []
select_col_indexes = set(self.select_col_indexes)
left_col_indexes = set(self.left_col_indexes)
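        # an index survives this round if the current filter never examined it
        # (not in select_col_indexes) or examined it and explicitly kept it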
for idx in self.last_left_col_indexes:
if (idx not in select_col_indexes) or (idx in left_col_indexes):
result.append(idx)
# elif idx in left_col_indexes:
# result.append(idx)
return result
@property
def all_left_col_names(self):
return [self.header[x] for x in self.all_left_col_indexes]
@property
def all_left_anonymous_col_names(self):
return [self.anonymous_header[x] for x in self.all_left_col_indexes]
@property
def left_col_dicts(self):
return {x: True for x in self.all_left_col_names}
@property
def last_left_col_names(self):
return [self.header[x] for x in self.last_left_col_indexes]
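# --- Editor's sketch (hypothetical, not part of FATE) ---
# Tiny walk-through of the bookkeeping above: start from a 3-column header,
# let a filter examine every column but keep only "x1".
def _example_selection_round():
    props = SelectionProperties()
    props.set_header(["x0", "x1", "x2"])
    props.set_last_left_col_indexes([0, 1, 2])
    props.set_select_all_cols()
    props.add_left_col_name("x1")
    return props.all_left_col_names  # -> ["x1"]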
class CompletedSelectionResults(object):
def __init__(self):
self.header = []
self.anonymous_header = []
self.col_name_maps = {}
self.__select_col_names = None
self.filter_results = []
self.__guest_pass_filter_nums = {}
self.__host_pass_filter_nums_list = []
self.all_left_col_indexes = []
def set_header(self, header):
self.header = header
for idx, col_name in enumerate(self.header):
self.col_name_maps[col_name] = idx
def set_anonymous_header(self, anonymous_header):
self.anonymous_header = anonymous_header
def set_select_col_names(self, select_col_names):
if self.__select_col_names is None:
self.__select_col_names = select_col_names
def get_select_col_names(self):
return self.__select_col_names
def set_all_left_col_indexes(self, left_indexes):
self.all_left_col_indexes = left_indexes.copy()
@property
def all_left_col_names(self):
return [self.header[x] for x in self.all_left_col_indexes]
@property
def all_left_anonymous_col_names(self):
return [self.anonymous_header[x] for x in self.all_left_col_indexes]
def add_filter_results(self, filter_name, select_properties: SelectionProperties, host_select_properties=None):
# self.all_left_col_indexes = select_properties.all_left_col_indexes.copy()
self.set_all_left_col_indexes(select_properties.all_left_col_indexes)
if filter_name == 'conclusion':
return
if host_select_properties is None:
host_select_properties = []
host_feature_values = []
host_left_cols = []
for idx, host_result in enumerate(host_select_properties):
host_all_left_col_names = set(host_result.all_left_col_names)
if idx >= len(self.__host_pass_filter_nums_list):
_host_pass_filter_nums = {}
self.__host_pass_filter_nums_list.append(_host_pass_filter_nums)
else:
_host_pass_filter_nums = self.__host_pass_filter_nums_list[idx]
host_last_left_col_names = host_result.last_left_col_names
for col_name in host_last_left_col_names:
_host_pass_filter_nums.setdefault(col_name, 0)
if col_name in host_all_left_col_names:
_host_pass_filter_nums[col_name] += 1
feature_value_pb = feature_selection_param_pb2.FeatureValue(feature_values=host_result.feature_values)
host_feature_values.append(feature_value_pb)
left_col_pb = feature_selection_param_pb2.LeftCols(original_cols=host_last_left_col_names,
left_cols=host_result.left_col_dicts)
host_left_cols.append(left_col_pb)
# for col_name in select_properties.all_left_col_names:
self_all_left_col_names = set(select_properties.all_left_col_names)
self_last_left_col_names = select_properties.last_left_col_names
for col_name in self_last_left_col_names:
self.__guest_pass_filter_nums.setdefault(col_name, 0)
if col_name in self_all_left_col_names:
self.__guest_pass_filter_nums[col_name] += 1
left_cols_pb = feature_selection_param_pb2.LeftCols(original_cols=self_last_left_col_names,
left_cols=select_properties.left_col_dicts)
this_filter_result = {
'feature_values': select_properties.feature_values,
'host_feature_values': host_feature_values,
'left_cols': left_cols_pb,
'host_left_cols': host_left_cols,
'filter_name': filter_name
}
this_filter_result = feature_selection_param_pb2.FeatureSelectionFilterParam(**this_filter_result)
self.filter_results.append(this_filter_result)
def get_sorted_col_names(self):
result = sorted(self.__guest_pass_filter_nums.items(), key=operator.itemgetter(1), reverse=True)
return [x for x, _ in result]
def get_host_sorted_col_names(self):
result = []
for pass_name_dict in self.__host_pass_filter_nums_list:
sorted_list = sorted(pass_name_dict.items(), key=operator.itemgetter(1), reverse=True)
result.append([x for x, _ in sorted_list])
return result
| 11,068 | 41.248092 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/correlation_filter.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.feature.feature_selection.iso_model_filter import FederatedIsoModelFilter
from federatedml.param.feature_selection_param import CorrelationFilterParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.util.component_properties import ComponentProperties
from federatedml.util import LOGGER
class CorrelationFilter(FederatedIsoModelFilter):
"""
filter the columns if all values in this feature is the same
"""
def __init__(self, filter_param: CorrelationFilterParam, external_model, correlation_model,
role, cpp: ComponentProperties):
super().__init__(filter_param, iso_model=external_model, role=role, cpp=cpp)
self.correlation_model = correlation_model
# self.host_party_id = int(self.correlation_model.parties[1][1:-1].split(",")[1])
self.host_party_id = None
self.take_high = False
if self.select_federated:
self.host_party_id = int(self.correlation_model.parties[1][1:-1].split(",")[1])
def _parse_filter_param(self, filter_param: CorrelationFilterParam):
self.sort_metric = filter_param.sort_metric
self.threshold = filter_param.threshold
self.select_federated = filter_param.select_federated
def get_meta_obj(self):
result = feature_selection_meta_pb2.FilterMeta(
metrics="correlation",
filter_type="Sort and filter by threshold",
threshold=self.threshold,
select_federated=self.select_federated
)
return result
def _guest_fit(self, suffix):
sorted_idx, col_names = self.__sort_features()
filtered_name, host_filtered_name = self.__select_corr(sorted_idx, col_names)
# LOGGER.debug(f"select_col_name: {self.selection_properties.select_col_names}")
for name in self.selection_properties.select_col_names:
if name not in filtered_name:
self.selection_properties.add_left_col_name(name)
self.selection_properties.add_feature_value(name, 0.0)
else:
self.selection_properties.add_feature_value(name, filtered_name[name])
if self.select_federated:
host_id = self.cpp.host_party_idlist.index(self.host_party_id)
host_prop = self.host_selection_properties[host_id]
for name in host_prop.select_col_names:
if name not in host_filtered_name:
host_prop.add_left_col_name(name)
host_prop.add_feature_value(name, 0.0)
else:
host_prop.add_feature_value(name, host_filtered_name[name])
self._keep_one_feature(pick_high=self.take_high, selection_properties=host_prop,
feature_values=[])
if self.select_federated:
self.sync_obj.sync_select_results(self.host_selection_properties, suffix=suffix)
def __select_corr(self, sorted_idx, col_names):
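        # greedy pass over features in descending sort-metric order: the first
        # time a feature is visited it is kept, and every other feature whose
        # absolute correlation with it exceeds the threshold is marked filtered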
guest_col_names = self.correlation_model.col_names
host_col_names = self.correlation_model.host_col_names
filtered_name = {}
host_filtered_name = {}
for idx in sorted_idx:
party, name = col_names[idx]
if name in filtered_name:
continue
if party == 'guest':
row = guest_col_names.index(name)
corr = self.correlation_model.local_corr[row, :]
# local vars will not be filtered
filtered_name = self.__get_filtered_column(corr, filtered_name, guest_col_names, name, True)
if self.select_federated:
corr = self.correlation_model.corr[row, :]
host_filtered_name = self.__get_filtered_column(corr, host_filtered_name,
host_col_names, name, False)
# LOGGER.debug(f"guest_col_name: {name}, filtered_name: {filtered_name}, "
# f"host_filtered_name: {host_filtered_name}")
else:
column = host_col_names.index(name)
corr = self.correlation_model.corr[:, column]
filtered_name = self.__get_filtered_column(corr, filtered_name, guest_col_names, name, False)
# LOGGER.debug(f"host_col_name: {name}, filtered_name: {filtered_name}, "
# f"host_filtered_name: {host_filtered_name}")
return filtered_name, host_filtered_name
def __get_filtered_column(self, corr, filtered_name, all_names, curt_name, is_local=True):
for idx, v in enumerate(corr):
if np.abs(v) > self.threshold:
_name = all_names[idx]
if is_local and _name == curt_name:
continue
if _name in filtered_name:
continue
else:
# only record first corr value > threshold
filtered_name[_name] = v
return filtered_name
def __sort_features(self):
metric_info = self.iso_model.get_metric_info(self.sort_metric)
all_feature_values = metric_info.get_partial_values(self.selection_properties.select_col_names)
col_names = [("guest", x) for x in self.selection_properties.select_col_names]
if self.select_federated:
            assert len(self.correlation_model.parties) == 2, "Correlation Model should contain host info " \
                                                             "for select_federated in correlation_filter"
LOGGER.debug(f"correlation_parties: {self.correlation_model.parties}")
host_id = self.cpp.host_party_idlist.index(self.host_party_id)
host_property = self.host_selection_properties[host_id]
all_feature_values.extend(metric_info.get_partial_values(
host_property.select_col_names, self.host_party_id
))
col_names.extend([(self.host_party_id, x) for x in host_property.select_col_names])
sorted_idx = np.argsort(all_feature_values)[::-1]
return sorted_idx, col_names
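# --- Editor's sketch (hypothetical, not part of FATE) ---
# The core greedy rule above, restated on a plain correlation matrix: visit
# features in descending importance and drop any feature whose absolute
# correlation with an already-kept feature exceeds the threshold.
def _example_greedy_corr_filter(corr, importance, threshold=0.8):
    kept, dropped = [], set()
    for i in np.argsort(importance)[::-1]:
        if i in dropped:
            continue
        kept.append(i)
        for j in range(corr.shape[0]):
            if j != i and np.abs(corr[i, j]) > threshold:
                dropped.add(j)
    return kept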
| 6,824 | 48.456522 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/manually_filter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod
from federatedml.param.feature_selection_param import ManuallyFilterParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.statistic.data_overview import look_up_names_from_header
class ManuallyFilter(BaseFilterMethod):
def __init__(self, filter_param: ManuallyFilterParam):
self.filter_out_indexes = None
self.filter_out_names = None
self.filter_param = None
super().__init__(filter_param)
def _parse_filter_param(self, filter_param):
self.filter_param = filter_param
def _transfer_params(self):
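        # two mutually exclusive modes: either explicit filter-out lists are
        # given, or "left" lists are given and their complement is filtered out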
header = self.selection_properties.header
anonymous_header = self.selection_properties.anonymous_header
col_name_maps = self.selection_properties.col_name_maps
if (self.filter_param.filter_out_indexes or self.filter_param.filter_out_names) is not None:
if self.use_anonymous:
self.filter_out_names = look_up_names_from_header(self.filter_param.filter_out_names,
anonymous_header,
header)
else:
self.filter_out_names = self.filter_param.filter_out_names
self.filter_out_indexes = self.filter_param.filter_out_indexes
elif (self.filter_param.left_col_indexes or self.filter_param.left_col_names) is not None:
filter_out_set = set([i for i in range(len(header))])
if self.filter_param.left_col_indexes is not None:
filter_out_set = filter_out_set.difference(self.filter_param.left_col_indexes)
if self.filter_param.left_col_names is not None:
if self.use_anonymous:
left_col_names = look_up_names_from_header(self.filter_param.left_col_names,
anonymous_header,
header)
else:
left_col_names = self.filter_param.left_col_names
left_idx = [col_name_maps.get(name) for name in left_col_names]
filter_out_set = filter_out_set.difference(left_idx)
self.filter_out_indexes = list(filter_out_set)
if self.filter_out_indexes is None:
self.filter_out_indexes = []
if self.filter_out_names is None:
self.filter_out_names = []
def fit(self, data_instances, suffix):
self._transfer_params()
all_filter_out_names = []
filter_out_indexes_set = set(self.filter_out_indexes)
filter_out_names_set = set(self.filter_out_names)
for col_idx, col_name in zip(self.selection_properties.select_col_indexes,
self.selection_properties.select_col_names):
# LOGGER.debug("Col_idx: {}, col_names: {}, filter_out_indexes: {}, filter_out_names: {}".format(
# col_idx, col_name, self.filter_out_indexes, self.filter_out_names
# ))
if col_idx not in filter_out_indexes_set and col_name not in filter_out_names_set:
self.selection_properties.add_left_col_name(col_name)
else:
all_filter_out_names.append(col_name)
self._keep_one_feature()
self.filter_out_names = all_filter_out_names
# LOGGER.debug(f"filter out names are: {self.filter_out_names}")
return self
def get_meta_obj(self):
result = feature_selection_meta_pb2.FilterMeta()
return result
| 4,350 | 46.813187 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/unique_value_filter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod
from federatedml.param.feature_selection_param import UniqueValueParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.statistic.statics import MultivariateStatisticalSummary
import math
class UniqueValueFilter(BaseFilterMethod):
"""
filter the columns if all values in this feature is the same
"""
def __init__(self, filter_param: UniqueValueParam):
super().__init__(filter_param)
self.statics_obj = None
def _parse_filter_param(self, filter_param):
self.eps = filter_param.eps
def set_statics_obj(self, statics_obj):
self.statics_obj = statics_obj
def fit(self, data_instances, suffix):
if self.statics_obj is None:
self.statics_obj = MultivariateStatisticalSummary(data_instances)
max_values = self.statics_obj.get_max()
min_values = self.statics_obj.get_min()
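        # a feature is treated as constant (and filtered) when its value range
        # |max - min| falls below eps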
for col_name in self.selection_properties.select_col_names:
min_max_diff = math.fabs(max_values[col_name] - min_values[col_name])
if min_max_diff >= self.eps:
self.selection_properties.add_left_col_name(col_name)
self.selection_properties.add_feature_value(col_name, min_max_diff)
self._keep_one_feature(pick_high=True)
return self
def get_meta_obj(self):
result = feature_selection_meta_pb2.FilterMeta()
return result
| 2,165 | 35.1 | 81 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/iso_model_filter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod
from federatedml.feature.feature_selection.model_adapter.isometric_model import IsometricModel
from federatedml.framework.hetero.sync import selection_info_sync
from federatedml.param.feature_selection_param import CommonFilterParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.statistic.data_overview import look_up_names_from_header
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.component_properties import ComponentProperties
class IsoModelFilter(BaseFilterMethod):
def __init__(self, filter_param, iso_model: IsometricModel):
self.iso_model = iso_model
super(IsoModelFilter, self).__init__(filter_param)
def _parse_filter_param(self, filter_param: CommonFilterParam):
self.metrics = filter_param.metrics[0]
LOGGER.debug(f"In parse filter param, metrics: {filter_param.metrics}")
if self.metrics not in self.iso_model.valid_value_name:
raise ValueError(f"Metric {self.metrics} is not in this model's valid_value_name")
self.filter_type = filter_param.filter_type[0]
self.take_high = filter_param.take_high[0]
self.threshold = filter_param.threshold[0]
self.select_federated = filter_param.select_federated[0]
self._validation_check()
def get_meta_obj(self):
LOGGER.debug(f"metrics: {self.metrics}, filter_type: {self.filter_type},"
f"take_high: {self.take_high}, threshold: {self.threshold},"
f"select_federated: {self.select_federated}")
result = feature_selection_meta_pb2.FilterMeta(
metrics=self.metrics,
filter_type=self.filter_type,
take_high=self.take_high,
threshold=self.threshold,
select_federated=self.select_federated
)
return result
def _validation_check(self):
if self.metrics == consts.IV:
if not self.take_high:
raise ValueError("Iv filter should take higher iv columns")
def fit(self, data_instances, suffix):
m = self.metrics
metric_info = self.iso_model.get_metric_info(m)
all_feature_values = metric_info.get_partial_values(
self.selection_properties.select_col_names)
col_names = [x for x in self.selection_properties.select_col_names]
# all_feature_values = np.array(metric_info.values)
# col_names = [x for x in metric_info.col_names]
filter_type = self.filter_type
take_high = self.take_high
threshold = self.threshold
if filter_type == "threshold":
results = self._threshold_fit(all_feature_values, threshold, take_high)
elif filter_type == "top_k":
results = self._top_k_fit(all_feature_values, threshold, take_high)
else:
results = self._percentile_fit(all_feature_values, threshold, take_high)
results = set(results)
for v_idx, v in enumerate(all_feature_values):
col_name = col_names[v_idx]
self.selection_properties.add_feature_value(col_name, v)
if v_idx in results:
self.selection_properties.add_left_col_name(col_name)
self._keep_one_feature(pick_high=take_high)
return self
def _threshold_fit(self, values, threshold, take_high):
result = []
for idx, v in enumerate(values):
if take_high:
if v >= threshold:
result.append(idx)
else:
if v <= threshold:
result.append(idx)
return result
def _top_k_fit(self, values, k, take_high):
sorted_idx = np.argsort(values)
result = []
if take_high:
for idx in sorted_idx[::-1]:
result.append(idx)
if len(result) >= k:
break
else:
for idx in sorted_idx:
result.append(idx)
if len(result) >= k:
break
return result
def _percentile_fit(self, values, percent, take_high):
k = math.ceil(percent * len(values))
return self._top_k_fit(values, k, take_high)
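# --- Editor's sketch (hypothetical, not part of FATE) ---
# The three local selection rules above on toy values (higher is better):
# threshold 0.5 keeps indexes [1, 3]; top_k 2 keeps [3, 1]; percentile 0.5
# keeps ceil(0.5 * 4) = 2 features, i.e. [3, 1].
def _example_local_selection():
    f = IsoModelFilter.__new__(IsoModelFilter)  # the rules use no instance state
    values = [0.2, 0.7, 0.1, 0.9]
    return (f._threshold_fit(values, 0.5, take_high=True),
            f._top_k_fit(values, 2, take_high=True),
            f._percentile_fit(values, 0.5, take_high=True))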
class FederatedIsoModelFilter(IsoModelFilter):
def __init__(self, filter_param, iso_model: IsometricModel, role, cpp: ComponentProperties):
self.role = role
self.cpp = cpp
super(FederatedIsoModelFilter, self).__init__(filter_param, iso_model)
self.sync_obj = None
@property
def party_id(self):
return self.cpp.local_partyid
def _parse_filter_param(self, filter_param: CommonFilterParam):
super()._parse_filter_param(filter_param)
if filter_param.host_thresholds is None:
self.host_threshold = None
else:
self.host_threshold = filter_param.host_thresholds[0]
if isinstance(self.host_threshold, float):
self.host_threshold = [self.host_threshold] * len(self.cpp.host_party_idlist)
if self.role == consts.GUEST:
self.host_selection_properties = []
def fit(self, data_instances, suffix):
self._sync_select_info(suffix)
if self.role == consts.GUEST:
self._guest_fit(suffix)
else:
self._host_fit(suffix)
self._keep_one_feature(pick_high=self.take_high)
return self
def _guest_fit(self, suffix):
m = self.metrics
# for idx, m in enumerate(self.metrics):
value_obj = self.iso_model.get_metric_info(m)
self._fix_with_value_obj(value_obj, suffix)
def _fix_with_value_obj(self, value_obj, suffix):
all_feature_values = value_obj.get_partial_values(self.selection_properties.select_col_names)
col_names = [("guest", x) for x in self.selection_properties.select_col_names]
if self.select_federated:
# all_feature_values, col_names = value_obj.union_result()
host_threshold = {}
for idx, host_party_id in enumerate(value_obj.host_party_ids):
# host_id = self.cpp.host_party_idlist.index(int(host_party_id))
host_property = self.host_selection_properties[idx]
all_feature_values.extend(value_obj.get_partial_values(
host_property.select_col_names, host_party_id
))
col_names.extend([(host_party_id, x) for x in host_property.select_col_names])
if self.host_threshold is None:
host_threshold[host_party_id] = self.threshold
else:
host_threshold[host_party_id] = self.host_threshold[idx]
else:
# all_feature_values = value_obj.get_values()
# col_names = value_obj.get_col_names()
host_threshold = None
filter_type = self.filter_type
take_high = self.take_high
threshold = self.threshold
if filter_type == "threshold":
results = self._threshold_fit(all_feature_values, threshold,
take_high, host_threshold, col_names)
elif filter_type == "top_k":
results = self._top_k_fit(all_feature_values, threshold, take_high)
else:
results = self._percentile_fit(all_feature_values, threshold, take_high)
# LOGGER.debug(f"results length: {len(results)}, type of results is: {type(results)}")
results = set(results)
# LOGGER.debug(f"filter_type: {filter_type}, results: {results}, "
# f"all_feature_values: {all_feature_values}")
for v_idx, v in enumerate(all_feature_values):
# LOGGER.debug(f"all_feature_values: {all_feature_values},"
# f"col_names: {col_names},"
# f"v_idx: {v_idx}")
col_name = col_names[v_idx]
if col_name[0] == consts.GUEST:
self.selection_properties.add_feature_value(col_name[1], v)
if v_idx in results:
self.selection_properties.add_left_col_name(col_name[1])
else:
# LOGGER.debug(f"host_selection_propertied: {self.host_selection_properties}")
# LOGGER.debug(f" col_name: {col_name}")
host_idx = self.cpp.host_party_idlist.index(int(col_name[0]))
# LOGGER.debug(f"header: {self.host_selection_properties[host_idx].header}")
host_prop = self.host_selection_properties[host_idx]
# if len(self.metrics) == 1:
host_prop.add_feature_value(col_name[1], v)
if v_idx in results:
host_prop.add_left_col_name(col_name[1])
for host_prop in self.host_selection_properties:
self._keep_one_feature(pick_high=self.take_high, selection_properties=host_prop,
feature_values=host_prop.feature_values)
if self.select_federated:
self.sync_obj.sync_select_results(self.host_selection_properties, suffix=suffix)
def _threshold_fit(self, values, threshold, take_high,
host_thresholds=None, col_names=None):
if host_thresholds is None:
return super()._threshold_fit(values, threshold, take_high)
result = []
for idx, v in enumerate(values):
party = col_names[idx][0]
if party == 'guest':
thres = threshold
else:
thres = host_thresholds[party]
if take_high:
if v >= thres:
result.append(idx)
else:
if v <= thres:
result.append(idx)
return result
def get_meta_obj(self):
result = feature_selection_meta_pb2.FilterMeta(
metrics=self.metrics,
filter_type=self.filter_type,
take_high=self.take_high,
threshold=self.threshold,
select_federated=self.select_federated
)
return result
def _host_fit(self, suffix):
if not self.select_federated:
for col_name in self.selection_properties.last_left_col_names:
self.selection_properties.add_left_col_name(col_name)
return
# if self.selection_properties.anonymous_header:
self.sync_obj.sync_select_results(self.selection_properties,
header=self.selection_properties.header,
anonymous_header=self.selection_properties.anonymous_header,
suffix=suffix)
# LOGGER.debug("In fit selected result, left_col_names: {}".format(self.selection_properties.left_col_names))
return self
def _sync_select_info(self, suffix):
if not self.select_federated:
return
if self.role == consts.GUEST:
assert isinstance(self.sync_obj, selection_info_sync.Guest)
self.host_selection_properties = self.sync_obj.sync_select_cols(suffix=suffix)
else:
# if self.selection_properties.anonymous_header:
encoded_names = look_up_names_from_header(self.selection_properties.select_col_names,
self.selection_properties.header,
self.selection_properties.anonymous_header)
"""else:
encoded_names = []
for col_name in self.selection_properties.select_col_names:
fid = self.selection_properties.col_name_maps[col_name]
encoded_names.append(anonymous_generator.generate_anonymous(
fid=fid, role=self.role, party_id=self.party_id
))
"""
# LOGGER.debug(f"Before send, encoded_names: {encoded_names},"
# f"select_names: {self.selection_properties.select_col_names}")
self.sync_obj.sync_select_cols(encoded_names, suffix=suffix)
def set_transfer_variable(self, transfer_variable):
if self.role == consts.GUEST:
self.sync_obj = selection_info_sync.Guest()
else:
self.sync_obj = selection_info_sync.Host()
self.sync_obj.register_selection_trans_vars(transfer_variable)
| 13,217 | 41.63871 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/percentage_value_filter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import operator
from federatedml.feature.fate_element_type import NoneType
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod
from federatedml.param.feature_selection_param import PercentageValueParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.statistic.data_overview import is_sparse_data
class PercentageValueFilter(BaseFilterMethod):
"""
filter the columns if all values in this feature is the same
"""
def __init__(self, filter_param: PercentageValueParam):
super().__init__(filter_param)
def _parse_filter_param(self, filter_param):
self.upper_pct = filter_param.upper_pct
def fit(self, data_instances, suffix):
k = 1
while (1 / k) > self.upper_pct:
k += 1
total_num = data_instances.count()
thres_num = math.ceil(total_num * self.upper_pct)
mode_res = self._find_kth_mode(data_instances, k)
for col_index, mode_info in mode_res.items():
col_name = self.selection_properties.header[col_index]
if mode_info is None:
self.selection_properties.add_left_col_name(col_name)
self.selection_properties.add_feature_value(col_name, False)
continue
mode_num = mode_info[1]
if mode_num <= thres_num:
self.selection_properties.add_left_col_name(col_name)
self.selection_properties.add_feature_value(col_name, False)
else:
self.selection_properties.add_feature_value(col_name, True)
self._keep_one_feature(pick_high=True)
return self
def _find_kth_mode(self, data_instances, k):
"""
        Find the candidate 1/k mode of each selected column. For each column, return
        the most frequent candidate value together with its exact count; a column
        maps to None when no candidate survives.
Parameters
----------
data_instances: Table
Original data
k: int
"""
is_sparse = is_sparse_data(data_instances)
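        # two passes over the data: (1) a Misra-Gries style scan keeps at most
        # k - 1 candidate values per column, guaranteeing that any value with
        # frequency > 1/k survives; (2) the candidates are then re-counted exactly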
def find_mode_candidate(instances, select_cols):
"""
Find at most k - 1 mode candidates.
            Parameters
            ----------
            instances: Data generator
                Original data
            select_cols: list
                Indicates columns that need to be operated.
            `k` (int) and `is_sparse` (bool) are read from the enclosing scope.
Returns
-------
all_candidates: dict
Each key is col_index and value is a list that contains mode candidates.
"""
all_candidates = {}
for col_index in select_cols:
all_candidates[col_index] = {}
for _, instant in instances:
for col_index in select_cols:
candidate_dict = all_candidates[col_index]
if is_sparse:
feature_value = instant.features.get_data(col_index, 0)
else:
feature_value = instant.features[col_index]
if isinstance(feature_value, float):
feature_value = round(feature_value, 8)
if feature_value in candidate_dict:
candidate_dict[feature_value] += 1
elif len(candidate_dict) < k - 1:
candidate_dict[feature_value] = 1
else:
to_delete_col = []
for key in candidate_dict:
candidate_dict[key] -= 1
if candidate_dict[key] == 0:
to_delete_col.append(key)
for d_k in to_delete_col:
del candidate_dict[d_k]
for col_index, candidate_dict in all_candidates.items():
candidate_dict = {key: 0 for key, _ in candidate_dict.items()}
all_candidates[col_index] = candidate_dict
return all_candidates
def merge_mode_candidate(d1, d2):
assert len(d1) == len(d2)
for col_idx, d in d1.items():
d.update(d2[col_idx])
return d1
def merge_candidates_num(candi_1, candi_2):
assert len(candi_1) == len(candi_2)
for col_idx, candidate_dict in candi_1.items():
candi_dict_2 = candi_2[col_idx]
for feature_value, num in candi_dict_2.items():
if feature_value in candidate_dict:
candidate_dict[feature_value] += num
else:
candidate_dict[feature_value] = num
return candi_1
def static_candidates_num(instances, select_cols, all_candidates):
"""
            Count the exact occurrences of each mode candidate.
Parameters
----------
instances: Data generator
Original data
select_cols: list
Indicates columns that need to be operated.
all_candidates: dict
Each key is col_index and value is a list that contains mode candidates.
"""
for _, instant in instances:
for col_index in select_cols:
candidate_dict = all_candidates[col_index]
if is_sparse:
feature_value = instant.features.get_data(col_index, NoneType())
else:
feature_value = instant.features[col_index]
if isinstance(feature_value, float):
feature_value = round(feature_value, 8)
if feature_value in candidate_dict:
candidate_dict[feature_value] += 1
# mode_result = {}
# for col_index, candidate_dict in all_candidates.items():
# feature_value, nums = sorted(candidate_dict.items(), key=operator.itemgetter(1), reverse=False)[0]
# mode_result[col_index] = (feature_value, nums)
return all_candidates
find_func = functools.partial(find_mode_candidate,
select_cols=self.selection_properties.select_col_indexes)
all_candidates = data_instances.applyPartitions(find_func).reduce(merge_mode_candidate)
static_func = functools.partial(static_candidates_num,
select_cols=self.selection_properties.select_col_indexes,
all_candidates=all_candidates)
mode_candidate_statics = data_instances.applyPartitions(static_func).reduce(merge_candidates_num)
result = {}
for col_index, candidate_dict in mode_candidate_statics.items():
if len(candidate_dict) > 0:
res = sorted(candidate_dict.items(), key=operator.itemgetter(1), reverse=True)[0]
else:
res = None
result[col_index] = res
return result
# def get_meta_obj(self, meta_dicts):
# result = feature_selection_meta_pb2.PercentageValueFilterMeta(upper_pct=self.upper_pct)
# meta_dicts['pencentage_value_meta'] = result
# return meta_dicts
def get_meta_obj(self):
result = feature_selection_meta_pb2.FilterMeta()
return result
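# --- Editor's sketch (hypothetical, not part of FATE) ---
# The candidate pass above is the classic Misra-Gries heavy-hitters algorithm;
# a single-column version over a plain iterable of values:
def _example_misra_gries(stream, k):
    counters = {}
    for v in stream:
        if v in counters:
            counters[v] += 1
        elif len(counters) < k - 1:
            counters[v] = 1
        else:
            for key in list(counters):
                counters[key] -= 1
                if counters[key] == 0:
                    del counters[key]
    return set(counters)  # superset of every value occurring > n/k times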
| 8,195 | 38.215311 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/variance_coe_filter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod
from federatedml.param.feature_selection_param import VarianceOfCoeSelectionParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.util import consts
class VarianceCoeFilter(BaseFilterMethod):
"""
Filter the columns if coefficient of variance is less than a threshold.
"""
def __init__(self, filter_param: VarianceOfCoeSelectionParam):
super().__init__(filter_param)
self.statics_obj = None
def _parse_filter_param(self, filter_param):
self.value_threshold = filter_param.value_threshold
def set_statics_obj(self, statics_obj):
self.statics_obj = statics_obj
def fit(self, data_instances, suffix):
if self.statics_obj is None:
self.statics_obj = MultivariateStatisticalSummary(data_instances)
std_var = self.statics_obj.get_std_variance()
mean_value = self.statics_obj.get_mean()
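        # coefficient of variation = |std / mean|; the mean is clipped to
        # FLOAT_ZERO to avoid division by (near-)zero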
for col_name in self.selection_properties.select_col_names:
s_v = std_var.get(col_name)
m_v = mean_value.get(col_name)
if math.fabs(m_v) < consts.FLOAT_ZERO:
m_v = consts.FLOAT_ZERO
coeff_of_var = math.fabs(s_v / m_v)
if coeff_of_var >= self.value_threshold:
self.selection_properties.add_left_col_name(col_name)
self.selection_properties.add_feature_value(col_name, coeff_of_var)
self._keep_one_feature(pick_high=True)
return self
# def get_meta_obj(self, meta_dicts):
# result = feature_selection_meta_pb2.VarianceOfCoeSelectionMeta(value_threshold=self.value_threshold)
# meta_dicts['variance_coe_meta'] = result
# return meta_dicts
def get_meta_obj(self):
result = feature_selection_meta_pb2.FilterMeta()
return result
| 2,651 | 36.352113 | 110 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 661 | 35.777778 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/iv_filter.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.feature.feature_selection.iso_model_filter import FederatedIsoModelFilter
from federatedml.feature.feature_selection.model_adapter import isometric_model
from federatedml.param.feature_selection_param import IVFilterParam
from federatedml.util import LOGGER
class IVFilter(FederatedIsoModelFilter):
def _parse_filter_param(self, filter_param: IVFilterParam):
super()._parse_filter_param(filter_param)
self.merge_type = filter_param.mul_class_merge_type[0]
def _merge_iv(self):
metric_infos = self.iso_model.get_all_metric_info()
col_names = metric_infos[0].col_names
host_party_ids = metric_infos[0].host_party_ids
host_col_names = metric_infos[0].host_col_names
values = metric_infos[0].values
host_values = np.array(metric_infos[0].host_values)
if self.merge_type == "max":
for m in metric_infos[1:]:
values = np.maximum(values, m.values)
host_values = np.maximum(host_values, m.host_values)
elif self.merge_type == "min":
for m in metric_infos[1:]:
values = np.maximum(values, m.values)
host_values = np.maximum(host_values, m.host_values)
else:
for m in metric_infos[1:]:
values += m.values
host_values += m.host_values
"""for m in metric_infos[1:]:
if self.merge_type == "max":
values = np.maximum(values, m.values)
host_values = np.maximum(host_values, m.host_values)
elif self.merge_type == "min":
values = np.minimum(values, m.values)
host_values = np.minimum(host_values, m.host_values)
else:
values += m.values
host_values += m.host_values
"""
if self.merge_type == 'average':
values /= len(metric_infos)
host_values /= len(metric_infos)
# LOGGER.debug(f"After merge, iv_values: {values}, host_values: {host_values},"
# f" merge_type:{self.merge_type}")
single_info = isometric_model.SingleMetricInfo(
values=values,
col_names=col_names,
host_party_ids=host_party_ids,
host_values=host_values,
host_col_names=host_col_names
)
return single_info
def _guest_fit(self, suffix):
# for idx, m in enumerate(self.metrics):
value_obj = self._merge_iv()
self._fix_with_value_obj(value_obj, suffix)
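# --- Editor's sketch (hypothetical, not part of FATE) ---
# How the multi-class merge above combines per-class IV vectors, assuming a
# numpy array of shape (n_classes, n_features):
def _example_merge_iv(per_class_values, merge_type="max"):
    per_class_values = np.asarray(per_class_values)
    if merge_type == "max":
        return per_class_values.max(axis=0)
    if merge_type == "min":
        return per_class_values.min(axis=0)
    return per_class_values.mean(axis=0)  # "average"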
| 3,207 | 39.1 | 90 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/test/outlier_filter_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.feature.feature_selection.filter_factory import get_filter
from federatedml.feature.feature_selection.model_adapter.adapter_factory import adapter_factory
from federatedml.feature.feature_selection.selection_properties import SelectionProperties
from federatedml.feature.hetero_feature_selection.base_feature_selection import BaseHeteroFeatureSelection
from federatedml.feature.instance import Instance
from federatedml.param.feature_selection_param import FeatureSelectionParam
from federatedml.param.statistics_param import StatisticsParam
from federatedml.statistic.data_statistics import DataStatistics
from federatedml.util import consts
class TestOutlierFilter(unittest.TestCase):
def setUp(self):
self.job_id = str(uuid.uuid1())
session.init(self.job_id)
def gen_data(self, data_num, feature_num, partition):
data = []
header = [str(i) for i in range(feature_num)]
anonymous_header = ["guest_9999_x" + str(i) for i in range(feature_num)]
# col_2 = np.random.rand(data_num)
col_data = []
for _ in range(feature_num - 1):
col_1 = np.random.randn(data_num)
col_data.append(col_1)
outlier_data = list(np.random.randn(int(data_num * 0.8)))
outlier_data.extend(100 * np.ones(data_num - int(data_num * 0.8)))
col_data.append(outlier_data)
for key in range(data_num):
data.append((key, Instance(features=np.array([col[key] for col in col_data]))))
result = session.parallelize(data, include_key=True, partition=partition)
result.schema = {'header': header,
"anonymous_header": anonymous_header
}
self.header = header
return result
def test_filter_logic(self):
data_table = self.gen_data(1000, 10, 4)
select_param = FeatureSelectionParam()
select_param.outlier_param.percentile = 0.9
select_param.outlier_param.upper_threshold = 99
selection_obj = self._make_selection_obj(data_table)
filter_obj = get_filter(consts.OUTLIER_COLS, select_param, model=selection_obj)
select_properties = SelectionProperties()
select_properties.set_header(self.header)
select_properties.set_last_left_col_indexes([x for x in range(len(self.header))])
select_properties.set_select_all_cols()
filter_obj.set_selection_properties(select_properties)
res_select_properties = filter_obj.fit(data_table, suffix='').selection_properties
self.assertEqual(res_select_properties.all_left_col_names, [self.header[x] for x in range(9)])
self.assertEqual(len(res_select_properties.all_left_col_names), 9)
def _make_selection_obj(self, data_table):
statistics_param = StatisticsParam(statistics="90%")
statistics_param.check()
print(statistics_param.statistics)
test_obj = DataStatistics()
test_obj.model_param = statistics_param
test_obj._init_model(statistics_param)
test_obj.fit(data_table)
adapter = adapter_factory(consts.STATISTIC_MODEL)
meta_obj = test_obj.export_model()['StatisticMeta']
param_obj = test_obj.export_model()['StatisticParam']
iso_model = adapter.convert(meta_obj, param_obj)
selection_obj = BaseHeteroFeatureSelection()
selection_obj.isometric_models = {consts.STATISTIC_MODEL: iso_model}
return selection_obj
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| 4,352 | 39.305556 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/test/manually_filter_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid

import numpy as np

from fate_arch.session import computing_session as session
from federatedml.feature.feature_selection.filter_factory import get_filter
from federatedml.feature.feature_selection.selection_properties import SelectionProperties
from federatedml.param.feature_selection_param import FeatureSelectionParam
from federatedml.util import consts


class TestManuallyFilter(unittest.TestCase):
    def setUp(self):
        self.job_id = str(uuid.uuid1())
        session.init(self.job_id)

    def gen_data(self, data_num, feature_num, partition):
        data = []
        header = [str(i) for i in range(feature_num)]
        col_data = []
        for _ in range(feature_num):
            col_1 = np.random.randn(data_num)
            col_data.append(col_1)
        for key in range(data_num):
            data.append((key, np.array([col[key] for col in col_data])))

        result = session.parallelize(data, include_key=True, partition=partition)
        result.schema = {'header': header}
        self.header = header
        return result

    def test_filter_logic(self):
        data_table = self.gen_data(1000, 10, 48)
        select_param = FeatureSelectionParam()
        select_param.manually_param.filter_out_indexes = [9, 8, 7]
        select_param.manually_param.filter_out_names = ['6', '5', '4']
        filter_obj = get_filter(consts.MANUALLY_FILTER, select_param)
        select_properties = SelectionProperties()
        select_properties.set_header(self.header)
        select_properties.set_last_left_col_indexes([x for x in range(len(self.header))])
        select_properties.set_select_all_cols()
        filter_obj.set_selection_properties(select_properties)
        res_select_properties = filter_obj.fit(data_table, suffix='').selection_properties

        result = ['0', '1', '2', '3']
        self.assertEqual(res_select_properties.all_left_col_names, result)

    def test_left_logic(self):
        data_table = self.gen_data(1000, 10, 48)
        select_param = FeatureSelectionParam()
        select_param.manually_param.left_col_indexes = [0, 1]
        select_param.manually_param.left_col_names = ['3', '2']
        filter_obj = get_filter(consts.MANUALLY_FILTER, select_param)
        select_properties = SelectionProperties()
        select_properties.set_header(self.header)
        select_properties.set_last_left_col_indexes([x for x in range(len(self.header))])
        select_properties.set_select_all_cols()
        filter_obj.set_selection_properties(select_properties)
        res_select_properties = filter_obj.fit(data_table, suffix='').selection_properties

        result = ['0', '1', '2', '3']
        self.assertEqual(res_select_properties.all_left_col_names, result)

    def tearDown(self):
        session.stop()


if __name__ == '__main__':
    unittest.main()
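
# --- Illustrative note (added commentary, not part of the original test) ---
# The manually filter is exercised in both of its modes above:
#   * filter_out_indexes / filter_out_names drop the listed columns and keep
#     the rest (dropping 9, 8, 7 and '6', '5', '4' leaves '0'..'3');
#   * left_col_indexes / left_col_names keep only the union of the listed
#     columns (keeping 0, 1 and '3', '2' also leaves '0'..'3').
# A minimal drop-mode configuration looks like:
#
#     param = FeatureSelectionParam()
#     param.manually_param.filter_out_names = ['4', '5', '6']
#     filter_obj = get_filter(consts.MANUALLY_FILTER, param)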
| 3,518 | 38.988636 | 90 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/test/percentage_value_filter_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid
import random

import numpy as np

from fate_arch.session import computing_session as session
from federatedml.feature.feature_selection.filter_factory import get_filter
from federatedml.param.feature_selection_param import FeatureSelectionParam
from federatedml.util import consts
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.feature.feature_selection.selection_properties import SelectionProperties


class TestPercentageValueFilter(unittest.TestCase):
    def setUp(self):
        self.job_id = str(uuid.uuid1())
        session.init(self.job_id)

    def gen_data(self, data_num, partition):
        col_data = []
        header = [str(i) for i in range(6)]

        # col 0: 80% ones
        mode_num = int(0.8 * data_num)
        other_num = data_num - mode_num
        col_1 = np.array([1] * mode_num + [0] * other_num)
        random.shuffle(col_1)
        col_data.append(col_1)

        # col 1: 79.9% ones
        mode_num = int(0.799 * data_num)
        other_num = data_num - mode_num
        col_1 = np.array([1] * mode_num + [0] * other_num)
        random.shuffle(col_1)
        col_data.append(col_1)

        # col 2: 80.1% ones
        mode_num = int(0.801 * data_num)
        other_num = data_num - mode_num
        col_1 = np.array([1] * mode_num + [0] * other_num)
        random.shuffle(col_1)
        col_data.append(col_1)

        # col 3: fully random values
        col_2 = np.random.randn(data_num)
        col_data.append(col_2)

        # col 4: 20% constant 0.5, rest random
        mode_num = int(0.2 * data_num)
        other_num = data_num - mode_num
        col_1 = np.array([0.5] * mode_num + list(np.random.randn(other_num)))
        print("col 0.5 count: {}".format(list(col_1).count(0.5)))
        random.shuffle(col_1)
        col_data.append(col_1)

        # col 5: 79% constant 0.5, rest random
        mode_num = int(0.79 * data_num)
        other_num = data_num - mode_num
        col_1 = np.array([0.5] * mode_num + list(np.random.randn(other_num)))
        random.shuffle(col_1)
        col_data.append(col_1)

        data = []
        data_2 = []
        for key in range(data_num):
            features = np.array([col[key] for col in col_data])
            inst = Instance(inst_id=key, features=features, label=key % 2)
            data.append((key, inst))

            sparse_vec = SparseVector(indices=[i for i in range(len(features))], data=features, shape=len(features))
            inst_2 = Instance(inst_id=key, features=sparse_vec, label=key % 2)
            data_2.append((key, inst_2))

        result = session.parallelize(data, include_key=True, partition=partition)
        result_2 = session.parallelize(data_2, include_key=True, partition=partition)
        result.schema = {'header': header}
        result_2.schema = {'header': header}
        self.header = header
        return result, result_2

    def test_percentage_value_logic(self):
        data_table, data_table_2 = self.gen_data(1000, 48)
        self._run_filter(data_table)
        self._run_filter(data_table_2)

    def _run_filter(self, data_table):
        select_param = FeatureSelectionParam()
        select_param.percentage_value_param.upper_pct = 0.2
        filter_obj = get_filter(consts.PERCENTAGE_VALUE, select_param)
        select_properties = SelectionProperties()
        select_properties.set_header(self.header)
        select_properties.set_last_left_col_indexes([x for x in range(len(self.header))])
        select_properties.set_select_all_cols()
        filter_obj.set_selection_properties(select_properties)
        res_select_properties = filter_obj.fit(data_table, suffix='').selection_properties

        left_cols = [3, 4]
        self.assertEqual(res_select_properties.all_left_col_names, [self.header[x] for x in left_cols])

    def tearDown(self):
        session.stop()


if __name__ == '__main__':
    unittest.main()
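
# --- Illustrative note (added commentary, not part of the original test) ---
# upper_pct = 0.2 appears to drop any column whose single most frequent value
# covers more than 20% of rows: columns 0-2 (~80% ones) and column 5 (~79%
# constant 0.5) go, while column 3 (fully random) and column 4 (exactly 20%
# constant 0.5) stay, matching left_cols = [3, 4]. The dominant-value share
# of a column can be checked directly with numpy:
#
#     col = np.array([0.5] * 200 + list(np.random.randn(800)))
#     top_pct = np.max(np.unique(col, return_counts=True)[1]) / len(col)  # ~0.2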
| 4,439 | 36.627119 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/test/variance_coe_filter_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid

import numpy as np

from fate_arch.session import computing_session as session
from federatedml.feature.feature_selection.filter_factory import get_filter
from federatedml.feature.feature_selection.selection_properties import SelectionProperties
from federatedml.param.feature_selection_param import FeatureSelectionParam
from federatedml.feature.hetero_feature_selection.base_feature_selection import BaseHeteroFeatureSelection
from federatedml.param.statistics_param import StatisticsParam
from federatedml.statistic.data_statistics import DataStatistics
from federatedml.feature.instance import Instance
from federatedml.util import consts
from federatedml.feature.feature_selection.model_adapter.adapter_factory import adapter_factory


class TestVarianceCoeFilter(unittest.TestCase):
    def setUp(self):
        self.job_id = str(uuid.uuid1())
        session.init(self.job_id)

    def gen_data(self, data_num, feature_num, partition):
        data = []
        header = [str(i) for i in range(feature_num)]
        anonymous_header = ["guest_9999_x" + str(i) for i in range(feature_num)]

        col_data = []
        for _ in range(feature_num - 1):
            # resample until the column mean is non-zero so the coefficient
            # of variation (std / mean) is well defined
            while True:
                col_1 = np.random.rand(data_num)
                if np.mean(col_1) != 0:
                    break
            col_data.append(col_1)
        # last column is constant, so its coefficient of variation is 0
        col_data.append(10 * np.ones(data_num))

        for key in range(data_num):
            data.append((key, Instance(features=np.array([col[key] for col in col_data]))))

        result = session.parallelize(data, include_key=True, partition=partition)
        result.schema = {'header': header,
                         "anonymous_header": anonymous_header}
        self.header = header
        self.coe_list = []
        for col in col_data:
            self.coe_list.append(np.std(col) / np.mean(col))
        return result

    def test_filter_logic(self):
        data_table = self.gen_data(1000, 10, 4)
        select_param = FeatureSelectionParam()
        select_param.variance_coe_param.value_threshold = 0.1
        selection_obj = self._make_selection_obj(data_table)
        filter_obj = get_filter(consts.COEFFICIENT_OF_VARIATION_VALUE_THRES, select_param,
                                model=selection_obj)
        select_properties = SelectionProperties()
        select_properties.set_header(self.header)
        select_properties.set_last_left_col_indexes([x for x in range(len(self.header))])
        select_properties.set_select_all_cols()
        filter_obj.set_selection_properties(select_properties)
        res_select_properties = filter_obj.fit(data_table, suffix='').selection_properties

        result = [self.header[idx] for idx, x in enumerate(self.coe_list)
                  if x >= select_param.variance_coe_param.value_threshold]
        self.assertEqual(res_select_properties.all_left_col_names, result)
        self.assertEqual(len(res_select_properties.all_left_col_names), 9)

    def _make_selection_obj(self, data_table):
        statistics_param = StatisticsParam(statistics="summary")
        statistics_param.check()
        print(statistics_param.statistics)
        test_obj = DataStatistics()
        test_obj.model_param = statistics_param
        test_obj._init_model(statistics_param)
        test_obj.fit(data_table)

        adapter = adapter_factory(consts.STATISTIC_MODEL)
        meta_obj = test_obj.export_model()['StatisticMeta']
        param_obj = test_obj.export_model()['StatisticParam']
        iso_model = adapter.convert(meta_obj, param_obj)
        selection_obj = BaseHeteroFeatureSelection()
        selection_obj.isometric_models = {consts.STATISTIC_MODEL: iso_model}
        return selection_obj

    def tearDown(self):
        session.stop()


if __name__ == '__main__':
    unittest.main()
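
# --- Illustrative sketch (added commentary, not part of the original test) ---
# The expected result is derived from self.coe_list, i.e. the per-column
# coefficient of variation std / mean; the constant column of 10s has a
# coefficient of 0 and is dropped at any positive threshold:
#
#     col = 10 * np.ones(1000)
#     coe = np.std(col) / np.mean(col)   # 0.0 < value_threshold -> filtered out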
| 4,539 | 39.535714 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/test/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 661 | 35.777778 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/test/unique_value_filter_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid

import numpy as np

from fate_arch.session import computing_session as session
from federatedml.feature.feature_selection.filter_factory import get_filter
from federatedml.feature.feature_selection.model_adapter.adapter_factory import adapter_factory
from federatedml.feature.feature_selection.selection_properties import SelectionProperties
from federatedml.feature.hetero_feature_selection.base_feature_selection import BaseHeteroFeatureSelection
from federatedml.param.feature_selection_param import FeatureSelectionParam
from federatedml.param.statistics_param import StatisticsParam
from federatedml.statistic.data_statistics import DataStatistics
from federatedml.util import consts
from federatedml.feature.instance import Instance


class TestUniqueValueFilter(unittest.TestCase):
    def setUp(self):
        self.job_id = str(uuid.uuid1())
        session.init(self.job_id)

    def gen_data(self, data_num, partition):
        data = []
        header = [str(i) for i in range(2)]
        anonymous_header = ["guest_9999_x" + str(i) for i in range(2)]
        col_1 = np.random.randint(100) * np.ones(data_num)   # constant column
        col_2 = np.random.randn(data_num)                    # varying column
        for key in range(data_num):
            data.append((key, Instance(features=np.array([col_1[key], col_2[key]]))))

        result = session.parallelize(data, include_key=True, partition=partition)
        result.schema = {'header': header,
                         "anonymous_header": anonymous_header}
        self.header = header
        return result

    def test_unique_logic(self):
        data_table = self.gen_data(1000, 48)
        select_param = FeatureSelectionParam()
        selection_obj = self._make_selection_obj(data_table)
        filter_obj = get_filter(consts.UNIQUE_VALUE, select_param, model=selection_obj)
        select_properties = SelectionProperties()
        select_properties.set_header(self.header)
        select_properties.set_last_left_col_indexes([x for x in range(len(self.header))])
        select_properties.set_select_all_cols()
        filter_obj.set_selection_properties(select_properties)
        res_select_properties = filter_obj.fit(data_table, suffix='').selection_properties

        self.assertEqual(res_select_properties.all_left_col_names, [self.header[1]])

    def _make_selection_obj(self, data_table):
        statistics_param = StatisticsParam(statistics="summary")
        statistics_param.check()
        print(statistics_param.statistics)
        test_obj = DataStatistics()
        test_obj.model_param = statistics_param
        test_obj._init_model(statistics_param)
        test_obj.fit(data_table)

        adapter = adapter_factory(consts.STATISTIC_MODEL)
        meta_obj = test_obj.export_model()['StatisticMeta']
        param_obj = test_obj.export_model()['StatisticParam']
        iso_model = adapter.convert(meta_obj, param_obj)
        selection_obj = BaseHeteroFeatureSelection()
        selection_obj.isometric_models = {consts.STATISTIC_MODEL: iso_model}
        return selection_obj

    def tearDown(self):
        session.stop()


if __name__ == '__main__':
    unittest.main()
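
# --- Illustrative note (added commentary, not part of the original test) ---
# The unique-value filter is expected to drop effectively constant columns.
# Column 0 above is one random integer repeated data_num times, so only the
# standard-normal column 1 should survive:
#
#     col = np.random.randint(100) * np.ones(1000)
#     col.max() == col.min()   # True -> treated as a single-valued column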
| 3,819 | 39.210526 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/model_adapter/tree_adapter.py
|
import numpy as np

from federatedml.feature.feature_selection.model_adapter import isometric_model
from federatedml.feature.feature_selection.model_adapter.adapter_base import BaseAdapter
from federatedml.util import consts


def feature_importance_converter(model_meta, model_param):
    # extract feature importance from model param
    fid_mapping = dict(model_param.feature_name_fid_mapping)
    feat_importance_list = list(model_param.feature_importances)
    fids = list(fid_mapping.keys())

    cols_names, importance_val = [], []
    for feat_importance in feat_importance_list:
        fid = feat_importance.fid
        importance = feat_importance.importance
        cols_names.append(fid_mapping[fid])
        importance_val.append(importance)

    # features that never appeared in any split get importance 0
    for fid in fids:
        if fid_mapping[fid] not in cols_names:
            cols_names.append(fid_mapping[fid])
            importance_val.append(0)

    single_info = isometric_model.SingleMetricInfo(
        values=np.array(importance_val),
        col_names=cols_names
    )
    result = isometric_model.IsometricModel()
    result.add_metric_value(metric_name=consts.FEATURE_IMPORTANCE, metric_info=single_info)
    return result


def feature_importance_with_anonymous_converter(model_meta, model_param):
    # extract feature importance from model param, keeping only importances
    # that belong to the local side; host-side importances are skipped
    # because their columns are anonymous from this party's point of view
    fid_mapping = dict(model_param.feature_name_fid_mapping)
    feat_importance_list = list(model_param.feature_importances)
    local_fids = list(fid_mapping.keys())

    local_cols, local_val = [], []
    for feat_importance in feat_importance_list:
        fid = feat_importance.fid
        importance = feat_importance.importance
        site_name = feat_importance.sitename
        if site_name == consts.HOST_LOCAL:
            local_cols.append(fid_mapping[fid])
            local_val.append(importance)
        else:
            site_name = site_name.split(':')
            if site_name[0] == consts.HOST:
                continue
            else:
                local_cols.append(fid_mapping[fid])
                local_val.append(importance)

    # features that never appeared in any split get importance 0
    for fid in local_fids:
        if fid_mapping[fid] not in local_cols:
            local_cols.append(fid_mapping[fid])
            local_val.append(0)

    single_info = isometric_model.SingleMetricInfo(
        values=np.array(local_val),
        col_names=local_cols
    )
    result = isometric_model.IsometricModel()
    result.add_metric_value(metric_name=consts.FEATURE_IMPORTANCE, metric_info=single_info)
    return result


class HomoSBTAdapter(BaseAdapter):
    def convert(self, model_meta, model_param):
        return feature_importance_converter(model_meta, model_param)


class HeteroSBTAdapter(BaseAdapter):
    def convert(self, model_meta, model_param):
        return feature_importance_with_anonymous_converter(model_meta, model_param)


class HeteroFastSBTAdapter(BaseAdapter):
    def convert(self, model_meta, model_param):
        model_name = model_param.model_name
        # layered and mix modes share the same conversion logic
        if model_name in (consts.HETERO_FAST_SBT_LAYERED, consts.HETERO_FAST_SBT_MIX):
            return feature_importance_with_anonymous_converter(model_meta, model_param)
        raise ValueError('model name {} is illegal'.format(model_name))
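
# --- Illustrative usage sketch (added commentary, not part of the original module) ---
# Converting a trained SBT model's feature importances, assuming `meta_obj`
# and `param_obj` come from the model's export_model() output:
#
#     from federatedml.feature.feature_selection.model_adapter.adapter_factory \
#         import adapter_factory
#     adapter = adapter_factory(consts.HETERO_SBT)        # -> HeteroSBTAdapter
#     iso_model = adapter.convert(meta_obj, param_obj)
#     # iso_model holds consts.FEATURE_IMPORTANCE as a SingleMetricInfo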
| 3,546 | 33.105769 | 91 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/model_adapter/psi_adapter.py
|
import numpy as np

from federatedml.feature.feature_selection.model_adapter import isometric_model
from federatedml.feature.feature_selection.model_adapter.adapter_base import BaseAdapter
from federatedml.util import consts


class PSIAdapter(BaseAdapter):
    def convert(self, model_meta, model_param):
        psi_scores = dict(model_param.total_score)
        col_names, values = [], []
        for name, score in psi_scores.items():
            col_names.append(name)
            values.append(score)

        single_info = isometric_model.SingleMetricInfo(
            values=np.array(values),
            col_names=col_names
        )
        result = isometric_model.IsometricModel()
        result.add_metric_value(metric_name=consts.PSI, metric_info=single_info)
        return result
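
# --- Illustrative usage sketch (added commentary, not part of the original module) ---
# model_param.total_score maps column names to PSI scores, which the adapter
# repackages as a single metric named consts.PSI. Assuming `meta_obj` and
# `param_obj` are taken from the PSI component's exported model:
#
#     iso_model = PSIAdapter().convert(meta_obj, param_obj)
#     # iso_model now carries one SingleMetricInfo with per-column PSI values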
| 795 | 29.615385 | 88 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/model_adapter/binning_adapter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator

import numpy as np

from federatedml.feature.feature_selection.model_adapter import isometric_model
from federatedml.feature.feature_selection.model_adapter.adapter_base import BaseAdapter
from federatedml.util import LOGGER
from federatedml.util import consts


class BinningAdapter(BaseAdapter):
    def _load_one_class(self, local_result, remote_results):
        values_dict = dict(local_result.binning_result)
        values_sorted_dict = sorted(values_dict.items(), key=operator.itemgetter(0))

        values = []
        col_names = []
        for n, v in values_sorted_dict:
            values.append(v.iv)
            col_names.append(n)

        host_party_ids = [int(x.party_id) for x in remote_results]
        host_values = []
        host_col_names = []
        for host_obj in remote_results:
            binning_result = dict(host_obj.binning_result)
            h_values = []
            h_col_names = []
            for n, v in binning_result.items():
                h_values.append(v.iv)
                h_col_names.append(n)
            host_values.append(np.array(h_values))
            host_col_names.append(h_col_names)
        LOGGER.debug(f"host_party_ids: {host_party_ids}")

        single_info = isometric_model.SingleMetricInfo(
            values=np.array(values),
            col_names=col_names,
            host_party_ids=host_party_ids,
            host_values=host_values,
            host_col_names=host_col_names
        )
        return single_info

    def convert(self, model_meta, model_param):
        multi_class_result = model_param.multi_class_result
        has_remote_result = multi_class_result.has_host_result
        label_counts = len(list(multi_class_result.labels))
        local_results = list(multi_class_result.results)
        host_results = list(multi_class_result.host_results)

        result = isometric_model.IsometricModel()
        for idx, lr in enumerate(local_results):
            if label_counts == 2:
                result.add_metric_value(metric_name="iv",
                                        metric_info=self._load_one_class(lr, host_results))
            else:
                if has_remote_result:
                    remote_results = [hs for i, hs in enumerate(host_results) if (i % label_counts) == idx]
                else:
                    remote_results = []
                result.add_metric_value(metric_name="iv",
                                        metric_info=self._load_one_class(lr, remote_results))
        return result
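
# --- Illustrative note (added commentary, not part of the original module) ---
# The multi-class slicing assumes host_results interleaves labels, i.e. entry
# i belongs to label (i % label_counts), so taking every label_counts-th entry
# starting at idx gathers all hosts' results for one label:
#
#     label_counts = 3
#     host_results = ['h0_l0', 'h0_l1', 'h0_l2', 'h1_l0', 'h1_l1', 'h1_l2']
#     [hs for i, hs in enumerate(host_results) if i % label_counts == 1]
#     # -> ['h0_l1', 'h1_l1']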
| 3,434 | 39.892857 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/model_adapter/statistic_adapter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.feature.feature_selection.model_adapter import isometric_model
from federatedml.feature.feature_selection.model_adapter.adapter_base import BaseAdapter


class StatisticAdapter(BaseAdapter):
    def convert(self, model_meta, model_param):
        result = isometric_model.IsometricModel()
        self_values = model_param.self_values
        for value_obj in list(self_values.results):
            metric_name = value_obj.value_name
            values = list(value_obj.values)
            col_names = list(value_obj.col_names)
            if len(values) != len(col_names):
                raise ValueError(f"The length of values is not equal to the length"
                                 f" of col_names with metric_name: {metric_name}")
            metric_info = isometric_model.SingleMetricInfo(values, col_names)
            result.add_metric_value(metric_name, metric_info)
        return result
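
# --- Illustrative note (added commentary, not part of the original module) ---
# self_values.results holds one entry per requested statistic (e.g. "mean",
# "95%"); each entry's value_name becomes the metric name and its parallel
# values / col_names lists become a SingleMetricInfo:
#
#     metric_info = isometric_model.SingleMetricInfo([0.1, 0.2], ['x0', 'x1'])
#
# The length check guards against malformed exports where the lists drift apart.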
| 1,585 | 39.666667 | 88 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/model_adapter/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 661 | 35.777778 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/feature/feature_selection/model_adapter/adapter_factory.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.feature.feature_selection.model_adapter.statistic_adapter import StatisticAdapter
from federatedml.feature.feature_selection.model_adapter.binning_adapter import BinningAdapter
from federatedml.feature.feature_selection.model_adapter.psi_adapter import PSIAdapter
from federatedml.feature.feature_selection.model_adapter import tree_adapter
from federatedml.feature.feature_selection.model_adapter import pearson_adapter
from federatedml.util import consts


def adapter_factory(model_name):
    if model_name == consts.STATISTIC_MODEL:
        return StatisticAdapter()
    elif model_name == consts.BINNING_MODEL:
        return BinningAdapter()
    elif model_name == consts.PSI:
        return PSIAdapter()
    elif model_name == consts.HETERO_SBT:
        return tree_adapter.HeteroSBTAdapter()
    elif model_name == consts.HOMO_SBT:
        return tree_adapter.HomoSBTAdapter()
    elif model_name in [consts.HETERO_FAST_SBT_MIX, consts.HETERO_FAST_SBT_LAYERED]:
        return tree_adapter.HeteroFastSBTAdapter()
    elif model_name == "HeteroPearson":
        return pearson_adapter.PearsonAdapter()
    else:
        raise ValueError(f"Cannot recognize model_name: {model_name}")
| 1,869 | 41.5 | 98 |
py
|