repo | file | code | file_length | avg_line_length | max_line_length | extension_type
stringlengths 2-99 | stringlengths 13-225 | stringlengths 0-18.3M | int64 0-18.3M | float64 0-1.36M | int64 0-4.26M | stringclasses 1 value
---|---|---|---|---|---|---
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/poisson_regression/base_poisson_regression.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.linear_model.linear_model_base import BaseLinearModel
from federatedml.linear_model.linear_model_weight import LinearModelWeights as PoissonRegressionWeights
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.poisson_regression_param import PoissonParam
from federatedml.protobuf.generated import poisson_model_meta_pb2, poisson_model_param_pb2
from federatedml.secureprotol import PaillierEncrypt
from federatedml.util.fate_operator import vec_dot
class BasePoissonRegression(BaseLinearModel):
def __init__(self):
super(BasePoissonRegression, self).__init__()
self.model_param = PoissonParam()
# attribute:
self.model_name = 'PoissonRegression'
self.model_param_name = 'PoissonRegressionParam'
self.model_meta_name = 'PoissonRegressionMeta'
self.cipher_operator = PaillierEncrypt()
self.exposure_index = -1
def _init_model(self, params):
super()._init_model(params)
self.exposure_colname = params.exposure_colname
@staticmethod
def get_exposure_index(header, exposure_colname):
try:
exposure_index = header.index(exposure_colname)
except BaseException:
exposure_index = -1
return exposure_index
@staticmethod
def load_instance(data_instance, exposure_index):
"""
        Return data_instance with the exposure column removed
Parameters
----------
data_instance: Table of Instances, input data
exposure_index: column index of exposure variable
"""
if exposure_index == -1:
return data_instance
if exposure_index >= len(data_instance.features):
raise ValueError(
"exposure_index {} out of features' range".format(exposure_index))
data_instance.features = np.delete(data_instance.features, exposure_index)
return data_instance
@staticmethod
def load_exposure(data_instance, exposure_index):
"""
return exposure of a given data_instance
Parameters
----------
data_instance: Table of Instances, input data
exposure_index: column index of exposure variable
"""
if exposure_index == -1:
exposure = 1
else:
exposure = data_instance.features[exposure_index]
return exposure
@staticmethod
def safe_log(v):
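        # Guard against log(0): a zero exposure would give -inf, so fall back
        # to the log of a small positive constant instead.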
if v == 0:
return np.log(1e-7)
return np.log(v)
@staticmethod
def compute_mu(data_instances, coef_, intercept_=0, exposure=None):
if exposure is None:
mu = data_instances.mapValues(
lambda v: np.exp(vec_dot(v.features, coef_) + intercept_))
else:
offset = exposure.mapValues(lambda v: BasePoissonRegression.safe_log(v))
mu = data_instances.join(offset,
lambda v, m: np.exp(vec_dot(v.features, coef_) + intercept_ + m))
return mu
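    # Both branches implement the log-link Poisson mean
    #     mu = exp(w . x + b + log(exposure)),
    # i.e. exp(w . x + b) scaled by the exposure offset (the offset term is
    # simply absent when no exposure column is provided).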
@staticmethod
def compute_wx(data_instances, coef_, intercept_=0):
return data_instances.mapValues(lambda v: vec_dot(v.features, coef_) + intercept_)
def _get_meta(self):
meta_protobuf_obj = poisson_model_meta_pb2.PoissonModelMeta(
penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept,
exposure_colname=self.exposure_colname)
return meta_protobuf_obj
def _get_param(self):
header = self.header
# LOGGER.debug("In get_param, header: {}".format(header))
weight_dict, intercept_ = {}, None
if header is not None:
for idx, header_name in enumerate(header):
coef_i = self.model_weights.coef_[idx]
weight_dict[header_name] = coef_i
intercept_ = self.model_weights.intercept_
best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
param_protobuf_obj = poisson_model_param_pb2.PoissonModelParam(iters=self.n_iter_,
loss_history=self.loss_history,
is_converged=self.is_converged,
weight=weight_dict,
intercept=intercept_,
header=header,
best_iteration=best_iteration)
return param_protobuf_obj
def load_model(self, model_dict):
result_obj = list(model_dict.get('model').values())[0].get(
self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
fit_intercept = meta_obj.fit_intercept
self.exposure_colname = meta_obj.exposure_colname
self.header = list(result_obj.header)
# For poisson regression arbiter predict function
if self.header is None:
return
feature_shape = len(self.header)
tmp_vars = np.zeros(feature_shape)
weight_dict = dict(result_obj.weight)
self.intercept_ = result_obj.intercept
for idx, header_name in enumerate(self.header):
tmp_vars[idx] = weight_dict.get(header_name)
if fit_intercept:
tmp_vars = np.append(tmp_vars, result_obj.intercept)
self.model_weights = PoissonRegressionWeights(l=tmp_vars,
fit_intercept=fit_intercept,
raise_overflow_error=False)
self.n_iter_ = result_obj.iters
def get_metrics_param(self):
return EvaluateParam(eval_type="regression", metrics=self.metrics)
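

# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a plain-NumPy
# walk-through of the quantities computed above, assuming vec_dot behaves like
# a dot product and that an Instance carries a 1-D `features` array. It mirrors
# safe_log / compute_wx / compute_mu on a single sample instead of a FATE Table.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    features = np.array([0.5, 1.2, -0.3])    # one sample's feature vector
    coef = np.array([0.8, -0.1, 0.4])        # fitted coefficients
    intercept = 0.2
    exposure = 2.0                           # e.g. observation time

    wx = np.dot(features, coef) + intercept                        # compute_wx
    offset = np.log(exposure) if exposure != 0 else np.log(1e-7)   # safe_log
    mu = np.exp(wx + offset)                                       # compute_mu
    print("wx = {:.4f}, offset = {:.4f}, mu = {:.4f}".format(wx, offset, mu))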
| 6,914 | 39.676471 | 108 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/poisson_regression/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 661 | 35.777778 | 75 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/poisson_regression/hetero_poisson_regression/hetero_poisson_host.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.poisson_regression. \
hetero_poisson_regression.hetero_poisson_base import HeteroPoissonBase
from federatedml.optim.gradient import hetero_poisson_gradient_and_loss
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroPoissonHost(HeteroPoissonBase):
def __init__(self):
super(HeteroPoissonHost, self).__init__()
self.batch_num = None
self.batch_index_list = []
self.role = consts.HOST
self.cipher = paillier_cipher.Host()
self.batch_generator = batch_generator.Host()
self.gradient_loss_operator = hetero_poisson_gradient_and_loss.Host()
self.converge_procedure = convergence.Host()
def fit(self, data_instances, validate_data=None):
"""
        Train the Poisson regression model for the host role
Parameters
----------
data_instances: Table of Instance, input data
"""
LOGGER.info("Enter hetero_poisson host")
# self._abnormal_detection(data_instances)
# self.validation_strategy = self.init_validation_strategy(data_instances, validate_data)
# self.header = self.get_header(data_instances)
self.prepare_fit(data_instances, validate_data)
self.callback_list.on_train_begin(data_instances, validate_data)
self.cipher_operator = self.cipher.gen_paillier_cipher_operator()
self.batch_generator.initialize_batch_generator(data_instances)
LOGGER.info("Start initialize model.")
model_shape = self.get_features_shape(data_instances)
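        # In the hetero setting only the guest side fits an intercept, so the
        # host explicitly disables it before initializing its weights.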
if self.init_param_obj.fit_intercept:
self.init_param_obj.fit_intercept = False
if not self.component_properties.is_warm_start:
w = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept, raise_overflow_error=False)
else:
self.callback_warm_start_init_iter(self.n_iter_)
while self.n_iter_ < self.max_iter:
LOGGER.info("iter:" + str(self.n_iter_))
batch_data_generator = self.batch_generator.generate_batch_data()
self.optimizer.set_iters(self.n_iter_)
batch_index = 0
for batch_data in batch_data_generator:
self.callback_list.on_epoch_begin(self.n_iter_)
LOGGER.info("iter:" + str(self.n_iter_))
optim_host_gradient = self.gradient_loss_operator.compute_gradient_procedure(
batch_data,
self.cipher_operator,
self.model_weights,
self.optimizer,
self.n_iter_,
batch_index)
self.gradient_loss_operator.compute_loss(batch_data, self.model_weights,
self.optimizer,
self.n_iter_, batch_index, self.cipher_operator)
self.model_weights = self.optimizer.update_model(self.model_weights, optim_host_gradient)
batch_index += 1
self.is_converged = self.converge_procedure.sync_converge_info(suffix=(self.n_iter_,))
LOGGER.info("Get is_converged flag from arbiter:{}".format(self.is_converged))
self.callback_list.on_epoch_end(self.n_iter_)
self.n_iter_ += 1
if self.stop_training:
break
LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
if self.is_converged:
break
self.callback_list.on_train_end()
self.set_summary(self.get_model_summary())
def predict(self, data_instances):
"""
        Poisson regression prediction for the host role
Parameters
----------
        data_instances: Table of Instance, input data
"""
self.transfer_variable.host_partial_prediction.disable_auto_clean()
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
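        # The host sends its partial linear predictor wx (not exp(wx)) so that
        # the guest can sum the partial predictors before exponentiating.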
# pred_host = self.compute_mu(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
pred_host = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
self.transfer_variable.host_partial_prediction.remote(pred_host, role=consts.GUEST, idx=0)
LOGGER.info("Remote partial prediction to Guest")
| 5,428 | 42.087302 | 116 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/poisson_regression/hetero_poisson_regression/hetero_poisson_guest.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.poisson_regression. \
hetero_poisson_regression.hetero_poisson_base import HeteroPoissonBase
from federatedml.optim.gradient import hetero_poisson_gradient_and_loss
from federatedml.statistic.data_overview import with_weight
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
class HeteroPoissonGuest(HeteroPoissonBase):
def __init__(self):
super().__init__()
self.data_batch_count = []
self.role = consts.GUEST
self.cipher = paillier_cipher.Guest()
self.batch_generator = batch_generator.Guest()
self.gradient_loss_operator = hetero_poisson_gradient_and_loss.Guest()
self.converge_procedure = convergence.Guest()
def fit(self, data_instances, validate_data=None):
"""
        Train the Poisson regression model for the guest role
Parameters
----------
data_instances: Table of Instance, input data
"""
LOGGER.info("Enter hetero_poisson_guest fit")
# self._abnormal_detection(data_instances)
# self.header = copy.deepcopy(self.get_header(data_instances))
self.prepare_fit(data_instances, validate_data)
self.callback_list.on_train_begin(data_instances, validate_data)
if with_weight(data_instances):
LOGGER.warning("input data with weight. Poisson regression does not support weighted training.")
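        # Split the exposure column out of the input: keep it as a separate
        # Table (used later as the log-offset for each mini-batch) and drop it
        # from the feature vectors and the header.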
self.exposure_index = self.get_exposure_index(self.header, self.exposure_colname)
exposure_index = self.exposure_index
if exposure_index > -1:
self.header.pop(exposure_index)
LOGGER.info("Guest provides exposure value.")
exposure = data_instances.mapValues(lambda v: HeteroPoissonBase.load_exposure(v, exposure_index))
data_instances = data_instances.mapValues(lambda v: HeteroPoissonBase.load_instance(v, exposure_index))
self.cipher_operator = self.cipher.gen_paillier_cipher_operator()
LOGGER.info("Generate mini-batch from input data")
self.batch_generator.initialize_batch_generator(data_instances, self.batch_size)
LOGGER.info("Start initialize model.")
LOGGER.info("fit_intercept:{}".format(self.init_param_obj.fit_intercept))
model_shape = self.get_features_shape(data_instances)
if not self.component_properties.is_warm_start:
w = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept, raise_overflow_error=False)
else:
self.callback_warm_start_init_iter(self.n_iter_)
while self.n_iter_ < self.max_iter:
self.callback_list.on_epoch_begin(self.n_iter_)
LOGGER.info("iter:{}".format(self.n_iter_))
# each iter will get the same batch_data_generator
batch_data_generator = self.batch_generator.generate_batch_data()
self.optimizer.set_iters(self.n_iter_)
batch_index = 0
for batch_data in batch_data_generator:
# compute offset of this batch
batch_offset = exposure.join(batch_data, lambda ei, d: HeteroPoissonBase.safe_log(ei))
# Start gradient procedure
optimized_gradient = self.gradient_loss_operator.compute_gradient_procedure(
batch_data,
self.cipher_operator,
self.model_weights,
self.optimizer,
self.n_iter_,
batch_index,
batch_offset
)
# LOGGER.debug("iteration:{} Guest's gradient: {}".format(self.n_iter_, optimized_gradient))
loss_norm = self.optimizer.loss_norm(self.model_weights)
self.gradient_loss_operator.compute_loss(batch_data, self.model_weights, self.n_iter_,
batch_index, batch_offset, loss_norm)
self.model_weights = self.optimizer.update_model(self.model_weights, optimized_gradient)
batch_index += 1
self.is_converged = self.converge_procedure.sync_converge_info(suffix=(self.n_iter_,))
LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
self.callback_list.on_epoch_end(self.n_iter_)
self.n_iter_ += 1
if self.stop_training:
break
if self.is_converged:
break
self.callback_list.on_train_end()
self.set_summary(self.get_model_summary())
@assert_io_num_rows_equal
def predict(self, data_instances):
"""
Prediction of Poisson
Parameters
----------
data_instances: Table of Instance, input data
Returns
----------
Table
include input data label, predict results
"""
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
header = data_instances.schema.get("header")
self.exposure_index = self.get_exposure_index(header, self.exposure_colname)
exposure_index = self.exposure_index
exposure = data_instances.mapValues(lambda v: HeteroPoissonBase.load_exposure(v, exposure_index))
data_instances = self.align_data_header(data_instances, self.header)
data_instances = data_instances.mapValues(lambda v: HeteroPoissonBase.load_instance(v, exposure_index))
# pred_guest = self.compute_mu(data_instances, self.model_weights.coef_, self.model_weights.intercept_, exposure)
# pred_host = self.transfer_variable.host_partial_prediction.get(idx=0)
wx_guest = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
wx_host = self.transfer_variable.host_partial_prediction.get(idx=0)
LOGGER.info("Get prediction from Host")
# pred = pred_guest.join(pred_host, lambda g, h: g * h)
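        # Each party holds part of the features, so the full linear predictor
        # is the sum of the two partial wx values; the Poisson prediction is
        # exp(wx_guest + wx_host) rescaled by the exposure.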
pred = wx_guest.join(wx_host, lambda g, h: np.exp(g + h))
pred = pred.join(exposure, lambda mu, b: mu * b)
# predict_result = data_instances.join(pred, lambda d, p: [d.label, p, p, {"label": p}])
predict_result = self.predict_score_to_output(data_instances=data_instances, predict_score=pred,
classes=None)
return predict_result
| 7,418 | 44.237805 | 121 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/poisson_regression/hetero_poisson_regression/hetero_poisson_base.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.linear_model.coordinated_linear_model.poisson_regression.base_poisson_regression import \
BasePoissonRegression
from federatedml.transfer_variable.transfer_class.hetero_poisson_transfer_variable import HeteroPoissonTransferVariable
from federatedml.util import consts
class HeteroPoissonBase(BasePoissonRegression):
def __init__(self):
super().__init__()
self.model_name = 'HeteroPoissonRegression'
self.model_param_name = 'HeteroPoissonRegressionParam'
self.model_meta_name = 'HeteroPoissonRegressionMeta'
self.mode = consts.HETERO
self.aggregator = None
self.cipher = None
self.batch_generator = None
self.gradient_loss_operator = None
self.converge_procedure = None
self.transfer_variable = HeteroPoissonTransferVariable()
def _init_model(self, params):
super(HeteroPoissonBase, self)._init_model(params)
self.cipher.register_paillier_cipher(self.transfer_variable)
self.converge_procedure.register_convergence(self.transfer_variable)
self.batch_generator.register_batch_generator(self.transfer_variable)
self.gradient_loss_operator.register_gradient_procedure(self.transfer_variable)
self.gradient_loss_operator.set_fixed_float_precision(self.model_param.floating_point_precision)
| 2,015 | 42.826087 | 119 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/poisson_regression/hetero_poisson_regression/hetero_poisson_arbiter.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.linear_model.coordinated_linear_model.base_linear_model_arbiter import HeteroBaseArbiter
from federatedml.linear_model.coordinated_linear_model.poisson_regression. \
hetero_poisson_regression.hetero_poisson_base import HeteroPoissonBase
from federatedml.optim.gradient import hetero_poisson_gradient_and_loss
from federatedml.param.poisson_regression_param import PoissonParam
from federatedml.transfer_variable.transfer_class.hetero_poisson_transfer_variable import HeteroPoissonTransferVariable
from federatedml.util import consts
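# The arbiter holds no features; in FATE's hetero setup it typically generates
# the Paillier key pair, decrypts the aggregated gradients/loss sent by guest
# and host, and synchronizes the convergence decision. Most of that logic
# lives in HeteroBaseArbiter and the gradient operator wired up below.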
class HeteroPoissonArbiter(HeteroBaseArbiter, HeteroPoissonBase):
def __init__(self):
super(HeteroPoissonArbiter, self).__init__()
self.gradient_loss_operator = hetero_poisson_gradient_and_loss.Arbiter()
self.model_param = PoissonParam()
self.n_iter_ = 0
self.header = None
self.model_param_name = 'HeteroPoissonRegressionParam'
self.model_meta_name = 'HeteroPoissonRegressionMeta'
self.model_name = 'HeteroPoissonRegression'
self.is_converged = False
self.mode = consts.HETERO
self.need_call_back_loss = True
self.transfer_variable = HeteroPoissonTransferVariable()
| 1,840 | 45.025 | 119 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/poisson_regression/hetero_poisson_regression/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 661 | 35.777778 | 75 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/linear_regression/base_linear_regression.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.linear_model.linear_model_base import BaseLinearModel
from federatedml.linear_model.linear_model_weight import LinearModelWeights as LinearRegressionWeights
from federatedml.optim.initialize import Initializer
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.linear_regression_param import LinearParam
from federatedml.protobuf.generated import linr_model_param_pb2, linr_model_meta_pb2
from federatedml.secureprotol import PaillierEncrypt
from federatedml.util.fate_operator import vec_dot
class BaseLinearRegression(BaseLinearModel):
def __init__(self):
super(BaseLinearRegression, self).__init__()
self.model_param = LinearParam()
# attribute:
self.n_iter_ = 0
self.feature_shape = None
self.gradient_operator = None
self.initializer = Initializer()
self.transfer_variable = None
self.loss_history = []
self.is_converged = False
self.header = None
self.model_name = 'LinearRegression'
self.model_param_name = 'LinearRegressionParam'
self.model_meta_name = 'LinearRegressionMeta'
self.role = ''
self.mode = ''
self.schema = {}
self.cipher_operator = PaillierEncrypt()
def _init_model(self, params):
super()._init_model(params)
def compute_wx(self, data_instances, coef_, intercept_=0):
return data_instances.mapValues(
lambda v: vec_dot(v.features, coef_) + intercept_)
def _get_meta(self):
meta_protobuf_obj = linr_model_meta_pb2.LinRModelMeta(penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept)
return meta_protobuf_obj
def _get_param(self):
header = self.header
# LOGGER.debug("In get_param, header: {}".format(header))
weight_dict, intercept_ = {}, None
if header is not None:
weight_dict, intercept_ = self.get_weight_intercept_dict(header)
best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
param_protobuf_obj = linr_model_param_pb2.LinRModelParam(iters=self.n_iter_,
loss_history=self.loss_history,
is_converged=self.is_converged,
weight=weight_dict,
intercept=intercept_,
header=header,
best_iteration=best_iteration)
return param_protobuf_obj
def load_model(self, model_dict):
result_obj = list(model_dict.get('model').values())[0].get(
self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
fit_intercept = meta_obj.fit_intercept
self.header = list(result_obj.header)
if self.header is None:
return
feature_shape = len(self.header)
tmp_vars = np.zeros(feature_shape)
weight_dict = dict(result_obj.weight)
self.intercept_ = result_obj.intercept
for idx, header_name in enumerate(self.header):
tmp_vars[idx] = weight_dict.get(header_name)
if fit_intercept:
tmp_vars = np.append(tmp_vars, result_obj.intercept)
self.model_weights = LinearRegressionWeights(l=tmp_vars,
fit_intercept=fit_intercept,
raise_overflow_error=False)
self.n_iter_ = result_obj.iters
def get_metrics_param(self):
return EvaluateParam(eval_type="regression", metrics=self.metrics)
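

# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how load_model turns
# the protobuf's header-keyed weight map back into a flat coefficient vector,
# shown with plain Python/NumPy stand-ins for the protobuf fields.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    header = ["x0", "x1", "x2"]
    weight_map = {"x0": 0.8, "x1": -0.1, "x2": 0.4}   # stands in for result_obj.weight
    intercept = 0.2                                   # stands in for result_obj.intercept
    fit_intercept = True

    tmp_vars = np.zeros(len(header))
    for idx, name in enumerate(header):
        tmp_vars[idx] = weight_map.get(name)
    if fit_intercept:
        # the intercept is appended as the last entry of the weight vector
        tmp_vars = np.append(tmp_vars, intercept)
    print(tmp_vars)   # [ 0.8 -0.1  0.4  0.2]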
| 5,282 | 43.771186 | 108 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/linear_regression/__init__.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/linear_regression/hetero_linear_regression/hetero_linr_guest.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.linear_regression.hetero_linear_regression.hetero_linr_base import \
HeteroLinRBase
from federatedml.optim.gradient import hetero_linr_gradient_and_loss
from federatedml.statistic.data_overview import with_weight, scale_sample_weight
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
class HeteroLinRGuest(HeteroLinRBase):
def __init__(self):
super().__init__()
self.data_batch_count = []
# self.guest_forward = None
self.role = consts.GUEST
self.cipher = paillier_cipher.Guest()
self.batch_generator = batch_generator.Guest()
self.gradient_loss_operator = hetero_linr_gradient_and_loss.Guest()
self.converge_procedure = convergence.Guest()
@staticmethod
def load_data(data_instance):
"""
        Return data_instance unchanged
Parameters
----------
data_instance: Table of Instance, input data
"""
return data_instance
def fit(self, data_instances, validate_data=None):
"""
        Train the linear regression (LinR) model for the guest role
Parameters
----------
data_instances: Table of Instance, input data
"""
LOGGER.info("Enter hetero_linR_guest fit")
# self._abnormal_detection(data_instances)
# self.header = self.get_header(data_instances)
self.prepare_fit(data_instances, validate_data)
self.callback_list.on_train_begin(data_instances, validate_data)
self.cipher_operator = self.cipher.gen_paillier_cipher_operator()
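        # Decide whether the asynchronous single-host gradient path can be
        # used: weighted samples force the synchronous weighted operator, and
        # the final choice is broadcast to the host via the use_async variable.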
use_async = False
if with_weight(data_instances):
if self.model_param.early_stop == "diff":
LOGGER.warning("input data with weight, please use 'weight_diff' for 'early_stop'.")
data_instances = scale_sample_weight(data_instances)
self.gradient_loss_operator.set_use_sample_weight()
LOGGER.debug(f"instance weight scaled; use weighted gradient loss operator")
# LOGGER.debug(f"data_instances after scale: {[v[1].weight for v in list(data_instances.collect())]}")
elif len(self.component_properties.host_party_idlist) == 1:
LOGGER.debug(f"set_use_async")
self.gradient_loss_operator.set_use_async()
use_async = True
self.transfer_variable.use_async.remote(use_async)
LOGGER.info("Generate mini-batch from input data")
self.batch_generator.initialize_batch_generator(data_instances, self.batch_size)
self.gradient_loss_operator.set_total_batch_nums(self.batch_generator.batch_nums)
LOGGER.info("Start initialize model.")
LOGGER.info("fit_intercept:{}".format(self.init_param_obj.fit_intercept))
model_shape = self.get_features_shape(data_instances)
if not self.component_properties.is_warm_start:
w = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept, raise_overflow_error=False)
else:
self.callback_warm_start_init_iter(self.n_iter_)
while self.n_iter_ < self.max_iter:
self.callback_list.on_epoch_begin(self.n_iter_)
LOGGER.info("iter:{}".format(self.n_iter_))
# each iter will get the same batch_data_generator
batch_data_generator = self.batch_generator.generate_batch_data()
self.optimizer.set_iters(self.n_iter_)
batch_index = 0
for batch_data in batch_data_generator:
# Start gradient procedure
optim_guest_gradient = self.gradient_loss_operator.compute_gradient_procedure(
batch_data,
self.cipher_operator,
self.model_weights,
self.optimizer,
self.n_iter_,
batch_index
)
loss_norm = self.optimizer.loss_norm(self.model_weights)
self.gradient_loss_operator.compute_loss(batch_data, self.n_iter_, batch_index, loss_norm)
self.model_weights = self.optimizer.update_model(self.model_weights, optim_guest_gradient)
batch_index += 1
self.is_converged = self.converge_procedure.sync_converge_info(suffix=(self.n_iter_,))
LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
self.callback_list.on_epoch_end(self.n_iter_)
self.n_iter_ += 1
if self.stop_training:
break
if self.is_converged:
break
self.callback_list.on_train_end()
self.set_summary(self.get_model_summary())
@assert_io_num_rows_equal
def predict(self, data_instances):
"""
Prediction of linR
Parameters
----------
data_instances: Table of Instance, input data
Returns
----------
Table
include input data label, predict results
"""
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
pred = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
host_preds = self.transfer_variable.host_partial_prediction.get(idx=-1)
LOGGER.info("Get prediction from Host")
for host_pred in host_preds:
pred = pred.join(host_pred, lambda g, h: g + h)
# predict_result = data_instances.join(pred, lambda d, pred: [d.label, pred, pred, {"label": pred}])
predict_result = self.predict_score_to_output(data_instances=data_instances, predict_score=pred,
classes=None)
return predict_result
| 6,896 | 42.10625 | 123 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/linear_regression/hetero_linear_regression/hetero_linr_arbiter.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.linear_model.coordinated_linear_model.base_linear_model_arbiter import HeteroBaseArbiter
from federatedml.linear_model.coordinated_linear_model.linear_regression.hetero_linear_regression.hetero_linr_base import \
HeteroLinRBase
from federatedml.optim.gradient import hetero_linr_gradient_and_loss
from federatedml.param.linear_regression_param import LinearParam
from federatedml.util import consts
from federatedml.transfer_variable.transfer_class.hetero_linr_transfer_variable import HeteroLinRTransferVariable
class HeteroLinRArbiter(HeteroBaseArbiter, HeteroLinRBase):
def __init__(self):
super(HeteroLinRArbiter, self).__init__()
self.gradient_loss_operator = hetero_linr_gradient_and_loss.Arbiter()
self.model_param = LinearParam()
self.n_iter_ = 0
self.header = None
self.model_param_name = 'HeteroLinearRegressionParam'
self.model_meta_name = 'HeteroLinearRegressionMeta'
self.model_name = 'HeteroLinearRegression'
self.is_converged = False
self.mode = consts.HETERO
self.need_call_back_loss = True
self.transfer_variable = HeteroLinRTransferVariable()
| 1,801 | 44.05 | 123 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/linear_regression/hetero_linear_regression/hetero_linr_host.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.linear_regression.hetero_linear_regression.hetero_linr_base import \
HeteroLinRBase
from federatedml.optim.gradient import hetero_linr_gradient_and_loss
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroLinRHost(HeteroLinRBase):
def __init__(self):
super(HeteroLinRHost, self).__init__()
self.batch_num = None
self.batch_index_list = []
self.role = consts.HOST
self.cipher = paillier_cipher.Host()
self.batch_generator = batch_generator.Host()
self.gradient_loss_operator = hetero_linr_gradient_and_loss.Host()
self.converge_procedure = convergence.Host()
def fit(self, data_instances, validate_data=None):
"""
        Train the linear regression model for the host role
Parameters
----------
data_instances: Table of Instance, input data
"""
LOGGER.info("Enter hetero_linR host")
# self._abnormal_detection(data_instances)
# self.header = self.get_header(data_instances)
self.prepare_fit(data_instances, validate_data)
self.callback_list.on_train_begin(data_instances, validate_data)
self.cipher_operator = self.cipher.gen_paillier_cipher_operator()
if self.transfer_variable.use_async.get(idx=0):
LOGGER.debug(f"set_use_async")
self.gradient_loss_operator.set_use_async()
self.batch_generator.initialize_batch_generator(data_instances)
self.gradient_loss_operator.set_total_batch_nums(self.batch_generator.batch_nums)
LOGGER.info("Start initialize model.")
model_shape = self.get_features_shape(data_instances)
if self.init_param_obj.fit_intercept:
self.init_param_obj.fit_intercept = False
if not self.component_properties.is_warm_start:
w = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept, raise_overflow_error=False)
else:
self.callback_warm_start_init_iter(self.n_iter_)
while self.n_iter_ < self.max_iter:
self.callback_list.on_epoch_begin(self.n_iter_)
LOGGER.info("iter:" + str(self.n_iter_))
self.optimizer.set_iters(self.n_iter_)
batch_data_generator = self.batch_generator.generate_batch_data()
batch_index = 0
for batch_data in batch_data_generator:
optim_host_gradient = self.gradient_loss_operator.compute_gradient_procedure(
batch_data,
self.cipher_operator,
self.model_weights,
self.optimizer,
self.n_iter_,
batch_index)
self.gradient_loss_operator.compute_loss(self.model_weights, self.optimizer, self.n_iter_, batch_index,
self.cipher_operator)
self.model_weights = self.optimizer.update_model(self.model_weights, optim_host_gradient)
batch_index += 1
self.is_converged = self.converge_procedure.sync_converge_info(suffix=(self.n_iter_,))
LOGGER.info("Get is_converged flag from arbiter:{}".format(self.is_converged))
self.callback_list.on_epoch_end(self.n_iter_)
self.n_iter_ += 1
if self.stop_training:
break
LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
if self.is_converged:
break
self.callback_list.on_train_end()
self.set_summary(self.get_model_summary())
# LOGGER.debug(f"summary content is: {self.summary()}")
def predict(self, data_instances):
"""
Prediction of linR
Parameters
----------
        data_instances: Table of Instance, input data
"""
self.transfer_variable.host_partial_prediction.disable_auto_clean()
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
pred_host = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
self.transfer_variable.host_partial_prediction.remote(pred_host, role=consts.GUEST, idx=0)
LOGGER.info("Remote partial prediction to Guest")
| 5,368 | 40.945313 | 123 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/linear_regression/hetero_linear_regression/__init__.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/linear_regression/hetero_linear_regression/hetero_linr_base.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.linear_model.coordinated_linear_model.linear_regression.base_linear_regression \
import BaseLinearRegression
from federatedml.optim.gradient.hetero_sqn_gradient import sqn_factory
from federatedml.transfer_variable.transfer_class.hetero_linr_transfer_variable import HeteroLinRTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroLinRBase(BaseLinearRegression):
def __init__(self):
super().__init__()
self.model_name = 'HeteroLinearRegression'
self.model_param_name = 'HeteroLinearRegressionParam'
self.model_meta_name = 'HeteroLinearRegressionMeta'
self.mode = consts.HETERO
self.aggregator = None
self.cipher = None
self.batch_generator = None
self.gradient_loss_operator = None
self.converge_procedure = None
self.transfer_variable = HeteroLinRTransferVariable()
def _init_model(self, params):
super(HeteroLinRBase, self)._init_model(params)
self.cipher.register_paillier_cipher(self.transfer_variable)
self.converge_procedure.register_convergence(self.transfer_variable)
self.batch_generator.register_batch_generator(self.transfer_variable)
self.gradient_loss_operator.register_gradient_procedure(self.transfer_variable)
self.gradient_loss_operator.set_fixed_float_precision(self.model_param.floating_point_precision)
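        # When the SQN (stochastic quasi-Newton) optimizer is selected, wrap
        # the plain gradient/loss operator in the SQN variant so the
        # second-order updates reuse the same transfer variables.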
if params.optimizer == 'sqn':
gradient_loss_operator = sqn_factory(self.role, params.sqn_param)
gradient_loss_operator.register_gradient_computer(self.gradient_loss_operator)
gradient_loss_operator.register_transfer_variable(self.transfer_variable)
gradient_loss_operator.unset_raise_weight_overflow_error()
self.gradient_loss_operator = gradient_loss_operator
LOGGER.debug("In _init_model, optimizer: {}, gradient_loss_operator: {}".format(
params.optimizer, self.gradient_loss_operator
))
| 2,700 | 45.568966 | 113 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/base_logistic_regression.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.linear_model.linear_model_base import BaseLinearModel
from federatedml.linear_model.linear_model_weight import LinearModelWeights as LogisticRegressionWeights
from federatedml.one_vs_rest.one_vs_rest import one_vs_rest_factory
from federatedml.optim.initialize import Initializer
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.logistic_regression_param import InitParam
from federatedml.protobuf.generated import lr_model_param_pb2
from federatedml.util import LOGGER, consts
from federatedml.util.fate_operator import vec_dot
class BaseLogisticRegression(BaseLinearModel):
def __init__(self):
super(BaseLogisticRegression, self).__init__()
# attribute:
self.initializer = Initializer()
self.model_name = 'LogisticRegression'
self.model_param_name = 'LogisticRegressionParam'
self.model_meta_name = 'LogisticRegressionMeta'
# one_vs_rest parameter
self.need_one_vs_rest = None
self.one_vs_rest_classes = []
self.one_vs_rest_obj = None
def _init_model(self, params):
super()._init_model(params)
self.one_vs_rest_obj = one_vs_rest_factory(self, role=self.role, mode=self.mode, has_arbiter=True)
def compute_wx(self, data_instances, coef_, intercept_=0):
return data_instances.mapValues(lambda v: vec_dot(v.features, coef_) + intercept_)
def get_single_model_param(self):
weight_dict = {}
# LOGGER.debug("in get_single_model_param, model_weights: {}, coef: {}, header: {}".format(
# self.model_weights.unboxed, self.model_weights.coef_, self.header
# ))
for idx, header_name in enumerate(self.header):
coef_i = self.model_weights.coef_[idx]
weight_dict[header_name] = coef_i
if hasattr(self, 'best_iteration'):
best_iter = self.best_iteration
else:
best_iter = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
result = {'iters': self.n_iter_,
'loss_history': self.loss_history,
'is_converged': self.is_converged,
'weight': weight_dict,
'intercept': self.model_weights.intercept_,
'header': self.header,
'best_iteration': best_iter
}
return result
def _get_param(self):
self.header = self.header if self.header else []
LOGGER.debug("In get_param, self.need_one_vs_rest: {}".format(self.need_one_vs_rest))
if self.need_cv:
param_protobuf_obj = lr_model_param_pb2.LRModelParam()
return param_protobuf_obj
if self.need_one_vs_rest:
one_vs_rest_result = self.one_vs_rest_obj.save(lr_model_param_pb2.SingleModel)
single_result = {'header': self.header, 'need_one_vs_rest': True, "best_iteration": -1}
else:
one_vs_rest_result = None
single_result = self.get_single_model_param()
single_result['need_one_vs_rest'] = False
single_result['one_vs_rest_result'] = one_vs_rest_result
# LOGGER.debug("in _get_param, single_result: {}".format(single_result))
param_protobuf_obj = lr_model_param_pb2.LRModelParam(**single_result)
return param_protobuf_obj
def load_model(self, model_dict):
LOGGER.debug("Start Loading model")
result_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
# self.fit_intercept = meta_obj.fit_intercept
if self.init_param_obj is None:
self.init_param_obj = InitParam()
self.init_param_obj.fit_intercept = meta_obj.fit_intercept
self.header = list(result_obj.header)
need_one_vs_rest = result_obj.need_one_vs_rest
if need_one_vs_rest:
one_vs_rest_result = result_obj.one_vs_rest_result
self.one_vs_rest_obj = one_vs_rest_factory(classifier=self, role=self.role,
mode=self.mode, has_arbiter=True)
self.one_vs_rest_obj.load_model(one_vs_rest_result)
self.need_one_vs_rest = True
else:
self.load_single_model(result_obj)
self.need_one_vs_rest = False
def load_single_model(self, single_model_obj):
LOGGER.info("It's a binary task, start to load single model")
feature_shape = len(self.header)
tmp_vars = np.zeros(feature_shape)
weight_dict = dict(single_model_obj.weight)
for idx, header_name in enumerate(self.header):
tmp_vars[idx] = weight_dict.get(header_name)
if self.fit_intercept:
tmp_vars = np.append(tmp_vars, single_model_obj.intercept)
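        # Fall back to a single zero weight (e.g. when no feature header is
        # held on this side) so the weight vector is never empty.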
if len(tmp_vars) == 0:
tmp_vars = [0.]
self.model_weights = LogisticRegressionWeights(tmp_vars, fit_intercept=self.fit_intercept)
self.n_iter_ = single_model_obj.iters
return self
def one_vs_rest_fit(self, train_data=None, validate_data=None):
LOGGER.debug("Class num larger than 2, need to do one_vs_rest")
self.one_vs_rest_obj.fit(data_instances=train_data, validate_data=validate_data)
def get_metrics_param(self):
if self.need_one_vs_rest:
eval_type = 'multi'
else:
eval_type = "binary"
return EvaluateParam(eval_type=eval_type, metrics=self.metrics)
| 6,204 | 40.925676 | 107 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/__init__.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from federatedml.logistic_regression import homo_logsitic_regression, hetero_logistic_regression
# from federatedml.logistic_regression.logistic_regression import LogisticRegression
#
# __all__ = ['homo_logsitic_regression', 'hetero_logistic_regression', 'LogisticRegression']
| 895 | 43.8 | 98 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_arbiter.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.linear_model.coordinated_linear_model.base_linear_model_arbiter import HeteroBaseArbiter
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.logistic_regression.hetero_logistic_regression.hetero_lr_base import \
HeteroLRBase
from federatedml.one_vs_rest.one_vs_rest import one_vs_rest_factory
from federatedml.optim.gradient import hetero_lr_gradient_and_loss
from federatedml.param.logistic_regression_param import HeteroLogisticParam
from federatedml.transfer_variable.transfer_class.hetero_lr_transfer_variable import HeteroLRTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroLRArbiter(HeteroBaseArbiter, HeteroLRBase):
def __init__(self):
super(HeteroLRArbiter, self).__init__()
self.gradient_loss_operator = hetero_lr_gradient_and_loss.Arbiter()
self.model_param = HeteroLogisticParam()
self.n_iter_ = 0
self.header = []
self.is_converged = False
self.model_param_name = 'HeteroLogisticRegressionParam'
self.model_meta_name = 'HeteroLogisticRegressionMeta'
self.model_name = 'HeteroLogisticRegression'
self.need_one_vs_rest = None
self.need_call_back_loss = True
self.mode = consts.HETERO
self.transfer_variable = HeteroLRTransferVariable()
def _init_model(self, params):
super()._init_model(params)
self.model_weights = LinearModelWeights([], fit_intercept=self.fit_intercept)
self.one_vs_rest_obj = one_vs_rest_factory(self, role=self.role, mode=self.mode, has_arbiter=True)
def fit(self, data_instances=None, validate_data=None):
LOGGER.debug("Has loss_history: {}".format(hasattr(self, 'loss_history')))
LOGGER.debug("Need one_vs_rest: {}".format(self.need_one_vs_rest))
classes = self.one_vs_rest_obj.get_data_classes(data_instances)
if len(classes) > 2:
self.need_one_vs_rest = True
self.need_call_back_loss = False
self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
else:
self.need_one_vs_rest = False
super().fit(data_instances, validate_data)
def fit_binary(self, data_instances, validate_data):
super().fit(data_instances, validate_data)
| 3,004 | 45.953125 | 125 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/__init__.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_base.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.linear_model.coordinated_linear_model.logistic_regression.base_logistic_regression import \
BaseLogisticRegression
from federatedml.optim.gradient.hetero_sqn_gradient import sqn_factory
from federatedml.param.logistic_regression_param import HeteroLogisticParam
from federatedml.protobuf.generated import lr_model_meta_pb2
from federatedml.secureprotol import PaillierEncrypt, IpclPaillierEncrypt
from federatedml.transfer_variable.transfer_class.hetero_lr_transfer_variable import HeteroLRTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroLRBase(BaseLogisticRegression):
def __init__(self):
super().__init__()
self.model_name = 'HeteroLogisticRegression'
self.model_param_name = 'HeteroLogisticRegressionParam'
self.model_meta_name = 'HeteroLogisticRegressionMeta'
self.mode = consts.HETERO
self.aggregator = None
self.cipher = None
self.batch_generator = None
self.gradient_loss_operator = None
self.converge_procedure = None
self.model_param = HeteroLogisticParam()
self.transfer_variable = HeteroLRTransferVariable()
def _init_model(self, params):
super()._init_model(params)
self.encrypted_mode_calculator_param = params.encrypted_mode_calculator_param
if params.encrypt_param.method == consts.PAILLIER:
self.cipher_operator = PaillierEncrypt()
elif params.encrypt_param.method == consts.PAILLIER_IPCL:
self.cipher_operator = IpclPaillierEncrypt()
else:
raise ValueError(f"Unsupported encryption method: {params.encrypt_param.method}")
self.cipher.register_paillier_cipher(self.transfer_variable)
self.converge_procedure.register_convergence(self.transfer_variable)
self.batch_generator.register_batch_generator(self.transfer_variable)
self.gradient_loss_operator.register_gradient_procedure(self.transfer_variable)
# if len(self.component_properties.host_party_idlist) == 1:
# LOGGER.debug(f"set_use_async")
# self.gradient_loss_operator.set_use_async()
self.gradient_loss_operator.set_fixed_float_precision(self.model_param.floating_point_precision)
def _get_meta(self):
meta_protobuf_obj = lr_model_meta_pb2.LRModelMeta(penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept,
need_one_vs_rest=self.need_one_vs_rest)
return meta_protobuf_obj
def get_model_summary(self):
header = self.header
if header is None:
return {}
weight_dict, intercept_ = self.get_weight_intercept_dict(header)
# best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
summary = {"coef": weight_dict,
"intercept": intercept_,
"is_converged": self.is_converged,
"one_vs_rest": self.need_one_vs_rest,
"best_iteration": self.callback_variables.best_iteration}
if self.callback_variables.validation_summary is not None:
summary["validation_metrics"] = self.callback_variables.validation_summary
# if self.validation_strategy:
# validation_summary = self.validation_strategy.summary()
# if validation_summary:
# summary["validation_metrics"] = validation_summary
return summary
| 4,877 | 48.77551 | 110 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_guest.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.logistic_regression.hetero_logistic_regression.hetero_lr_base import \
HeteroLRBase
from federatedml.optim import activation
from federatedml.optim.gradient import hetero_lr_gradient_and_loss
from federatedml.secureprotol import EncryptModeCalculator
from federatedml.statistic.data_overview import with_weight, scale_sample_weight
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
class HeteroLRGuest(HeteroLRBase):
def __init__(self):
super().__init__()
self.data_batch_count = []
# self.guest_forward = None
self.role = consts.GUEST
self.cipher = paillier_cipher.Guest()
self.batch_generator = batch_generator.Guest()
self.gradient_loss_operator = hetero_lr_gradient_and_loss.Guest()
self.converge_procedure = convergence.Guest()
# self.need_one_vs_rest = None
@staticmethod
def load_data(data_instance):
"""
        Convert the negative class label to -1 (any label other than 1 is mapped to -1)
Parameters
----------
data_instance: Table of Instance, input data
"""
data_instance = copy.deepcopy(data_instance)
if data_instance.label != 1:
data_instance.label = -1
return data_instance
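    # Example (illustrative): with binary labels {0, 1}, load_data leaves label 1 unchanged and
    # maps label 0 to -1, so the downstream gradient and loss code can assume y in {-1, +1}.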
def fit(self, data_instances, validate_data=None):
"""
Train lr model of role guest
Parameters
----------
data_instances: Table of Instance, input data
"""
LOGGER.info("Enter hetero_lr_guest fit")
# self._abnormal_detection(data_instances)
# self.check_abnormal_values(data_instances)
# self.check_abnormal_values(validate_data)
# self.header = self.get_header(data_instances)
self.prepare_fit(data_instances, validate_data)
classes = self.one_vs_rest_obj.get_data_classes(data_instances)
if with_weight(data_instances):
data_instances = scale_sample_weight(data_instances)
self.gradient_loss_operator.set_use_sample_weight()
LOGGER.debug(f"instance weight scaled; use weighted gradient loss operator")
if len(classes) > 2:
self.need_one_vs_rest = True
self.need_call_back_loss = False
self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
else:
self.need_one_vs_rest = False
self.fit_binary(data_instances, validate_data)
def fit_binary(self, data_instances, validate_data=None):
LOGGER.info("Enter hetero_lr_guest fit")
self.header = self.get_header(data_instances)
self.callback_list.on_train_begin(data_instances, validate_data)
data_instances = data_instances.mapValues(HeteroLRGuest.load_data)
LOGGER.debug(f"MODEL_STEP After load data, data count: {data_instances.count()}")
self.cipher_operator = self.cipher.gen_paillier_cipher_operator(method=self.model_param.encrypt_param.method)
self.batch_generator.initialize_batch_generator(data_instances, self.batch_size,
batch_strategy=self.batch_strategy,
masked_rate=self.masked_rate, shuffle=self.shuffle)
if self.batch_generator.batch_masked:
self.batch_generator.verify_batch_legality()
self.gradient_loss_operator.set_total_batch_nums(self.batch_generator.batch_nums)
use_async = False
if with_weight(data_instances):
if self.model_param.early_stop == "diff":
LOGGER.warning("input data with weight, please use 'weight_diff' for 'early_stop'.")
# data_instances = scale_sample_weight(data_instances)
# self.gradient_loss_operator.set_use_sample_weight()
# LOGGER.debug(f"data_instances after scale: {[v[1].weight for v in list(data_instances.collect())]}")
elif len(self.component_properties.host_party_idlist) == 1 and not self.batch_generator.batch_masked:
LOGGER.debug(f"set_use_async")
self.gradient_loss_operator.set_use_async()
use_async = True
self.transfer_variable.use_async.remote(use_async)
LOGGER.info("Generate mini-batch from input data")
LOGGER.info("Start initialize model.")
LOGGER.info("fit_intercept:{}".format(self.init_param_obj.fit_intercept))
model_shape = self.get_features_shape(data_instances)
if not self.component_properties.is_warm_start:
w = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept)
else:
self.callback_warm_start_init_iter(self.n_iter_)
while self.n_iter_ < self.max_iter:
self.callback_list.on_epoch_begin(self.n_iter_)
LOGGER.info("iter: {}".format(self.n_iter_))
batch_data_generator = self.batch_generator.generate_batch_data(suffix=(self.n_iter_, ), with_index=True)
self.optimizer.set_iters(self.n_iter_)
batch_index = 0
for batch_data, index_data in batch_data_generator:
batch_feat_inst = batch_data
if not self.batch_generator.batch_masked:
index_data = None
# Start gradient procedure
LOGGER.debug(
"iter: {}, batch: {}, before compute gradient, data count: {}".format(
self.n_iter_, batch_index, batch_feat_inst.count()))
optim_guest_gradient = self.gradient_loss_operator.compute_gradient_procedure(
batch_feat_inst,
self.cipher_operator,
self.model_weights,
self.optimizer,
self.n_iter_,
batch_index,
masked_index=index_data
)
loss_norm = self.optimizer.loss_norm(self.model_weights)
self.gradient_loss_operator.compute_loss(batch_feat_inst, self.model_weights, self.n_iter_, batch_index,
loss_norm, batch_masked=self.batch_generator.batch_masked)
self.model_weights = self.optimizer.update_model(self.model_weights, optim_guest_gradient)
batch_index += 1
self.is_converged = self.converge_procedure.sync_converge_info(suffix=(self.n_iter_,))
LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
self.callback_list.on_epoch_end(self.n_iter_)
self.n_iter_ += 1
if self.stop_training:
break
if self.is_converged:
break
self.callback_list.on_train_end()
self.set_summary(self.get_model_summary())
@assert_io_num_rows_equal
def predict(self, data_instances):
"""
Prediction of lr
Parameters
----------
data_instances: Table of Instance, input data
Returns
----------
Table
            includes input data label, predicted probability, predicted label
"""
LOGGER.info("Start predict is a one_vs_rest task: {}".format(self.need_one_vs_rest))
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
if self.need_one_vs_rest:
predict_result = self.one_vs_rest_obj.predict(data_instances)
return predict_result
# data_features = self.transform(data_instances)
pred_prob = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
host_probs = self.transfer_variable.host_prob.get(idx=-1)
LOGGER.info("Get probability from Host")
# guest probability
for host_prob in host_probs:
pred_prob = pred_prob.join(host_prob, lambda g, h: g + h)
pred_prob = pred_prob.mapValues(lambda p: activation.sigmoid(p))
threshold = self.model_param.predict_param.threshold
# pred_label = pred_prob.mapValues(lambda x: 1 if x > threshold else 0)
# predict_result = data_instances.mapValues(lambda x: x.label)
# predict_result = predict_result.join(pred_prob, lambda x, y: (x, y))
# predict_result = predict_result.join(pred_label, lambda x, y: [x[0], y, x[1],
# {"0": (1 - x[1]), "1": x[1]}])
predict_result = self.predict_score_to_output(data_instances, pred_prob, classes=[0, 1], threshold=threshold)
return predict_result
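# Worked example (illustrative numbers): if for one sample the guest-side linear predictor
# (including intercept) is 0.3 and the single host sends -0.1, the joined score is 0.2 and
# pred_prob = sigmoid(0.2) ~= 0.55; with threshold 0.5 the predicted label is 1.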
| 9,656 | 43.09589 | 125 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/hetero_logistic_regression/hetero_lr_host.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.logistic_regression.hetero_logistic_regression.hetero_lr_base import \
HeteroLRBase
from federatedml.optim.gradient import hetero_lr_gradient_and_loss
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroLRHost(HeteroLRBase):
def __init__(self):
super(HeteroLRHost, self).__init__()
self.batch_num = None
self.batch_index_list = []
self.role = consts.HOST
self.cipher = paillier_cipher.Host()
self.batch_generator = batch_generator.Host()
self.gradient_loss_operator = hetero_lr_gradient_and_loss.Host()
self.converge_procedure = convergence.Host()
def fit(self, data_instances, validate_data=None):
"""
Train lr model of role host
Parameters
----------
data_instances: Table of Instance, input data
"""
LOGGER.info("Enter hetero_logistic_regression host")
# self.header = self.get_header(data_instances)
self.prepare_fit(data_instances, validate_data)
classes = self.one_vs_rest_obj.get_data_classes(data_instances)
if len(classes) > 2:
self.need_one_vs_rest = True
self.need_call_back_loss = False
self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
else:
self.need_one_vs_rest = False
self.fit_binary(data_instances, validate_data)
def fit_binary(self, data_instances, validate_data):
# self._abnormal_detection(data_instances)
# self.check_abnormal_values(data_instances)
# self.check_abnormal_values(validate_data)
# self.validation_strategy = self.init_validation_strategy(data_instances, validate_data)
self.callback_list.on_train_begin(data_instances, validate_data)
LOGGER.debug(f"MODEL_STEP Start fin_binary, data count: {data_instances.count()}")
self.header = self.get_header(data_instances)
model_shape = self.get_features_shape(data_instances)
self.cipher_operator = self.cipher.gen_paillier_cipher_operator(method=self.model_param.encrypt_param.method)
self.batch_generator.initialize_batch_generator(data_instances, shuffle=self.shuffle)
if self.batch_generator.batch_masked:
self.batch_generator.verify_batch_legality(least_batch_size=model_shape)
if self.transfer_variable.use_async.get(idx=0):
LOGGER.debug(f"set_use_async")
self.gradient_loss_operator.set_use_async()
self.gradient_loss_operator.set_total_batch_nums(self.batch_generator.batch_nums)
LOGGER.info("Start initialize model.")
# model_shape = self.get_features_shape(data_instances)
if self.init_param_obj.fit_intercept:
self.init_param_obj.fit_intercept = False
if not self.component_properties.is_warm_start:
w = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.model_weights = LinearModelWeights(w, fit_intercept=self.init_param_obj.fit_intercept)
else:
self.callback_warm_start_init_iter(self.n_iter_)
while self.n_iter_ < self.max_iter:
self.callback_list.on_epoch_begin(self.n_iter_)
LOGGER.info("iter: " + str(self.n_iter_))
batch_data_generator = self.batch_generator.generate_batch_data(suffix=(self.n_iter_, ))
batch_index = 0
self.optimizer.set_iters(self.n_iter_)
for batch_data in batch_data_generator:
                # use the raw batch features directly as 'batch_feat_inst'; no extra feature transformation is applied here
batch_feat_inst = batch_data
# LOGGER.debug(f"MODEL_STEP In Batch {batch_index}, batch data count: {batch_feat_inst.count()}")
LOGGER.debug(
"iter: {}, batch: {}, before compute gradient, data count: {}".format(
self.n_iter_, batch_index, batch_feat_inst.count()))
optim_host_gradient = self.gradient_loss_operator.compute_gradient_procedure(
batch_feat_inst, self.cipher_operator, self.model_weights, self.optimizer, self.n_iter_,
batch_index)
# LOGGER.debug('optim_host_gradient: {}'.format(optim_host_gradient))
self.gradient_loss_operator.compute_loss(self.model_weights, self.optimizer,
self.n_iter_, batch_index, self.cipher_operator,
batch_masked=self.batch_generator.batch_masked)
self.model_weights = self.optimizer.update_model(self.model_weights, optim_host_gradient)
batch_index += 1
self.is_converged = self.converge_procedure.sync_converge_info(suffix=(self.n_iter_,))
LOGGER.info("Get is_converged flag from arbiter:{}".format(self.is_converged))
LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
LOGGER.debug(f"flowid: {self.flowid}, step_index: {self.n_iter_}")
self.callback_list.on_epoch_end(self.n_iter_)
self.n_iter_ += 1
if self.stop_training:
break
if self.is_converged:
break
self.callback_list.on_train_end()
self.set_summary(self.get_model_summary())
# LOGGER.debug("Final lr weights: {}".format(self.model_weights.unboxed))
def predict(self, data_instances):
self.transfer_variable.host_prob.disable_auto_clean()
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
if self.need_one_vs_rest:
self.one_vs_rest_obj.predict(data_instances)
return
prob_host = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
self.transfer_variable.host_prob.remote(prob_host, role=consts.GUEST, idx=0)
LOGGER.info("Remote probability to Guest")
| 7,075 | 44.948052 | 125 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/homo_logistic_regression/homo_lr_client.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch as t
from fate_arch.computing._util import is_table
from federatedml.linear_model.coordinated_linear_model.logistic_regression.\
homo_logistic_regression.homo_lr_base import HomoLRBase
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.nn.dataset.table import TableDataset
from federatedml.nn.homo.trainer.trainer_base import ExporterBase
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer
from federatedml.callbacks.model_checkpoint import ModelCheckpoint
from federatedml.callbacks.validation_strategy import ValidationStrategy
from federatedml.protobuf.generated import lr_model_param_pb2
from federatedml.model_base import MetricMeta
from fate_arch.session import computing_session
from federatedml.nn.backend.utils.data import get_ret_predict_table, add_match_id
from federatedml.nn.loss.weighted_loss import WeightedBCE
from federatedml.statistic.data_overview import check_with_inst_id
def linear_weight_to_torch(model_weights):
model_weights: LinearModelWeights = model_weights
weights = model_weights.coef_
bias = None
use_bias = False
if model_weights.fit_intercept:
bias = model_weights.intercept_
use_bias = True
torch_linear_layer = t.nn.Linear(
in_features=weights.shape[0], out_features=1, bias=use_bias)
    LOGGER.debug('weights are {}, bias is {}'.format(weights, bias))
torch_linear_layer.weight.data.copy_(t.Tensor(weights))
if use_bias:
torch_linear_layer.bias.data.copy_(t.Tensor([bias]))
torch_model = t.nn.Sequential(
torch_linear_layer,
t.nn.Sigmoid()
)
return torch_model
def torch_to_linear_weight(model_weights, torch_model):
if model_weights.fit_intercept:
model_weights._weights = np.concatenate([torch_model[0].weight.detach().numpy().flatten(),
torch_model[0].bias.detach().numpy().flatten()]).tolist()
else:
model_weights._weights = torch_model[0].weight.detach(
).numpy().flatten().tolist()
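# Minimal round-trip sketch (illustrative only; the 4-element weight vector is made up):
#   w = LinearModelWeights(np.array([0.1, -0.2, 0.3, 0.05]), fit_intercept=True)
#   torch_model = linear_weight_to_torch(w)    # Linear(3 -> 1, bias=True) followed by Sigmoid
#   ... train torch_model (e.g. via FedAVGTrainer) ...
#   torch_to_linear_weight(w, torch_model)     # copies the trained weight/bias back into w._weights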
class WrappedOptAndScheduler(object):
def __init__(self, opt, scheduler):
self.opt = opt
self.scheduler = scheduler
def zero_grad(self, ):
self.opt.zero_grad()
def step(self, ):
self.opt.step()
self.scheduler.step()
def state_dict(self):
return self.opt.state_dict()
def restep(self, n):
for i in range(n):
self.opt.zero_grad()
self.opt.step()
self.scheduler.step()
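# Note: restep(n) replays n zero_grad/step/scheduler.step cycles; it is used on warm start (see
# fit_binary below) so the LambdaLR learning-rate schedule resumes from the step count reached in
# the previous run. Since no backward pass has run at that point, the parameters themselves are
# expected to stay untouched and only the scheduler's step count advances.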
class HomoLRClientExporter(ExporterBase):
def __init__(self, header, homo_lr_meta, model_weights, param_name, meta_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.header = header
self.homo_lr_meta = homo_lr_meta
self.model_weights = model_weights
self.param_name = param_name
self.meta_name = meta_name
def export_model_dict(
self,
model=None,
optimizer=None,
model_define=None,
optimizer_define=None,
loss_define=None,
epoch_idx=None,
converge_status=None,
loss_history=None,
best_epoch=None,
extra_data={}):
torch_to_linear_weight(self.model_weights, model)
weight_dict = {}
for idx, header_name in enumerate(self.header):
coef_i = self.model_weights.coef_[idx]
weight_dict[header_name] = float(coef_i)
result = {'iters': epoch_idx,
'loss_history': loss_history,
'is_converged': converge_status,
'weight': weight_dict,
'intercept': self.model_weights.intercept_,
'header': self.header,
'best_iteration': best_epoch
}
param = lr_model_param_pb2.LRModelParam(**result)
meta = self.homo_lr_meta
return {self.param_name: param, self.meta_name: meta}
class HomoLRClient(HomoLRBase):
def __init__(self):
super(HomoLRClient, self).__init__()
self.loss_history = []
self.role = consts.GUEST
self.dataset_cache = {}
self.trainer = None
self.best_iteration = -1
# check point
self.save_freq = None
self.model_checkpoint = None
def _init_model(self, params):
super()._init_model(params)
def get_dataset(self, data):
if id(data) in self.dataset_cache:
return self.dataset_cache[id(data)]
if is_table(data):
dataset = TableDataset()
dataset.load(data)
self.dataset_cache[id(data)] = dataset
return dataset
else:
raise RuntimeError('unknown data type {}'.format(data))
def init(self, dataset: TableDataset, partitions):
torch_model = linear_weight_to_torch(self.model_weights)
LOGGER.debug('torch model is {}, parameters are {} dataset {}'.format(
torch_model, list(torch_model.parameters()), dataset))
batch_size = len(dataset) if self.batch_size == -1 else self.batch_size
optimizer, scheduler = self.get_torch_optimizer(
torch_model, self.model_param)
wrap_optimizer = WrappedOptAndScheduler(optimizer, scheduler)
LOGGER.debug('init optimizer statedict is {}'.format(wrap_optimizer.state_dict()))
if dataset.with_sample_weight:
loss = WeightedBCE()
else:
loss = t.nn.BCELoss()
early_stop = None
if self.early_stop != 'weight_diff':
early_stop = self.early_stop
trainer = FedAVGTrainer(
epochs=self.max_iter,
batch_size=batch_size,
data_loader_worker=partitions,
secure_aggregate=True,
aggregate_every_n_epoch=self.aggregate_iters,
validation_freqs=self.validation_freqs,
task_type='binary',
checkpoint_save_freqs=self.save_freq,
early_stop=early_stop,
shuffle=False,
tol=self.tol)
if not self.callback_one_vs_rest:
trainer.set_tracker(self.tracker)
trainer.set_model(torch_model)
trainer.set_model_exporter(
HomoLRClientExporter(
header=self.header,
homo_lr_meta=self._get_meta(),
model_weights=self.model_weights,
meta_name=self.model_meta_name,
param_name=self.model_param_name))
trainer.set_checkpoint(self.model_checkpoint)
return trainer, torch_model, wrap_optimizer, loss
def get_model_summary(self, is_converged, best_iteration, loss_history, eval_summary):
header = self.header
if header is None:
return {}
weight_dict, intercept_ = self.get_weight_intercept_dict(header)
summary = {"coef": weight_dict,
"intercept": intercept_,
"is_converged": is_converged,
"best_iteration": best_iteration,
"local_loss_history": loss_history,
"validation_metrics": eval_summary
}
return summary
def fit_binary(self, data_instances, validate_data=None):
for callback_cpn in self.callback_list.callback_list:
if isinstance(callback_cpn, ModelCheckpoint):
self.save_freq = callback_cpn.save_freq
self.model_checkpoint = callback_cpn
elif isinstance(callback_cpn, ValidationStrategy):
self.validation_freqs = callback_cpn.validation_freqs
train_set = self.get_dataset(data_instances)
train_set.set_type('train')
if validate_data is not None:
val_set = self.get_dataset(validate_data)
val_set.set_type('validate')
else:
val_set = None
if not self.component_properties.is_warm_start:
self.model_weights = self._init_model_variables(data_instances)
else:
LOGGER.debug('callback warm start, iter {}'.format(self.n_iter_))
self.callback_warm_start_init_iter(self.n_iter_ + 1)
# fate loss callback setting
LOGGER.debug('need one vs rest {}'.format(self.need_one_vs_rest))
if not self.callback_one_vs_rest: # ovr does not display loss
self.callback_meta(
"loss",
"train",
MetricMeta(
name="train",
metric_type="LOSS",
extra_metas={
"unit_name": "epochs"}))
self.trainer, torch_model, wrap_optimizer, loss = self.init(
train_set, data_instances.partitions)
if self.component_properties.is_warm_start:
wrap_optimizer.restep(self.n_iter_ + 1)
self.trainer.train(train_set, val_set, loss=loss,
optimizer=wrap_optimizer)
torch_to_linear_weight(self.model_weights, torch_model)
eval_summary = self.trainer.get_evaluation_summary()
summary = self.trainer.get_summary()
self.is_converged, self.best_iteration, self.loss_history = summary[
'need_stop'], summary['best_epoch'], summary['loss_history']
self.n_iter_ = len(self.loss_history) - 1
        self.set_summary(self.get_model_summary(
            self.is_converged, self.best_iteration, self.loss_history, eval_summary))
@assert_io_num_rows_equal
def predict(self, data_instances):
self._abnormal_detection(data_instances)
self.init_schema(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
with_inst_id = check_with_inst_id(data_instances)
dataset = self.get_dataset(data_instances)
if self.need_one_vs_rest:
predict_result = self.one_vs_rest_obj.predict(data_instances)
return predict_result
dataset.set_type('predict')
if self.trainer is None:
self.trainer, torch_model, wrap_optimizer, loss = self.init(
dataset, data_instances.partitions)
trainer_ret = self.trainer.predict(dataset)
id_table, pred_table, classes = trainer_ret()
if with_inst_id:
add_match_id(id_table=id_table, dataset_inst=dataset)
id_dtable, pred_dtable = get_ret_predict_table(
id_table, pred_table, classes, data_instances.partitions, computing_session)
ret_table = self.predict_score_to_output(
id_dtable, pred_dtable, classes)
return ret_table
| 11,511 | 35.087774 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/homo_logistic_regression/homo_lr_base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import torch as t
import math
from torch.optim.lr_scheduler import LambdaLR
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.logistic_regression.base_logistic_regression import BaseLogisticRegression
from federatedml.optim import activation
from federatedml.param.logistic_regression_param import HomoLogisticParam
from federatedml.protobuf.generated import lr_model_meta_pb2
from federatedml.secureprotol import PaillierEncrypt
from federatedml.statistic import data_overview
from federatedml.transfer_variable.transfer_class.homo_lr_transfer_variable import HomoLRTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util import fate_operator
class HomoLRBase(BaseLogisticRegression):
def __init__(self):
super(HomoLRBase, self).__init__()
self.model_name = 'HomoLogisticRegression'
self.model_param_name = 'HomoLogisticRegressionParam'
self.model_meta_name = 'HomoLogisticRegressionMeta'
self.mode = consts.HOMO
self.model_param = HomoLogisticParam()
self.aggregator = None
self.param = None
def get_torch_optimizer(self, torch_model: t.nn.Module, param: HomoLogisticParam):
try:
learning_rate = param.learning_rate
alpha = param.alpha # L2 penalty weight
decay = param.decay
decay_sqrt = param.decay_sqrt
if not decay_sqrt:
def decay_func(epoch): return 1 / (1 + epoch * decay)
else:
def decay_func(epoch): return 1 / math.sqrt(1 + epoch * decay)
except AttributeError:
raise AttributeError("Optimizer parameters has not been totally set")
optimizer_type = param.optimizer
if optimizer_type == 'sgd':
opt = t.optim.SGD(params=torch_model.parameters(), lr=learning_rate, weight_decay=alpha)
elif optimizer_type == 'nesterov_momentum_sgd':
opt = t.optim.SGD(
params=torch_model.parameters(),
nesterov=True,
momentum=0.9,
lr=learning_rate,
weight_decay=alpha)
elif optimizer_type == 'rmsprop':
opt = t.optim.RMSprop(params=torch_model.parameters(), alpha=0.99, lr=learning_rate, weight_decay=alpha)
elif optimizer_type == 'adam':
opt = t.optim.Adam(params=torch_model.parameters(), lr=learning_rate, weight_decay=alpha)
elif optimizer_type == 'adagrad':
opt = t.optim.Adagrad(params=torch_model.parameters(), lr=learning_rate, weight_decay=alpha)
else:
if optimizer_type == 'sqn':
raise NotImplementedError("Sqn optimizer is not supported in Homo-LR")
raise NotImplementedError("Optimize method cannot be recognized: {}".format(optimizer_type))
scheduler = LambdaLR(opt, lr_lambda=decay_func)
return opt, scheduler
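    # Decay example (illustrative numbers): with learning_rate=0.1 and decay=0.1, LambdaLR scales
    # the base rate by decay_func(k): 0.1 at k=0, 0.1/1.5 ~= 0.067 at k=5 and 0.05 at k=10
    # (or 0.1/sqrt(2) ~= 0.071 at k=10 when decay_sqrt is set). Note that k advances on every
    # scheduler.step() call, which the client-side optimizer wrapper issues per optimizer step,
    # so k counts update steps rather than epochs.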
def _init_model(self, params):
super(HomoLRBase, self)._init_model(params)
self.transfer_variable = HomoLRTransferVariable()
# self.aggregator.register_aggregator(self.transfer_variable)
self.param = params
self.aggregate_iters = params.aggregate_iters
@property
def use_loss(self):
if self.model_param.early_stop == 'weight_diff':
return False
return True
def fit(self, data_instances, validate_data=None):
classes = self.one_vs_rest_obj.get_data_classes(data_instances)
if self.role == consts.ARBITER:
self._server_check_data()
else:
self._client_check_data(data_instances)
if len(classes) > 2:
self.need_one_vs_rest = True
self.need_call_back_loss = False
self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
if self.header is None:
self.header = self.one_vs_rest_obj.header
else:
self.need_one_vs_rest = False
self.fit_binary(data_instances, validate_data)
def fit_binary(self, data_instances, validate_data):
raise NotImplementedError("Should not called here")
def _client_check_data(self, data_instances):
self._abnormal_detection(data_instances)
self.check_abnormal_values(data_instances)
self.init_schema(data_instances)
# Support multi-class now
"""
num_classes, classes_ = ClassifyLabelChecker.validate_label(data_instances)
aligned_label, new_label_mapping = HomoLabelEncoderClient().label_alignment(classes_)
if len(aligned_label) > 2:
raise ValueError("Homo LR support binary classification only now")
elif len(aligned_label) <= 1:
raise ValueError("Number of classes should be equal to 2")
"""
def _server_check_data(self):
# HomoLabelEncoderArbiter().label_alignment()
pass
def classify(self, predict_wx, threshold):
"""
        Convert a wx (linear predictor) table into a table of (probability, predicted label).
"""
# predict_wx = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
def predict(x):
prob = activation.sigmoid(x)
pred_label = 1 if prob > threshold else 0
return prob, pred_label
predict_table = predict_wx.mapValues(predict)
return predict_table
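    # Example (illustrative): for a sample with wx = 0.8, classify yields
    # prob = sigmoid(0.8) ~= 0.69, and with threshold = 0.5 the predicted label is 1.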
def _init_model_variables(self, data_instances):
model_shape = data_overview.get_features_shape(data_instances)
LOGGER.info("Initialized model shape is {}".format(model_shape))
w = self.initializer.init_model(model_shape, init_params=self.init_param_obj,
data_instance=data_instances)
model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept)
return model_weights
def _compute_loss(self, data_instances, prev_round_weights):
f = functools.partial(self.gradient_operator.compute_loss,
coef=self.model_weights.coef_,
intercept=self.model_weights.intercept_)
loss = data_instances.applyPartitions(f).reduce(fate_operator.reduce_add)
if self.use_proximal: # use additional proximal term
loss_norm = self.optimizer.loss_norm(self.model_weights,
prev_round_weights)
else:
loss_norm = self.optimizer.loss_norm(self.model_weights)
if loss_norm is not None:
loss += loss_norm
loss /= data_instances.count()
if self.need_call_back_loss:
self.callback_loss(self.n_iter_, loss)
self.loss_history.append(loss)
return loss
def _get_meta(self):
meta_protobuf_obj = lr_model_meta_pb2.LRModelMeta(penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept,
module='HomoLR',
need_one_vs_rest=self.need_one_vs_rest)
return meta_protobuf_obj
| 8,574 | 42.090452 | 129 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/homo_logistic_regression/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
| 619 | 33.444444 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/homo_logistic_regression/homo_lr_server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.linear_model.coordinated_linear_model.\
logistic_regression.homo_logistic_regression.homo_lr_base import HomoLRBase
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.model_base import MetricMeta
from federatedml.callbacks.model_checkpoint import ModelCheckpoint
from federatedml.callbacks.validation_strategy import ValidationStrategy
from federatedml.nn.homo.trainer.trainer_base import ExporterBase
from federatedml.protobuf.generated import lr_model_param_pb2, lr_model_meta_pb2
class HomoLRServerExporter(ExporterBase):
def __init__(self, param_name, meta_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.param_name = param_name
self.meta_name = meta_name
def export_model_dict(self, *args, **kwargs):
# return empty model only
return {self.param_name: lr_model_param_pb2.LRModelParam(), self.meta_name: lr_model_meta_pb2.LRModelMeta()}
class HomoLRServer(HomoLRBase):
def __init__(self):
super(HomoLRServer, self).__init__()
self.re_encrypt_times = [] # Record the times needed for each host
self.role = consts.ARBITER
self.trainer = None
# check point
self.save_freq = None
self.model_checkpoint = None
def _init_model(self, params):
super()._init_model(params)
def fit_binary(self, data_instances=None, validate_data=None):
for callback_cpn in self.callback_list.callback_list:
if isinstance(callback_cpn, ModelCheckpoint):
self.save_freq = callback_cpn.save_freq
self.model_checkpoint = callback_cpn
elif isinstance(callback_cpn, ValidationStrategy):
self.validation_freqs = callback_cpn.validation_freqs
# fate loss callback setting
if not self.callback_one_vs_rest: # ovr does not display loss
self.callback_meta(
"loss", "train", MetricMeta(
name="train", metric_type="LOSS", extra_metas={
"unit_name": "aggregate_round"}))
early_stop = None
if self.early_stop != 'weight_diff':
early_stop = self.early_stop
self.trainer = FedAVGTrainer(
epochs=self.max_iter,
secure_aggregate=True,
aggregate_every_n_epoch=self.aggregate_iters,
validation_freqs=self.validation_freqs,
task_type='binary',
checkpoint_save_freqs=self.save_freq,
early_stop=early_stop,
tol=self.tol,
shuffle=False
)
if self.one_vs_rest_obj is None:
self.trainer.set_tracker(self.tracker)
self.trainer.set_checkpoint(self.model_checkpoint)
self.trainer.set_model_exporter(HomoLRServerExporter(self.model_param_name, self.model_meta_name))
self.trainer.server_aggregate_procedure()
LOGGER.info("Finish Training task")
def predict(self, data_instantces=None):
LOGGER.info(f'Start predict task')
pass
def export_model(self):
# arbiter does not save models
return None
def load_model(self, model_dict):
# do nothing now
return None
| 3,980 | 36.914286 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/hetero_sshe_linear_model.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from abc import ABC
import numpy as np
from fate_arch.session import get_parties
from federatedml.framework.hetero.procedure import batch_generator
from federatedml.linear_model.linear_model_base import BaseLinearModel
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.param.init_model_param import InitParam
from federatedml.protobuf.generated import sshe_cipher_param_pb2
from federatedml.secureprotol import PaillierEncrypt
from federatedml.secureprotol.fate_paillier import PaillierPublicKey, PaillierPrivateKey, PaillierEncryptedNumber
from federatedml.secureprotol.fixedpoint import FixedPointEndec
from federatedml.secureprotol.spdz import SPDZ
from federatedml.secureprotol.spdz.secure_matrix.secure_matrix import SecureMatrix
from federatedml.secureprotol.spdz.tensor import fixedpoint_table, fixedpoint_numpy
from federatedml.statistic.data_overview import with_weight, scale_sample_weight
from federatedml.transfer_variable.transfer_class.batch_generator_transfer_variable import \
BatchGeneratorTransferVariable
from federatedml.transfer_variable.transfer_class.converge_checker_transfer_variable import \
ConvergeCheckerTransferVariable
from federatedml.transfer_variable.transfer_class.sshe_model_transfer_variable import SSHEModelTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroSSHEBase(BaseLinearModel, ABC):
def __init__(self):
super().__init__()
self.mode = consts.HETERO
self.cipher = None
self.q_field = None
self.model_param = None
# self.labels = None
self.weight = None
self.batch_generator = None
self.batch_num = []
self.secure_matrix_obj: SecureMatrix
# self._set_parties()
self.parties = None
self.local_party = None
self.other_party = None
self.label_type = None
def _transfer_q_field(self):
raise NotImplementedError(f"Should not be called here")
def _init_model(self, params):
super()._init_model(params)
self.cipher = PaillierEncrypt()
self.cipher.generate_key(self.model_param.encrypt_param.key_length)
self.transfer_variable = SSHEModelTransferVariable()
self.converge_func_name = params.early_stop
self.reveal_every_iter = params.reveal_every_iter
self.q_field = self._transfer_q_field()
LOGGER.debug(f"q_field: {self.q_field}")
if not self.reveal_every_iter:
self.self_optimizer = copy.deepcopy(self.optimizer)
self.remote_optimizer = copy.deepcopy(self.optimizer)
self.fixedpoint_encoder = FixedPointEndec(n=self.q_field)
self.converge_transfer_variable = ConvergeCheckerTransferVariable()
self.secure_matrix_obj = SecureMatrix(party=self.local_party,
q_field=self.q_field,
other_party=self.other_party)
def _init_weights(self, model_shape):
return self.initializer.init_model(model_shape, init_params=self.init_param_obj)
@property
def is_respectively_reveal(self):
return self.model_param.reveal_strategy == "respectively"
def _cal_z_in_share(self, w_self, w_remote, features, suffix, cipher):
raise NotImplementedError("Should not be called here")
def share_model(self, w, suffix):
raise NotImplementedError("Should not be called here")
def forward(self, weights, features, labels, suffix, cipher, batch_weight):
raise NotImplementedError("Should not be called here")
def backward(self, error, features, suffix, cipher):
raise NotImplementedError("Should not be called here")
def compute_loss(self, weights, labels, suffix, cipher):
raise NotImplementedError("Should not be called here")
def reveal_models(self, w_self, w_remote, suffix=None):
raise NotImplementedError(f"Should not be called here")
def check_converge_by_loss(self, loss, suffix):
raise NotImplementedError(f"Should not be called here")
def check_converge_by_weights(self, last_w, new_w, suffix):
if self.reveal_every_iter:
return self._reveal_every_iter_weights_check(last_w, new_w, suffix)
else:
return self._not_reveal_every_iter_weights_check(last_w, new_w, suffix)
def _reveal_every_iter_weights_check(self, last_w, new_w, suffix):
raise NotImplementedError("Should not be called here")
def _not_reveal_every_iter_weights_check(self, last_w, new_w, suffix):
last_w_self, last_w_remote = last_w
w_self, w_remote = new_w
grad_self = w_self - last_w_self
grad_remote = w_remote - last_w_remote
if self.role == consts.GUEST:
grad_encode = np.hstack((grad_remote.value, grad_self.value))
else:
grad_encode = np.hstack((grad_self.value, grad_remote.value))
grad_encode = np.array([grad_encode])
grad_tensor_name = ".".join(("check_converge_grad",) + suffix)
grad_tensor = fixedpoint_numpy.FixedPointTensor(value=grad_encode,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=grad_tensor_name)
grad_tensor_transpose_name = ".".join(("check_converge_grad_transpose",) + suffix)
grad_tensor_transpose = fixedpoint_numpy.FixedPointTensor(value=grad_encode.T,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=grad_tensor_transpose_name)
grad_norm_tensor_name = ".".join(("check_converge_grad_norm",) + suffix)
grad_norm = grad_tensor.dot(grad_tensor_transpose, target_name=grad_norm_tensor_name).get()
weight_diff = np.sqrt(grad_norm[0][0])
LOGGER.info("iter: {}, weight_diff:{}, is_converged: {}".format(self.n_iter_,
weight_diff, self.is_converged))
is_converge = False
if weight_diff < self.model_param.tol:
is_converge = True
return is_converge
def get_single_model_weight_dict(self, model_weights=None, header=None):
header = header if header else self.header
model_weights = model_weights if model_weights else self.model_weights
weight_dict = {}
for idx, header_name in enumerate(header):
coef_i = model_weights.coef_[idx]
weight_dict[header_name] = coef_i
return weight_dict
def get_single_model_param(self, model_weights=None, header=None):
header = header if header else self.header
result = {'iters': self.n_iter_,
'loss_history': self.loss_history,
'is_converged': self.is_converged,
'intercept': self.model_weights.intercept_,
'header': header,
'best_iteration': -1 if self.validation_strategy is None else
self.validation_strategy.best_iteration
}
return result
def load_model(self, model_dict):
LOGGER.debug("Start Loading model")
result_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
if self.init_param_obj is None:
self.init_param_obj = InitParam()
self.init_param_obj.fit_intercept = meta_obj.fit_intercept
self.model_param.reveal_strategy = meta_obj.reveal_strategy
LOGGER.debug(f"reveal_strategy: {self.model_param.reveal_strategy}, {self.is_respectively_reveal}")
self.header = list(result_obj.header)
return result_obj, meta_obj
def load_single_model(self, single_model_obj):
raise NotImplementedError(f"should not be called here")
def load_single_model_weight(self, single_model_obj):
feature_shape = len(self.header)
tmp_vars = np.zeros(feature_shape)
weight_dict = dict(single_model_obj.weight)
for idx, header_name in enumerate(self.header):
tmp_vars[idx] = weight_dict.get(header_name)
if self.fit_intercept:
tmp_vars = np.append(tmp_vars, single_model_obj.intercept)
self.model_weights = LinearModelWeights(tmp_vars, fit_intercept=self.fit_intercept)
def fit_single_model(self, data_instances, validate_data=None):
LOGGER.info(f"Start to train single {self.model_name}")
if len(self.component_properties.host_party_idlist) > 1:
raise ValueError(f"Hetero SSHE Model does not support multi-host training.")
self.callback_list.on_train_begin(data_instances, validate_data)
model_shape = self.get_features_shape(data_instances)
instances_count = data_instances.count()
if not self.component_properties.is_warm_start:
w = self._init_weights(model_shape)
self.model_weights = LinearModelWeights(l=w,
fit_intercept=self.model_param.init_param.fit_intercept)
last_models = copy.deepcopy(self.model_weights)
else:
last_models = copy.deepcopy(self.model_weights)
w = last_models.unboxed
self.callback_warm_start_init_iter(self.n_iter_)
if self.role == consts.GUEST:
if with_weight(data_instances):
LOGGER.info(f"data with sample weight, use sample weight.")
if self.model_param.early_stop == "diff":
LOGGER.warning("input data with weight, please use 'weight_diff' for 'early_stop'.")
data_instances = scale_sample_weight(data_instances)
self.batch_generator.initialize_batch_generator(data_instances, batch_size=self.batch_size)
with SPDZ(
"hetero_sshe",
local_party=self.local_party,
all_parties=self.parties,
q_field=self.q_field,
use_mix_rand=self.model_param.use_mix_rand,
) as spdz:
spdz.set_flowid(self.flowid)
self.secure_matrix_obj.set_flowid(self.flowid)
# not sharing the model when reveal_every_iter
if not self.reveal_every_iter:
w_self, w_remote = self.share_model(w, suffix="init")
last_w_self, last_w_remote = w_self, w_remote
LOGGER.debug(f"first_w_self shape: {w_self.shape}, w_remote_shape: {w_remote.shape}")
batch_data_generator = self.batch_generator.generate_batch_data()
encoded_batch_data = []
batch_labels_list = []
batch_weight_list = []
for batch_data in batch_data_generator:
if self.fit_intercept:
batch_features = batch_data.mapValues(lambda x: np.hstack((x.features, 1.0)))
else:
batch_features = batch_data.mapValues(lambda x: x.features)
if self.role == consts.GUEST:
batch_labels = batch_data.mapValues(lambda x: np.array([x.label], dtype=self.label_type))
batch_labels_list.append(batch_labels)
if self.weight:
batch_weight = batch_data.mapValues(lambda x: np.array([x.weight], dtype=float))
batch_weight_list.append(batch_weight)
else:
batch_weight_list.append(None)
self.batch_num.append(batch_data.count())
encoded_batch_data.append(
fixedpoint_table.FixedPointTensor(self.fixedpoint_encoder.encode(batch_features),
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder))
while self.n_iter_ < self.max_iter:
self.callback_list.on_epoch_begin(self.n_iter_)
LOGGER.info(f"start to n_iter: {self.n_iter_}")
loss_list = []
self.optimizer.set_iters(self.n_iter_)
if not self.reveal_every_iter:
self.self_optimizer.set_iters(self.n_iter_)
self.remote_optimizer.set_iters(self.n_iter_)
for batch_idx, batch_data in enumerate(encoded_batch_data):
current_suffix = (str(self.n_iter_), str(batch_idx))
if self.role == consts.GUEST:
batch_labels = batch_labels_list[batch_idx]
batch_weight = batch_weight_list[batch_idx]
else:
batch_labels = None
batch_weight = None
if self.reveal_every_iter:
y = self.forward(weights=self.model_weights,
features=batch_data,
labels=batch_labels,
suffix=current_suffix,
cipher=self.cipher,
batch_weight=batch_weight)
else:
y = self.forward(weights=(w_self, w_remote),
features=batch_data,
labels=batch_labels,
suffix=current_suffix,
cipher=self.cipher,
batch_weight=batch_weight)
if self.role == consts.GUEST:
if self.weight:
error = y - batch_labels.join(batch_weight, lambda y, b: y * b)
else:
error = y - batch_labels
self_g, remote_g = self.backward(error=error,
features=batch_data,
suffix=current_suffix,
cipher=self.cipher)
else:
self_g, remote_g = self.backward(error=y,
features=batch_data,
suffix=current_suffix,
cipher=self.cipher)
# loss computing;
suffix = ("loss",) + current_suffix
if self.reveal_every_iter:
batch_loss = self.compute_loss(weights=self.model_weights,
labels=batch_labels,
suffix=suffix,
cipher=self.cipher)
else:
batch_loss = self.compute_loss(weights=(w_self, w_remote),
labels=batch_labels,
suffix=suffix,
cipher=self.cipher)
if batch_loss is not None:
batch_loss = batch_loss * self.batch_num[batch_idx]
loss_list.append(batch_loss)
if self.reveal_every_iter:
# LOGGER.debug(f"before reveal: self_g shape: {self_g.shape}, remote_g_shape: {remote_g},"
# f"self_g: {self_g}")
new_g = self.reveal_models(self_g, remote_g, suffix=current_suffix)
# LOGGER.debug(f"after reveal: new_g shape: {new_g.shape}, new_g: {new_g}"
# f"self.model_param.reveal_strategy: {self.model_param.reveal_strategy}")
if new_g is not None:
self.model_weights = self.optimizer.update_model(self.model_weights, new_g,
has_applied=False)
else:
self.model_weights = LinearModelWeights(
l=np.zeros(self_g.shape),
fit_intercept=self.model_param.init_param.fit_intercept)
else:
if self.optimizer.penalty == consts.L2_PENALTY:
self_g = self_g + self.self_optimizer.alpha * w_self
remote_g = remote_g + self.remote_optimizer.alpha * w_remote
# LOGGER.debug(f"before optimizer: {self_g}, {remote_g}")
self_g = self.self_optimizer.apply_gradients(self_g)
remote_g = self.remote_optimizer.apply_gradients(remote_g)
# LOGGER.debug(f"after optimizer: {self_g}, {remote_g}")
w_self -= self_g
w_remote -= remote_g
LOGGER.debug(f"w_self shape: {w_self.shape}, w_remote_shape: {w_remote.shape}")
if self.role == consts.GUEST:
loss = np.sum(loss_list) / instances_count
self.loss_history.append(loss)
if self.need_call_back_loss:
self.callback_loss(self.n_iter_, loss)
else:
loss = None
if self.converge_func_name in ["diff", "abs"]:
self.is_converged = self.check_converge_by_loss(loss, suffix=(str(self.n_iter_),))
elif self.converge_func_name == "weight_diff":
if self.reveal_every_iter:
self.is_converged = self.check_converge_by_weights(
last_w=last_models.unboxed,
new_w=self.model_weights.unboxed,
suffix=(str(self.n_iter_),))
last_models = copy.deepcopy(self.model_weights)
else:
self.is_converged = self.check_converge_by_weights(
last_w=(last_w_self, last_w_remote),
new_w=(w_self, w_remote),
suffix=(str(self.n_iter_),))
last_w_self, last_w_remote = copy.deepcopy(w_self), copy.deepcopy(w_remote)
else:
raise ValueError(f"Cannot recognize early_stop function: {self.converge_func_name}")
LOGGER.info("iter: {}, is_converged: {}".format(self.n_iter_, self.is_converged))
self.callback_list.on_epoch_end(self.n_iter_)
self.n_iter_ += 1
if self.stop_training:
break
if self.is_converged:
break
# Finally reconstruct
if not self.reveal_every_iter:
new_w = self.reveal_models(w_self, w_remote, suffix=("final",))
if new_w is not None:
self.model_weights = LinearModelWeights(
l=new_w,
fit_intercept=self.model_param.init_param.fit_intercept)
LOGGER.debug(f"loss_history: {self.loss_history}")
self.set_summary(self.get_model_summary())
def get_model_summary(self):
summary = super().get_model_summary()
if not self.is_respectively_reveal:
del summary["intercept"]
del summary["coef"]
return summary
class HeteroSSHEGuestBase(HeteroSSHEBase, ABC):
def __init__(self):
super().__init__()
self.role = consts.GUEST
self.local_party = get_parties().local_party
self.other_party = get_parties().roles_to_parties(["host"])[0]
self.parties = [self.local_party] + [self.other_party]
self.encrypted_error = None
self.encrypted_wx = None
self.z_square = None
self.wx_self = None
self.wx_remote = None
def _init_model(self, params):
super()._init_model(params)
# self.batch_generator = batch_generator.Guest()
# self.batch_generator.register_batch_generator(BatchGeneratorTransferVariable(), has_arbiter=False)
def _transfer_q_field(self):
q_field = self.cipher.public_key.n
self.transfer_variable.q_field.remote(q_field, role=consts.HOST, suffix=("q_field",))
return q_field
def _cal_z(self, weights, features, suffix, cipher):
if not self.reveal_every_iter:
LOGGER.info(f"[forward]: Calculate z in share...")
w_self, w_remote = weights
z = self._cal_z_in_share(w_self, w_remote, features, suffix, cipher)
else:
LOGGER.info(f"[forward]: Calculate z directly...")
w = weights.unboxed
z = features.dot_local(w)
remote_z = self.secure_matrix_obj.share_encrypted_matrix(suffix=suffix,
is_remote=False,
cipher=None,
z=None)[0]
self.wx_self = z
self.wx_remote = remote_z
def _cal_z_in_share(self, w_self, w_remote, features, suffix, cipher):
z1 = features.dot_local(w_self)
za_suffix = ("za",) + suffix
za_share = self.secure_matrix_obj.secure_matrix_mul(w_remote,
tensor_name=".".join(za_suffix),
cipher=cipher,
suffix=za_suffix)
zb_suffix = ("zb",) + suffix
zb_share = self.secure_matrix_obj.secure_matrix_mul(features,
tensor_name=".".join(zb_suffix),
cipher=None,
suffix=zb_suffix)
z = z1 + za_share + zb_share
return z
def backward(self, error, features, suffix, cipher):
LOGGER.info(f"[backward]: Calculate gradient...")
batch_num = self.batch_num[int(suffix[1])]
error_1_n = error * (1 / batch_num)
ga2_suffix = ("ga2",) + suffix
ga2_2 = self.secure_matrix_obj.secure_matrix_mul(error_1_n,
tensor_name=".".join(ga2_suffix),
cipher=cipher,
suffix=ga2_suffix,
is_fixedpoint_table=False)
# LOGGER.debug(f"ga2_2: {ga2_2}")
encrypt_g = self.encrypted_error.dot(features) * (1 / batch_num)
# LOGGER.debug(f"encrypt_g: {encrypt_g}")
tensor_name = ".".join(("encrypt_g",) + suffix)
gb2 = SecureMatrix.from_source(tensor_name,
encrypt_g,
self.cipher,
self.fixedpoint_encoder.n,
self.fixedpoint_encoder)
# LOGGER.debug(f"gb2: {gb2}")
return gb2, ga2_2
def share_model(self, w, suffix):
source = [w, self.other_party]
wb, wa = (
fixedpoint_numpy.FixedPointTensor.from_source(f"wb_{suffix}", source[0],
encoder=self.fixedpoint_encoder,
q_field=self.q_field),
fixedpoint_numpy.FixedPointTensor.from_source(f"wa_{suffix}", source[1],
encoder=self.fixedpoint_encoder,
q_field=self.q_field),
)
return wb, wa
def reveal_models(self, w_self, w_remote, suffix=None):
if suffix is None:
suffix = self.n_iter_
if self.model_param.reveal_strategy == "respectively":
new_w = w_self.get(tensor_name=f"wb_{suffix}",
broadcast=False)
w_remote.broadcast_reconstruct_share(tensor_name=f"wa_{suffix}")
elif self.model_param.reveal_strategy == "encrypted_reveal_in_host":
new_w = w_self.get(tensor_name=f"wb_{suffix}",
broadcast=False)
encrypted_w_remote = self.cipher.recursive_encrypt(self.fixedpoint_encoder.decode(w_remote.value))
encrypted_w_remote_tensor = fixedpoint_numpy.PaillierFixedPointTensor(value=encrypted_w_remote)
encrypted_w_remote_tensor.broadcast_reconstruct_share(tensor_name=f"wa_{suffix}")
else:
raise NotImplementedError(f"reveal strategy: {self.model_param.reveal_strategy} has not been implemented.")
return new_w
def _reveal_every_iter_weights_check(self, last_w, new_w, suffix):
square_sum = np.sum((last_w - new_w) ** 2)
host_sums = self.converge_transfer_variable.square_sum.get(suffix=suffix)
for hs in host_sums:
square_sum += hs
weight_diff = np.sqrt(square_sum)
is_converge = False
if weight_diff < self.model_param.tol:
is_converge = True
LOGGER.info(f"n_iter: {self.n_iter_}, weight_diff: {weight_diff}")
self.converge_transfer_variable.converge_info.remote(is_converge, role=consts.HOST, suffix=suffix)
return is_converge
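    # Example (illustrative numbers): if the guest's local square_sum is 0.0004 and the single
    # host reports 0.0009, weight_diff = sqrt(0.0013) ~= 0.036; convergence requires
    # weight_diff < tol, and the resulting flag is sent back to the host.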
def check_converge_by_loss(self, loss, suffix):
self.is_converged = self.converge_func.is_converge(loss)
self.transfer_variable.is_converged.remote(self.is_converged, suffix=suffix)
return self.is_converged
def prepare_fit(self, data_instances, validate_data):
# self.transfer_variable = SSHEModelTransferVariable()
self.batch_generator = batch_generator.Guest()
self.batch_generator.register_batch_generator(BatchGeneratorTransferVariable(), has_arbiter=False)
self.header = copy.deepcopy(data_instances.schema.get("header", []))
self._abnormal_detection(data_instances)
self.check_abnormal_values(data_instances)
self.check_abnormal_values(validate_data)
def get_single_model_param(self, model_weights=None, header=None):
result = super().get_single_model_param(model_weights, header)
result['weight'] = self.get_single_model_weight_dict(model_weights, header)
if not self.is_respectively_reveal:
result["cipher"] = dict(public_key=dict(n=str(self.cipher.public_key.n)),
private_key=dict(p=str(self.cipher.privacy_key.p),
q=str(self.cipher.privacy_key.q)))
return result
def load_single_model(self, single_model_obj):
LOGGER.info("start to load single model")
self.load_single_model_weight(single_model_obj)
self.n_iter_ = single_model_obj.iters
if not self.is_respectively_reveal:
cipher_info = single_model_obj.cipher
self.cipher = PaillierEncrypt()
public_key = PaillierPublicKey(int(cipher_info.public_key.n))
privacy_key = PaillierPrivateKey(public_key, int(cipher_info.private_key.p), int(cipher_info.private_key.q))
self.cipher.set_public_key(public_key=public_key)
self.cipher.set_privacy_key(privacy_key=privacy_key)
return self
class HeteroSSHEHostBase(HeteroSSHEBase, ABC):
def __init__(self):
super().__init__()
self.role = consts.HOST
self.local_party = get_parties().local_party
self.other_party = get_parties().roles_to_parties(["guest"])[0]
self.parties = [self.other_party] + [self.local_party]
self.wx_self = None
def _init_model(self, params):
super()._init_model(params)
self.init_param_obj.fit_intercept = False
# self.batch_generator = batch_generator.Host()
# self.batch_generator.register_batch_generator(BatchGeneratorTransferVariable(), has_arbiter=False)
def _transfer_q_field(self):
q_field = self.transfer_variable.q_field.get(role=consts.GUEST, idx=0,
suffix=("q_field",))
return q_field
def _cal_z_in_share(self, w_self, w_remote, features, suffix, cipher):
z1 = features.dot_local(w_self)
za_suffix = ("za",) + suffix
za_share = self.secure_matrix_obj.secure_matrix_mul(features,
tensor_name=".".join(za_suffix),
cipher=None,
suffix=za_suffix)
zb_suffix = ("zb",) + suffix
zb_share = self.secure_matrix_obj.secure_matrix_mul(w_remote,
tensor_name=".".join(zb_suffix),
cipher=cipher,
suffix=zb_suffix)
z = z1 + za_share + zb_share
return z
def backward(self, error: fixedpoint_table.FixedPointTensor, features, suffix, cipher):
LOGGER.info(f"[backward]: Calculate gradient...")
batch_num = self.batch_num[int(suffix[1])]
ga = features.dot_local(error)
# LOGGER.debug(f"ga: {ga}, batch_num: {batch_num}")
ga = ga * (1 / batch_num)
zb_suffix = ("ga2",) + suffix
ga2_1 = self.secure_matrix_obj.secure_matrix_mul(features,
tensor_name=".".join(zb_suffix),
cipher=None,
suffix=zb_suffix)
# LOGGER.debug(f"ga2_1: {ga2_1}")
ga_new = ga + ga2_1
tensor_name = ".".join(("encrypt_g",) + suffix)
gb1 = SecureMatrix.from_source(tensor_name,
self.other_party,
cipher,
self.fixedpoint_encoder.n,
self.fixedpoint_encoder,
is_fixedpoint_table=False)
# LOGGER.debug(f"gb1: {gb1}")
return ga_new, gb1
def share_model(self, w, suffix):
source = [w, self.other_party]
wa, wb = (
fixedpoint_numpy.FixedPointTensor.from_source(f"wa_{suffix}", source[0],
encoder=self.fixedpoint_encoder,
q_field=self.q_field),
fixedpoint_numpy.FixedPointTensor.from_source(f"wb_{suffix}", source[1],
encoder=self.fixedpoint_encoder,
q_field=self.q_field),
)
return wa, wb
def reveal_models(self, w_self, w_remote, suffix=None):
if suffix is None:
suffix = self.n_iter_
if self.model_param.reveal_strategy == "respectively":
w_remote.broadcast_reconstruct_share(tensor_name=f"wb_{suffix}")
new_w = w_self.get(tensor_name=f"wa_{suffix}",
broadcast=False)
elif self.model_param.reveal_strategy == "encrypted_reveal_in_host":
w_remote.broadcast_reconstruct_share(tensor_name=f"wb_{suffix}")
new_w = w_self.reconstruct(tensor_name=f"wa_{suffix}", broadcast=False)
else:
raise NotImplementedError(f"reveal strategy: {self.model_param.reveal_strategy} has not been implemented.")
return new_w
def _reveal_every_iter_weights_check(self, last_w, new_w, suffix):
square_sum = np.sum((last_w - new_w) ** 2)
self.converge_transfer_variable.square_sum.remote(square_sum, role=consts.GUEST, idx=0, suffix=suffix)
return self.converge_transfer_variable.converge_info.get(idx=0, suffix=suffix)
def check_converge_by_loss(self, loss, suffix):
self.is_converged = self.transfer_variable.is_converged.get(idx=0, suffix=suffix)
return self.is_converged
def get_single_encrypted_model_weight_dict(self, model_weights=None, header=None):
weight_dict = {}
model_weights = model_weights if model_weights else self.model_weights
header = header if header else self.header
for idx, header_name in enumerate(header):
coef_i = model_weights.coef_[idx]
is_obfuscator = False
if hasattr(coef_i, "__is_obfuscator"):
is_obfuscator = getattr(coef_i, "__is_obfuscator")
public_key = sshe_cipher_param_pb2.CipherPublicKey(n=str(coef_i.public_key.n))
weight_dict[header_name] = sshe_cipher_param_pb2.CipherText(public_key=public_key,
cipher_text=str(coef_i.ciphertext()),
exponent=str(coef_i.exponent),
is_obfuscator=is_obfuscator)
return weight_dict
def prepare_fit(self, data_instances, validate_data):
self.batch_generator = batch_generator.Host()
self.batch_generator.register_batch_generator(BatchGeneratorTransferVariable(), has_arbiter=False)
self.header = copy.deepcopy(data_instances.schema.get("header", []))
self._abnormal_detection(data_instances)
self.check_abnormal_values(data_instances)
self.check_abnormal_values(validate_data)
def get_single_model_param(self, model_weights=None, header=None):
result = super().get_single_model_param(model_weights, header)
if self.is_respectively_reveal:
result['weight'] = self.get_single_model_weight_dict(model_weights, header)
else:
result["encrypted_weight"] = self.get_single_encrypted_model_weight_dict(model_weights, header)
return result
def load_single_model(self, single_model_obj):
LOGGER.info("start to load single model")
if self.is_respectively_reveal:
self.load_single_model_weight(single_model_obj)
else:
feature_shape = len(self.header)
tmp_vars = [None] * feature_shape
weight_dict = dict(single_model_obj.encrypted_weight)
for idx, header_name in enumerate(self.header):
cipher_weight = weight_dict.get(header_name)
public_key = PaillierPublicKey(int(cipher_weight.public_key.n))
cipher_text = int(cipher_weight.cipher_text)
exponent = int(cipher_weight.exponent)
is_obfuscator = cipher_weight.is_obfuscator
coef_i = PaillierEncryptedNumber(public_key, cipher_text, exponent)
if is_obfuscator:
coef_i.apply_obfuscator()
tmp_vars[idx] = coef_i
self.model_weights = LinearModelWeights(tmp_vars, fit_intercept=self.fit_intercept)
self.n_iter_ = single_model_obj.iters
return self
| 36,689 | 45.738854 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/hetero_sshe_logistic_regression/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 614 | 40 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/hetero_sshe_logistic_regression/hetero_lr_guest.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import numpy as np
from federatedml.linear_model.bilateral_linear_model.hetero_sshe_linear_model import HeteroSSHEGuestBase
from federatedml.one_vs_rest.one_vs_rest import one_vs_rest_factory
from federatedml.optim import activation
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.hetero_sshe_lr_param import HeteroSSHELRParam
from federatedml.protobuf.generated import lr_model_param_pb2, lr_model_meta_pb2
from federatedml.secureprotol.spdz.secure_matrix.secure_matrix import SecureMatrix
from federatedml.secureprotol.spdz.tensor import fixedpoint_numpy
from federatedml.util import consts, fate_operator, LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
class HeteroLRGuest(HeteroSSHEGuestBase):
def __init__(self):
super().__init__()
self.model_name = 'HeteroLogisticRegression'
self.model_param_name = 'HeteroLogisticRegressionParam'
self.model_meta_name = 'HeteroLogisticRegressionMeta'
self.model_param = HeteroSSHELRParam()
# self.labels = None
self.one_vs_rest_obj = None
self.label_type = int
def _init_model(self, params):
super()._init_model(params)
self.one_vs_rest_obj = one_vs_rest_factory(self, role=self.role, mode=self.mode, has_arbiter=False)
def _compute_sigmoid(self, z, remote_z):
complete_z = z + remote_z
sigmoid_z = complete_z * 0.25 + 0.5
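        # 0.25 * z + 0.5 is the first-order Taylor approximation of sigmoid(z) around z = 0;
        # keeping the activation linear allows it to be evaluated directly on shared values.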
return sigmoid_z
def forward(self, weights, features, labels, suffix, cipher, batch_weight):
self._cal_z(weights, features, suffix, cipher)
sigmoid_z = self._compute_sigmoid(self.wx_self, self.wx_remote)
self.encrypted_wx = self.wx_self + self.wx_remote
self.encrypted_error = sigmoid_z - labels
if batch_weight:
sigmoid_z = sigmoid_z * batch_weight
self.encrypted_error = self.encrypted_error * batch_weight
tensor_name = ".".join(("sigmoid_z",) + suffix)
shared_sigmoid_z = SecureMatrix.from_source(tensor_name,
sigmoid_z,
cipher,
self.fixedpoint_encoder.n,
self.fixedpoint_encoder)
return shared_sigmoid_z
def compute_loss(self, weights, labels, suffix, cipher=None):
"""
        Use a second-order Taylor series expansion of the log loss:
        Loss = - y * log(h(x)) - (1-y) * log(1 - h(x)) where h(x) = 1/(1+exp(-wx))
        Then loss ≈ - (1/N)*∑(log(1/2) - 1/2*wx + ywx - 1/8*(wx)^2)
"""
LOGGER.info(f"[compute_loss]: Calculate loss ...")
wx = (-0.5 * self.encrypted_wx).reduce(operator.add)
ywx = (self.encrypted_wx * labels).reduce(operator.add)
wx_square = (2 * self.wx_remote * self.wx_self).reduce(operator.add) + \
(self.wx_self * self.wx_self).reduce(operator.add)
wx_remote_square = self.secure_matrix_obj.share_encrypted_matrix(suffix=suffix,
is_remote=False,
cipher=None,
wx_self_square=None)[0]
wx_square = (wx_remote_square + wx_square) * -0.125
batch_num = self.batch_num[int(suffix[2])]
loss = (wx + ywx + wx_square) * (-1 / batch_num) - np.log(0.5)
tensor_name = ".".join(("shared_loss",) + suffix)
share_loss = SecureMatrix.from_source(tensor_name=tensor_name,
source=loss,
cipher=None,
q_field=self.fixedpoint_encoder.n,
encoder=self.fixedpoint_encoder)
tensor_name = ".".join(("loss",) + suffix)
loss = share_loss.get(tensor_name=tensor_name,
broadcast=False)[0]
if self.reveal_every_iter:
loss_norm = self.optimizer.loss_norm(weights)
if loss_norm:
loss += loss_norm
else:
if self.optimizer.penalty == consts.L2_PENALTY:
w_self, w_remote = weights
w_encode = np.hstack((w_remote.value, w_self.value))
w_encode = np.array([w_encode])
w_tensor_name = ".".join(("loss_norm_w",) + suffix)
w_tensor = fixedpoint_numpy.FixedPointTensor(value=w_encode,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_name)
w_tensor_transpose_name = ".".join(("loss_norm_w_transpose",) + suffix)
w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(value=w_encode.T,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_transpose_name)
loss_norm_tensor_name = ".".join(("loss_norm",) + suffix)
loss_norm = w_tensor.dot(w_tensor_transpose, target_name=loss_norm_tensor_name).get(broadcast=False)
loss_norm = 0.5 * self.optimizer.alpha * loss_norm[0][0]
loss = loss + loss_norm
LOGGER.info(f"[compute_loss]: loss={loss}, reveal_every_iter={self.reveal_every_iter}")
return loss
@assert_io_num_rows_equal
def predict(self, data_instances):
"""
Prediction of lr
Parameters
----------
data_instances: Table of Instance, input data
Returns
----------
Table
            include input data label, predicted probability, predicted label
"""
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
if self.need_one_vs_rest:
predict_result = self.one_vs_rest_obj.predict(data_instances)
return predict_result
LOGGER.debug(
f"Before_predict_reveal_strategy: {self.model_param.reveal_strategy}, {self.is_respectively_reveal}")
def _vec_dot(v, coef, intercept):
return fate_operator.vec_dot(v.features, coef) + intercept
f = functools.partial(_vec_dot,
coef=self.model_weights.coef_,
intercept=self.model_weights.intercept_)
pred_prob = data_instances.mapValues(f)
host_probs = self.transfer_variable.host_prob.get(idx=-1)
LOGGER.info("Get probability from Host")
# guest probability
for host_prob in host_probs:
if not self.is_respectively_reveal:
host_prob = self.cipher.distribute_decrypt(host_prob)
pred_prob = pred_prob.join(host_prob, lambda g, h: g + h)
pred_prob = pred_prob.mapValues(lambda p: activation.sigmoid(p))
threshold = self.model_param.predict_param.threshold
predict_result = self.predict_score_to_output(data_instances, pred_prob, classes=[0, 1], threshold=threshold)
return predict_result
def _get_param(self):
if self.need_cv:
param_protobuf_obj = lr_model_param_pb2.LRModelParam()
return param_protobuf_obj
if self.need_one_vs_rest:
one_vs_rest_result = self.one_vs_rest_obj.save(lr_model_param_pb2.SingleModel)
single_result = {'header': self.header, 'need_one_vs_rest': True, "best_iteration": -1}
else:
one_vs_rest_result = None
single_result = self.get_single_model_param()
single_result['need_one_vs_rest'] = False
single_result['one_vs_rest_result'] = one_vs_rest_result
LOGGER.debug(f"saved_model: {single_result}")
param_protobuf_obj = lr_model_param_pb2.LRModelParam(**single_result)
return param_protobuf_obj
def _get_meta(self):
meta_protobuf_obj = lr_model_meta_pb2.LRModelMeta(penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept,
need_one_vs_rest=self.need_one_vs_rest,
reveal_strategy=self.model_param.reveal_strategy)
return meta_protobuf_obj
def load_model(self, model_dict):
result_obj, _ = super().load_model(model_dict)
need_one_vs_rest = result_obj.need_one_vs_rest
LOGGER.info("in _load_model need_one_vs_rest: {}".format(need_one_vs_rest))
if need_one_vs_rest:
one_vs_rest_result = result_obj.one_vs_rest_result
self.one_vs_rest_obj = one_vs_rest_factory(classifier=self, role=consts.GUEST,
mode=self.mode, has_arbiter=False)
self.one_vs_rest_obj.load_model(one_vs_rest_result)
self.need_one_vs_rest = True
else:
self.load_single_model(result_obj)
self.need_one_vs_rest = False
def fit(self, data_instances, validate_data=None):
LOGGER.info("Starting to fit hetero_sshe_logistic_regression")
self.prepare_fit(data_instances, validate_data)
classes = self.one_vs_rest_obj.get_data_classes(data_instances)
if len(classes) > 2:
self.need_one_vs_rest = True
self.need_call_back_loss = False
self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
else:
self.need_one_vs_rest = False
self.fit_binary(data_instances, validate_data)
def one_vs_rest_fit(self, train_data=None, validate_data=None):
LOGGER.info("Class num larger than 2, do one_vs_rest")
self.one_vs_rest_obj.fit(data_instances=train_data, validate_data=validate_data)
def fit_binary(self, data_instances, validate_data=None):
self.fit_single_model(data_instances, validate_data)
def get_model_summary(self):
summary = super().get_model_summary()
summary["one_vs_rest"] = self.need_one_vs_rest
return summary
def get_metrics_param(self):
if self.need_one_vs_rest:
eval_type = 'multi'
else:
eval_type = "binary"
return EvaluateParam(eval_type=eval_type, metrics=self.metrics)
| 12,098 | 44.829545 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/hetero_sshe_logistic_regression/hetero_lr_host.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import numpy as np
from federatedml.linear_model.bilateral_linear_model.hetero_sshe_linear_model import HeteroSSHEHostBase
from federatedml.one_vs_rest.one_vs_rest import one_vs_rest_factory
from federatedml.param.hetero_sshe_lr_param import HeteroSSHELRParam
from federatedml.protobuf.generated import lr_model_param_pb2, lr_model_meta_pb2
from federatedml.secureprotol.spdz.secure_matrix.secure_matrix import SecureMatrix
from federatedml.secureprotol.spdz.tensor import fixedpoint_numpy
from federatedml.util import consts, fate_operator, LOGGER
class HeteroLRHost(HeteroSSHEHostBase):
def __init__(self):
super().__init__()
self.model_name = 'HeteroLogisticRegression'
self.model_param_name = 'HeteroLogisticRegressionParam'
self.model_meta_name = 'HeteroLogisticRegressionMeta'
self.model_param = HeteroSSHELRParam()
self.labels = None
self.one_vs_rest_obj = None
def _init_model(self, params):
super()._init_model(params)
self.one_vs_rest_obj = one_vs_rest_factory(self, role=self.role, mode=self.mode, has_arbiter=False)
def forward(self, weights, features, labels, suffix, cipher, batch_weight=None):
if not self.reveal_every_iter:
LOGGER.info(f"[forward]: Calculate z in share...")
w_self, w_remote = weights
z = self._cal_z_in_share(w_self, w_remote, features, suffix, self.cipher)
else:
LOGGER.info(f"[forward]: Calculate z directly...")
w = weights.unboxed
z = features.dot_local(w)
self.wx_self = z
self.secure_matrix_obj.share_encrypted_matrix(suffix=suffix,
is_remote=True,
cipher=cipher,
z=z)
tensor_name = ".".join(("sigmoid_z",) + suffix)
shared_sigmoid_z = SecureMatrix.from_source(tensor_name,
self.other_party,
cipher,
self.fixedpoint_encoder.n,
self.fixedpoint_encoder)
return shared_sigmoid_z
def compute_loss(self, weights=None, labels=None, suffix=None, cipher=None):
"""
        Use a second-order Taylor series expansion of the log loss:
        Loss = - y * log(h(x)) - (1-y) * log(1 - h(x)) where h(x) = 1/(1+exp(-wx))
        Then loss ≈ - (1/N)*∑(log(1/2) - 1/2*wx + ywx - 1/8*(wx)^2)
"""
LOGGER.info(f"[compute_loss]: Calculate loss ...")
wx_self_square = (self.wx_self * self.wx_self).reduce(operator.add)
self.secure_matrix_obj.share_encrypted_matrix(suffix=suffix,
is_remote=True,
cipher=cipher,
wx_self_square=wx_self_square)
tensor_name = ".".join(("shared_loss",) + suffix)
share_loss = SecureMatrix.from_source(tensor_name=tensor_name,
source=self.other_party,
cipher=cipher,
q_field=self.fixedpoint_encoder.n,
encoder=self.fixedpoint_encoder,
is_fixedpoint_table=False)
if self.reveal_every_iter:
loss_norm = self.optimizer.loss_norm(weights)
if loss_norm:
share_loss += loss_norm
LOGGER.debug(f"share_loss+loss_norm: {share_loss}")
tensor_name = ".".join(("loss",) + suffix)
share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
else:
tensor_name = ".".join(("loss",) + suffix)
share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
if self.optimizer.penalty == consts.L2_PENALTY:
w_self, w_remote = weights
w_encode = np.hstack((w_self.value, w_remote.value))
w_encode = np.array([w_encode])
w_tensor_name = ".".join(("loss_norm_w",) + suffix)
w_tensor = fixedpoint_numpy.FixedPointTensor(value=w_encode,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_name)
w_tensor_transpose_name = ".".join(("loss_norm_w_transpose",) + suffix)
w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(value=w_encode.T,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_transpose_name)
loss_norm_tensor_name = ".".join(("loss_norm",) + suffix)
loss_norm = w_tensor.dot(w_tensor_transpose, target_name=loss_norm_tensor_name)
loss_norm.broadcast_reconstruct_share()
def predict(self, data_instances):
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
if self.need_one_vs_rest:
self.one_vs_rest_obj.predict(data_instances)
return
LOGGER.debug(f"Before_predict_reveal_strategy: {self.model_param.reveal_strategy},"
f" {self.is_respectively_reveal}")
def _vec_dot(v, coef, intercept):
return fate_operator.vec_dot(v.features, coef) + intercept
f = functools.partial(_vec_dot,
coef=self.model_weights.coef_,
intercept=self.model_weights.intercept_)
prob_host = data_instances.mapValues(f)
self.transfer_variable.host_prob.remote(prob_host, role=consts.GUEST, idx=0)
LOGGER.info("Remote probability to Guest")
def _get_param(self):
if self.need_cv:
param_protobuf_obj = lr_model_param_pb2.LRModelParam()
return param_protobuf_obj
self.header = self.header if self.header else []
LOGGER.debug("In get_param, self.need_one_vs_rest: {}".format(self.need_one_vs_rest))
if self.need_one_vs_rest:
one_vs_rest_result = self.one_vs_rest_obj.save(lr_model_param_pb2.SingleModel)
single_result = {'header': self.header, 'need_one_vs_rest': True, "best_iteration": -1}
else:
one_vs_rest_result = None
single_result = self.get_single_model_param()
single_result['need_one_vs_rest'] = False
single_result['one_vs_rest_result'] = one_vs_rest_result
param_protobuf_obj = lr_model_param_pb2.LRModelParam(**single_result)
return param_protobuf_obj
def _get_meta(self):
meta_protobuf_obj = lr_model_meta_pb2.LRModelMeta(penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept,
need_one_vs_rest=self.need_one_vs_rest,
reveal_strategy=self.model_param.reveal_strategy)
return meta_protobuf_obj
def load_model(self, model_dict):
result_obj, _ = super().load_model(model_dict)
need_one_vs_rest = result_obj.need_one_vs_rest
LOGGER.info("in _load_model need_one_vs_rest: {}".format(need_one_vs_rest))
if need_one_vs_rest:
one_vs_rest_result = result_obj.one_vs_rest_result
self.one_vs_rest_obj = one_vs_rest_factory(classifier=self, role=consts.HOST,
mode=self.mode, has_arbiter=False)
self.one_vs_rest_obj.load_model(one_vs_rest_result)
self.need_one_vs_rest = True
else:
self.load_single_model(result_obj)
self.need_one_vs_rest = False
def fit(self, data_instances, validate_data=None):
LOGGER.info("Starting to fit hetero_sshe_logistic_regression")
self.prepare_fit(data_instances, validate_data)
classes = self.one_vs_rest_obj.get_data_classes(data_instances)
if len(classes) > 2:
self.need_one_vs_rest = True
self.need_call_back_loss = False
self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
else:
self.need_one_vs_rest = False
self.fit_binary(data_instances, validate_data)
def one_vs_rest_fit(self, train_data=None, validate_data=None):
LOGGER.info("Class num larger than 2, do one_vs_rest")
self.one_vs_rest_obj.fit(data_instances=train_data, validate_data=validate_data)
def fit_binary(self, data_instances, validate_data=None):
self.fit_single_model(data_instances, validate_data)
def get_model_summary(self):
summary = super().get_model_summary()
summary["one_vs_rest"] = self.need_one_vs_rest
return summary
| 10,733 | 47.790909 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/hetero_sshe_linear_regression/hetero_linr_guest.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import numpy as np
from federatedml.linear_model.bilateral_linear_model.hetero_sshe_linear_model import HeteroSSHEGuestBase
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.hetero_sshe_linr_param import HeteroSSHELinRParam
from federatedml.protobuf.generated import linr_model_param_pb2, linr_model_meta_pb2
from federatedml.secureprotol.spdz.secure_matrix.secure_matrix import SecureMatrix
from federatedml.secureprotol.spdz.tensor import fixedpoint_numpy
from federatedml.util import consts, fate_operator, LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
class HeteroLinRGuest(HeteroSSHEGuestBase):
def __init__(self):
super().__init__()
self.model_name = 'HeteroLinearRegression'
self.model_param_name = 'HeteroLinearRegressionParam'
self.model_meta_name = 'HeteroLinearRegressionMeta'
self.model_param = HeteroSSHELinRParam()
# self.labels = None
self.label_type = float
def forward(self, weights, features, labels, suffix, cipher, batch_weight):
self._cal_z(weights, features, suffix, cipher)
complete_z = self.wx_self + self.wx_remote
self.encrypted_wx = complete_z
self.encrypted_error = complete_z - labels
if batch_weight:
complete_z = complete_z * batch_weight
self.encrypted_error = self.encrypted_error * batch_weight
tensor_name = ".".join(("complete_z",) + suffix)
shared_z = SecureMatrix.from_source(tensor_name,
complete_z,
cipher,
self.fixedpoint_encoder.n,
self.fixedpoint_encoder)
return shared_z
def compute_loss(self, weights, labels, suffix, cipher=None):
"""
Compute hetero linr loss:
            loss = (1/2N)*\\sum(wx-y)^2, where y is the label, w the model weights and x the features
(wx - y)^2 = (wx_h)^2 + (wx_g - y)^2 + 2 * (wx_h * (wx_g - y))
"""
LOGGER.info(f"[compute_loss]: Calculate loss ...")
wxy_self = self.wx_self - labels
wxy_self_square = (wxy_self * wxy_self).reduce(operator.add)
wxy = (self.wx_remote * wxy_self).reduce(operator.add)
wx_remote_square = self.secure_matrix_obj.share_encrypted_matrix(suffix=suffix,
is_remote=False,
cipher=None,
wx_self_square=None)[0]
loss = (wx_remote_square + wxy_self_square) + wxy * 2
batch_num = self.batch_num[int(suffix[2])]
loss = loss * (1 / (batch_num * 2))
# loss = (wx_remote_square + wxy_self_square + 2 * wxy) / (2 * batch_num)
tensor_name = ".".join(("shared_loss",) + suffix)
share_loss = SecureMatrix.from_source(tensor_name=tensor_name,
source=loss,
cipher=None,
q_field=self.fixedpoint_encoder.n,
encoder=self.fixedpoint_encoder)
tensor_name = ".".join(("loss",) + suffix)
loss = share_loss.get(tensor_name=tensor_name,
broadcast=False)[0]
if self.reveal_every_iter:
loss_norm = self.optimizer.loss_norm(weights)
if loss_norm:
loss += loss_norm
else:
if self.optimizer.penalty == consts.L2_PENALTY:
w_self, w_remote = weights
w_encode = np.hstack((w_remote.value, w_self.value))
w_encode = np.array([w_encode])
w_tensor_name = ".".join(("loss_norm_w",) + suffix)
w_tensor = fixedpoint_numpy.FixedPointTensor(value=w_encode,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_name)
w_tensor_transpose_name = ".".join(("loss_norm_w_transpose",) + suffix)
w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(value=w_encode.T,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_transpose_name)
loss_norm_tensor_name = ".".join(("loss_norm",) + suffix)
loss_norm = w_tensor.dot(w_tensor_transpose, target_name=loss_norm_tensor_name).get(broadcast=False)
loss_norm = 0.5 * self.optimizer.alpha * loss_norm[0][0]
loss = loss + loss_norm
LOGGER.info(f"[compute_loss]: loss={loss}, reveal_every_iter={self.reveal_every_iter}")
return loss
@assert_io_num_rows_equal
def predict(self, data_instances):
"""
Prediction of linr
Parameters
----------
data_instances: Table of Instance, input data
Returns
----------
Table
include input data label, predict result, predicted label
"""
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
LOGGER.debug(
f"Before_predict_reveal_strategy: {self.model_param.reveal_strategy}, {self.is_respectively_reveal}")
def _vec_dot(v, coef, intercept):
return fate_operator.vec_dot(v.features, coef) + intercept
f = functools.partial(_vec_dot,
coef=self.model_weights.coef_,
intercept=self.model_weights.intercept_)
pred_res = data_instances.mapValues(f)
host_preds = self.transfer_variable.host_prob.get(idx=-1)
LOGGER.info("Get probability from Host")
for host_pred in host_preds:
if not self.is_respectively_reveal:
host_pred = self.cipher.distribute_decrypt(host_pred)
pred_res = pred_res.join(host_pred, lambda g, h: g + h)
predict_result = self.predict_score_to_output(data_instances=data_instances,
predict_score=pred_res,
classes=None)
return predict_result
def _get_param(self):
if self.need_cv:
param_protobuf_obj = linr_model_param_pb2.LinRModelParam()
return param_protobuf_obj
single_result = self.get_single_model_param()
param_protobuf_obj = linr_model_param_pb2.LinRModelParam(**single_result)
return param_protobuf_obj
def _get_meta(self):
meta_protobuf_obj = linr_model_meta_pb2.LinRModelMeta(penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept,
reveal_strategy=self.model_param.reveal_strategy)
return meta_protobuf_obj
def load_model(self, model_dict):
result_obj, _ = super().load_model(model_dict)
self.load_single_model(result_obj)
def fit(self, data_instances, validate_data=None):
LOGGER.info("Starting to fit hetero_sshe_linear_regression")
self.prepare_fit(data_instances, validate_data)
self.fit_single_model(data_instances, validate_data)
def get_metrics_param(self):
return EvaluateParam(eval_type="regression", metrics=self.metrics)
| 9,292 | 45.00495 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/hetero_sshe_linear_regression/hetero_linr_host.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import numpy as np
from federatedml.linear_model.bilateral_linear_model.hetero_sshe_linear_model import HeteroSSHEHostBase
from federatedml.param.hetero_sshe_linr_param import HeteroSSHELinRParam
from federatedml.protobuf.generated import linr_model_param_pb2, linr_model_meta_pb2
from federatedml.secureprotol.spdz.secure_matrix.secure_matrix import SecureMatrix
from federatedml.secureprotol.spdz.tensor import fixedpoint_numpy
from federatedml.util import consts, fate_operator, LOGGER
class HeteroLinRHost(HeteroSSHEHostBase):
def __init__(self):
super().__init__()
self.model_name = 'HeteroLinearRegression'
self.model_param_name = 'HeteroLinearRegressionParam'
self.model_meta_name = 'HeteroLinearRegressionMeta'
self.model_param = HeteroSSHELinRParam()
self.labels = None
def forward(self, weights, features, labels, suffix, cipher, batch_weight=None):
if not self.reveal_every_iter:
LOGGER.info(f"[forward]: Calculate z in share...")
w_self, w_remote = weights
z = self._cal_z_in_share(w_self, w_remote, features, suffix, self.cipher)
else:
LOGGER.info(f"[forward]: Calculate z directly...")
w = weights.unboxed
z = features.dot_local(w)
self.wx_self = z
self.secure_matrix_obj.share_encrypted_matrix(suffix=suffix,
is_remote=True,
cipher=cipher,
z=z)
tensor_name = ".".join(("complete_z",) + suffix)
shared_z = SecureMatrix.from_source(tensor_name,
self.other_party,
cipher,
self.fixedpoint_encoder.n,
self.fixedpoint_encoder)
return shared_z
def compute_loss(self, weights=None, labels=None, suffix=None, cipher=None):
"""
Compute hetero linr loss:
            loss = (1/2N)*\\sum(wx-y)^2, where y is the label, w the model weights and x the features
(wx - y)^2 = (wx_h)^2 + (wx_g - y)^2 + 2 * (wx_h * (wx_g - y))
"""
LOGGER.info(f"[compute_loss]: Calculate loss ...")
wx_self_square = (self.wx_self * self.wx_self).reduce(operator.add)
self.secure_matrix_obj.share_encrypted_matrix(suffix=suffix,
is_remote=True,
cipher=cipher,
wx_self_square=wx_self_square)
tensor_name = ".".join(("shared_loss",) + suffix)
share_loss = SecureMatrix.from_source(tensor_name=tensor_name,
source=self.other_party,
cipher=cipher,
q_field=self.fixedpoint_encoder.n,
encoder=self.fixedpoint_encoder,
is_fixedpoint_table=False)
if self.reveal_every_iter:
loss_norm = self.optimizer.loss_norm(weights)
if loss_norm:
share_loss += loss_norm
LOGGER.debug(f"share_loss+loss_norm: {share_loss}")
tensor_name = ".".join(("loss",) + suffix)
share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
else:
tensor_name = ".".join(("loss",) + suffix)
share_loss.broadcast_reconstruct_share(tensor_name=tensor_name)
if self.optimizer.penalty == consts.L2_PENALTY:
w_self, w_remote = weights
w_encode = np.hstack((w_self.value, w_remote.value))
w_encode = np.array([w_encode])
w_tensor_name = ".".join(("loss_norm_w",) + suffix)
w_tensor = fixedpoint_numpy.FixedPointTensor(value=w_encode,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_name)
w_tensor_transpose_name = ".".join(("loss_norm_w_transpose",) + suffix)
w_tensor_transpose = fixedpoint_numpy.FixedPointTensor(value=w_encode.T,
q_field=self.fixedpoint_encoder.n,
endec=self.fixedpoint_encoder,
tensor_name=w_tensor_transpose_name)
loss_norm_tensor_name = ".".join(("loss_norm",) + suffix)
loss_norm = w_tensor.dot(w_tensor_transpose, target_name=loss_norm_tensor_name)
loss_norm.broadcast_reconstruct_share()
def predict(self, data_instances):
LOGGER.info("Start predict ...")
self._abnormal_detection(data_instances)
data_instances = self.align_data_header(data_instances, self.header)
LOGGER.debug(f"Before_predict_reveal_strategy: {self.model_param.reveal_strategy},"
f" {self.is_respectively_reveal}")
def _vec_dot(v, coef, intercept):
return fate_operator.vec_dot(v.features, coef) + intercept
f = functools.partial(_vec_dot,
coef=self.model_weights.coef_,
intercept=self.model_weights.intercept_)
host_pred = data_instances.mapValues(f)
self.transfer_variable.host_prob.remote(host_pred, role=consts.GUEST, idx=0)
LOGGER.info("Remote probability to Guest")
def _get_param(self):
if self.need_cv:
param_protobuf_obj = linr_model_param_pb2.LinRModelParam()
return param_protobuf_obj
self.header = self.header if self.header else []
single_result = self.get_single_model_param()
param_protobuf_obj = linr_model_param_pb2.LinRModelParam(**single_result)
return param_protobuf_obj
def _get_meta(self):
meta_protobuf_obj = linr_model_meta_pb2.LinRModelMeta(penalty=self.model_param.penalty,
tol=self.model_param.tol,
alpha=self.alpha,
optimizer=self.model_param.optimizer,
batch_size=self.batch_size,
learning_rate=self.model_param.learning_rate,
max_iter=self.max_iter,
early_stop=self.model_param.early_stop,
fit_intercept=self.fit_intercept,
reveal_strategy=self.model_param.reveal_strategy)
return meta_protobuf_obj
def load_model(self, model_dict):
result_obj, _ = super().load_model(model_dict)
self.load_single_model(result_obj)
def fit(self, data_instances, validate_data=None):
LOGGER.info("Starting to fit hetero_sshe_linear_regression")
self.prepare_fit(data_instances, validate_data)
self.fit_single_model(data_instances, validate_data)
| 8,343 | 48.082353 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/linear_model/bilateral_linear_model/hetero_sshe_linear_regression/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 614 | 40 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/util/ipcl_operator.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from numpy import ndarray
from federatedml.util import LOGGER
try:
from ipcl_python import PaillierEncryptedNumber as IpclPaillierEncryptedNumber
from ipcl_python.bindings.ipcl_bindings import ipclCipherText
except ImportError:
LOGGER.info("ipcl_python failed to import")
pass
def get_coeffs(weights):
"""
IPCL encrypts all weights (coefficients and intercept) into one single encrypted number.
    This function gets an IPCL encrypted number which contains all coefficients but not the intercept.
Args:
weights (IpclPaillierEncryptedNumber): all model weights in one encrypted number
Returns:
(IpclPaillierEncryptedNumber): all coefficients in one encrypted number
"""
coeff_num = weights.__len__() - 1
pub_key = weights.public_key
bn = []
exp = []
for i in range(coeff_num):
bn.append(weights.ciphertextBN(i))
exp.append(weights.exponent(i))
ct = ipclCipherText(pub_key.pubkey, bn)
return IpclPaillierEncryptedNumber(pub_key, ct, exp, coeff_num)
def get_intercept(weights):
"""
IPCL encrypts all weights (coefficients and intercept) into one single encrypted number.
    This function gets the encrypted number of the intercept.
Args:
weights (IpclPaillierEncryptedNumber): all model weights in one encrypted number
Returns:
(IpclPaillierEncryptedNumber): IPCL encrypted number of intercept
"""
coeff_num = weights.__len__() - 1
pub_key = weights.public_key
bn = [weights.ciphertextBN(coeff_num)]
exp = [weights.exponent(coeff_num)]
ct = ipclCipherText(pub_key.pubkey, bn)
return IpclPaillierEncryptedNumber(pub_key, ct, exp, 1)
def merge_encrypted_number_array(values):
"""
Put all IPCL encrypted numbers of a 1-d array into one encrypted number.
Args:
values (numpy.ndarray, list): an array of multiple IPCL encrypted numbers
Returns:
(IpclPaillierEncryptedNumber): one encrypted number contains all values
"""
assert isinstance(values, (list, ndarray))
pub_key = values[0].public_key
bn, exp = [], []
for i in range(len(values)):
assert values[i].__len__() == 1
bn.append(values[i].ciphertextBN(0))
exp.append(values[i].exponent(0))
ct = ipclCipherText(pub_key.pubkey, bn)
return IpclPaillierEncryptedNumber(pub_key, ct, exp, len(values))
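# Illustrative usage sketch (the `weights` object below is a placeholder for an
# IpclPaillierEncryptedNumber packing [coef_0, ..., coef_k, intercept]):
#
#   coeffs = get_coeffs(weights)              # one ciphertext holding only the coefficients
#   intercept = get_intercept(weights)        # one ciphertext holding only the intercept
#   packed = merge_encrypted_number_array([intercept, intercept])   # repack a 1-d array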
| 3,020 | 32.94382 | 102 |
py
|
FATE
|
FATE-master/python/federatedml/util/paillier_check.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from numpy import ndarray, ndim
from federatedml.util import LOGGER
from federatedml.secureprotol.fate_paillier import PaillierEncryptedNumber
ipcl_enabled = False
try:
from ipcl_python import PaillierEncryptedNumber as IpclPaillierEncryptedNumber
ipcl_enabled = True
except ImportError:
LOGGER.info("ipcl_python failed to import")
pass
def is_encrypted_number(values, encrypted_type):
if isinstance(values, ndarray):
return isinstance(values.item(0), encrypted_type)
elif isinstance(values, list):
return isinstance(values[0], encrypted_type)
else:
return isinstance(values, encrypted_type)
def is_fate_paillier_encrypted_number(values):
return is_encrypted_number(values, PaillierEncryptedNumber)
def is_ipcl_encrypted_number(values):
if ipcl_enabled:
return is_encrypted_number(values, IpclPaillierEncryptedNumber)
return False
def is_paillier_encrypted_number(values):
return is_fate_paillier_encrypted_number(values) or is_ipcl_encrypted_number(values)
def is_single_ipcl_encrypted_number(values):
"""
Return True if input numpy array contains only one IPCL encrypted number, not a list
Args:
values (numpy.ndarray)
"""
if ipcl_enabled and isinstance(values, ndarray):
return ndim(values) == 0 and isinstance(values.item(0), IpclPaillierEncryptedNumber)
return False
| 2,017 | 31.031746 | 92 |
py
|
FATE
|
FATE-master/python/federatedml/util/schema_check.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing import is_table
from federatedml.util import LOGGER
def check_schema(input_schema, output_schema):
LOGGER.debug(f"input schema: {input_schema} -> output schema: {output_schema}")
if output_schema is None:
raise EnvironmentError(
f"output_schema is None while input data has schema.")
input_header = input_schema.get("header", None)
output_header = output_schema.get("header", None)
if input_header is not None and output_header is None:
raise EnvironmentError(
f"output header is None while input data has header.")
def assert_schema_consistent(func):
def _func(*args, **kwargs):
input_schema = None
all_args = []
all_args.extend(args)
all_args.extend(kwargs.values())
for arg in all_args:
if is_table(arg):
input_schema = arg.schema
break
result = func(*args, **kwargs)
if input_schema is not None:
# single data set
if is_table(result) and result.count() > 0:
output_schema = result.schema
check_schema(input_schema, output_schema)
# multiple data sets
elif type(result).__name__ in ["list", "tuple"]:
for output_data in result:
if is_table(output_data) and output_data.count() > 0:
output_schema = output_data.schema
check_schema(input_schema, output_schema)
return result
return _func
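# Typical usage sketch (the decorated method below is hypothetical):
#
#   @assert_schema_consistent
#   def transform(self, data_instances):
#       ...   # any returned table (or list/tuple of tables) must keep a consistent header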
| 2,178 | 35.316667 | 83 |
py
|
FATE
|
FATE-master/python/federatedml/util/homo_label_encoder.py
|
from federatedml.transfer_variable.transfer_class.homo_label_encoder_transfer_variable \
import HomoLabelEncoderTransferVariable
from federatedml.util import consts
from federatedml.util import LOGGER
class HomoLabelEncoderClient(object):
def __init__(self):
self.transvar = HomoLabelEncoderTransferVariable()
def label_alignment(self, class_set):
LOGGER.info('start homo label alignments')
self.transvar.local_labels.remote(class_set, role=consts.ARBITER, suffix=('label_align',))
new_label_mapping = self.transvar.label_mapping.get(idx=0, suffix=('label_mapping',))
reverse_mapping = {v: k for k, v in new_label_mapping.items()}
new_classes_index = [new_label_mapping[k] for k in new_label_mapping]
new_classes_index = sorted(new_classes_index)
aligned_labels = [reverse_mapping[i] for i in new_classes_index]
return aligned_labels, new_label_mapping
class HomoLabelEncoderArbiter(object):
def __init__(self):
self.transvar = HomoLabelEncoderTransferVariable()
def label_alignment(self):
LOGGER.info('start homo label alignments')
labels = self.transvar.local_labels.get(idx=-1, suffix=('label_align', ))
label_set = set()
for local_label in labels:
label_set.update(local_label)
global_label = list(label_set)
global_label = sorted(global_label)
label_mapping = {v: k for k, v in enumerate(global_label)}
self.transvar.label_mapping.remote(label_mapping, idx=-1, suffix=('label_mapping',))
return label_mapping
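# Illustrative alignment flow (hypothetical label sets):
#   client A holds {"cat", "dog"}, client B holds {"dog", "fish"}
#   -> arbiter merges and sorts into ["cat", "dog", "fish"]
#   -> label_mapping {"cat": 0, "dog": 1, "fish": 2} is broadcast back, and each client
#      returns the aligned label list together with this mapping.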
| 1,607 | 39.2 | 98 |
py
|
FATE
|
FATE-master/python/federatedml/util/consts.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
ARBITER = 'arbiter'
HOST = 'host'
GUEST = 'guest'
MODEL_AGG = "model_agg"
GRAD_AGG = "grad_agg"
BINARY = 'binary'
MULTY = 'multi'
CLASSIFICATION = "classification"
REGRESSION = 'regression'
CLUSTERING = 'clustering'
CAUSAL_LM = "causal_ml"
SEQ_2_SEQ_LM = "seq_2_seq_lm"
ONE_VS_REST = 'one_vs_rest'
PAILLIER = 'Paillier'
PAILLIER_IPCL = 'IPCL'
RANDOM_PADS = "RandomPads"
NONE = "None"
AFFINE = 'Affine'
ITERATIVEAFFINE = 'IterativeAffine'
RANDOM_ITERATIVEAFFINE = 'RandomIterativeAffine'
L1_PENALTY = 'L1'
L2_PENALTY = 'L2'
FLOAT_ZERO = 1e-8
OVERFLOW_THRESHOLD = 1e8
OT_HAUCK = 'OT_Hauck'
CE_PH = 'CommutativeEncryptionPohligHellman'
XOR = 'xor'
AES = 'aes'
PARAM_MAXDEPTH = 5
MAX_CLASSNUM = 1000
MIN_BATCH_SIZE = 10
SPARSE_VECTOR = "SparseVector"
HETERO = "hetero"
HOMO = "homo"
RAW = "raw"
RSA = "rsa"
DH = "dh"
ECDH = "ecdh"
# evaluation
AUC = "auc"
KS = "ks"
LIFT = "lift"
GAIN = "gain"
PRECISION = "precision"
RECALL = "recall"
ACCURACY = "accuracy"
EXPLAINED_VARIANCE = "explained_variance"
MEAN_ABSOLUTE_ERROR = "mean_absolute_error"
MEAN_SQUARED_ERROR = "mean_squared_error"
MEAN_SQUARED_LOG_ERROR = "mean_squared_log_error"
MEDIAN_ABSOLUTE_ERROR = "median_absolute_error"
R2_SCORE = "r2_score"
ROOT_MEAN_SQUARED_ERROR = "root_mean_squared_error"
ROC = "roc"
F1_SCORE = 'f1_score'
CONFUSION_MAT = 'confusion_mat'
PSI = 'psi'
VIF = 'vif'
PEARSON = 'pearson'
FEATURE_IMPORTANCE = 'feature_importance'
QUANTILE_PR = 'quantile_pr'
JACCARD_SIMILARITY_SCORE = 'jaccard_similarity_score'
FOWLKES_MALLOWS_SCORE = 'fowlkes_mallows_score'
ADJUSTED_RAND_SCORE = 'adjusted_rand_score'
DAVIES_BOULDIN_INDEX = 'davies_bouldin_index'
DISTANCE_MEASURE = 'distance_measure'
CONTINGENCY_MATRIX = 'contingency_matrix'
# evaluation alias metric
ALL_METRIC_NAME = [AUC, KS, LIFT, GAIN, PRECISION, RECALL, ACCURACY, EXPLAINED_VARIANCE, MEAN_ABSOLUTE_ERROR,
MEAN_SQUARED_ERROR, MEAN_SQUARED_LOG_ERROR, MEDIAN_ABSOLUTE_ERROR, R2_SCORE, ROOT_MEAN_SQUARED_ERROR,
ROC, F1_SCORE, CONFUSION_MAT, PSI, QUANTILE_PR, JACCARD_SIMILARITY_SCORE, FOWLKES_MALLOWS_SCORE,
ADJUSTED_RAND_SCORE, DAVIES_BOULDIN_INDEX, DISTANCE_MEASURE, CONTINGENCY_MATRIX]
ALIAS = {
('l1', 'mae', 'regression_l1'): MEAN_ABSOLUTE_ERROR,
('l2', 'mse', 'regression_l2', 'regression'): MEAN_SQUARED_ERROR,
('l2_root', 'rmse'): ROOT_MEAN_SQUARED_ERROR,
('msle', ): MEAN_SQUARED_LOG_ERROR,
('r2', ): R2_SCORE,
('acc', ): ACCURACY,
('DBI', ): DAVIES_BOULDIN_INDEX,
('FMI', ): FOWLKES_MALLOWS_SCORE,
('RI', ): ADJUSTED_RAND_SCORE,
('jaccard', ): JACCARD_SIMILARITY_SCORE
}
# default evaluation metrics
DEFAULT_BINARY_METRIC = [AUC, KS]
DEFAULT_REGRESSION_METRIC = [ROOT_MEAN_SQUARED_ERROR, MEAN_ABSOLUTE_ERROR]
DEFAULT_MULTI_METRIC = [ACCURACY, PRECISION, RECALL]
DEFAULT_CLUSTER_METRIC = [DAVIES_BOULDIN_INDEX]
# allowed metrics for different tasks
ALL_BINARY_METRICS = [
AUC,
KS,
LIFT,
GAIN,
ACCURACY,
PRECISION,
RECALL,
ROC,
CONFUSION_MAT,
PSI,
F1_SCORE,
QUANTILE_PR
]
ALL_REGRESSION_METRICS = [
EXPLAINED_VARIANCE,
MEAN_ABSOLUTE_ERROR,
MEAN_SQUARED_ERROR,
MEDIAN_ABSOLUTE_ERROR,
R2_SCORE,
ROOT_MEAN_SQUARED_ERROR
]
ALL_MULTI_METRICS = [
ACCURACY,
PRECISION,
RECALL
]
ALL_CLUSTER_METRICS = [
JACCARD_SIMILARITY_SCORE,
FOWLKES_MALLOWS_SCORE,
ADJUSTED_RAND_SCORE,
DAVIES_BOULDIN_INDEX,
DISTANCE_MEASURE,
CONTINGENCY_MATRIX
]
# single value metrics
REGRESSION_SINGLE_VALUE_METRICS = [
EXPLAINED_VARIANCE,
MEAN_ABSOLUTE_ERROR,
MEAN_SQUARED_ERROR,
MEAN_SQUARED_LOG_ERROR,
MEDIAN_ABSOLUTE_ERROR,
R2_SCORE,
ROOT_MEAN_SQUARED_ERROR,
]
BINARY_SINGLE_VALUE_METRIC = [
AUC,
KS
]
MULTI_SINGLE_VALUE_METRIC = [
PRECISION,
RECALL,
ACCURACY
]
CLUSTER_SINGLE_VALUE_METRIC = [
JACCARD_SIMILARITY_SCORE,
FOWLKES_MALLOWS_SCORE,
ADJUSTED_RAND_SCORE,
DAVIES_BOULDIN_INDEX
]
# workflow
TRAIN_DATA = "train_data"
TEST_DATA = "test_data"
# initialize method
RANDOM_NORMAL = "random_normal"
RANDOM_UNIFORM = 'random_uniform'
ONES = 'ones'
ZEROS = 'zeros'
CONST = 'const'
# decision tree
MAX_SPLIT_NODES = 2 ** 16
MAX_SPLITINFO_TO_COMPUTE = 2 ** 10
NORMAL_TREE = 'normal'
COMPLETE_SECURE_TREE = 'complete_secure'
STD_TREE = 'std'
MIX_TREE = 'mix'
LAYERED_TREE = 'layered'
SINGLE_OUTPUT = 'single_output'
MULTI_OUTPUT = 'multi_output'
HOST_LOCAL = 'hostLocal'
TRAIN_EVALUATE = 'train_evaluate'
VALIDATE_EVALUATE = 'validate_evaluate'
# Feature engineering
G_BIN_NUM = 10
DEFAULT_COMPRESS_THRESHOLD = 10000
DEFAULT_HEAD_SIZE = 10000
DEFAULT_RELATIVE_ERROR = 1e-4
ONE_HOT_LIMIT = 1024  # no more than 1024 possible values
PERCENTAGE_VALUE_LIMIT = 0.1
SECURE_AGG_AMPLIFY_FACTOR = 1000
QUANTILE = 'quantile'
BUCKET = 'bucket'
OPTIMAL = 'optimal'
VIRTUAL_SUMMARY = 'virtual_summary'
RECURSIVE_QUERY = 'recursive_query'
# Feature selection methods
UNIQUE_VALUE = 'unique_value'
IV_VALUE_THRES = 'iv_value_thres'
IV_PERCENTILE = 'iv_percentile'
IV_TOP_K = 'iv_top_k'
COEFFICIENT_OF_VARIATION_VALUE_THRES = 'coefficient_of_variation_value_thres'
# COEFFICIENT_OF_VARIATION_PERCENTILE = 'coefficient_of_variation_percentile'
OUTLIER_COLS = 'outlier_cols'
MANUALLY_FILTER = 'manually'
PERCENTAGE_VALUE = 'percentage_value'
IV_FILTER = 'iv_filter'
STATISTIC_FILTER = 'statistic_filter'
PSI_FILTER = 'psi_filter'
VIF_FILTER = 'vif_filter'
CORRELATION_FILTER = 'correlation_filter'
SECUREBOOST = 'sbt'
HETERO_SBT_FILTER = 'hetero_sbt_filter'
HOMO_SBT_FILTER = 'homo_sbt_filter'
HETERO_FAST_SBT_FILTER = 'hetero_fast_sbt_filter'
IV = 'iv'
# Selection Pre-model
STATISTIC_MODEL = 'statistic_model'
BINNING_MODEL = 'binning_model'
# imputer
MIN = 'min'
MAX = 'max'
MEAN = 'mean'
DESIGNATED = 'designated'
STR = 'str'
FLOAT = 'float'
INT = 'int'
ORIGIN = 'origin'
MEDIAN = 'median'
# min_max_scaler
NORMAL = 'normal'
CAP = 'cap'
MINMAXSCALE = 'min_max_scale'
STANDARDSCALE = 'standard_scale'
ALL = 'all'
COL = 'col'
# intersection cache
PHONE = 'phone'
IMEI = 'imei'
MD5 = 'md5'
SHA1 = 'sha1'
SHA224 = 'sha224'
SHA256 = 'sha256'
SHA384 = 'sha384'
SHA512 = 'sha512'
SM3 = 'sm3'
INTERSECT_CACHE_TAG = 'Za'
SHARE_INFO_COL_NAME = "share_info"
# statistics
COUNT = 'count'
STANDARD_DEVIATION = 'stddev'
SUMMARY = 'summary'
DESCRIBE = 'describe'
SUM = 'sum'
COVARIANCE = 'cov'
CORRELATION = 'corr'
VARIANCE = 'variance'
COEFFICIENT_OF_VARIATION = 'coefficient_of_variance'
MISSING_COUNT = "missing_count"
MISSING_RATIO = "missing_ratio"
SKEWNESS = 'skewness'
KURTOSIS = 'kurtosis'
# adapters model name
HOMO_SBT = 'homo_sbt'
HETERO_SBT = 'hetero_sbt'
HETERO_FAST_SBT = 'hetero_fast_sbt'
HETERO_FAST_SBT_MIX = 'hetero_fast_sbt_mix'
HETERO_FAST_SBT_LAYERED = 'hetero_fast_sbt_layered'
# tree protobuf model name
HETERO_SBT_GUEST_MODEL = 'HeteroSecureBoostingTreeGuest'
HETERO_SBT_HOST_MODEL = 'HeteroSecureBoostingTreeHost'
HETERO_FAST_SBT_GUEST_MODEL = "HeteroFastSecureBoostingTreeGuest"
HETERO_FAST_SBT_HOST_MODEL = "HeteroFastSecureBoostingTreeHost"
HOMO_SBT_GUEST_MODEL = "HomoSecureBoostingTreeGuest"
HOMO_SBT_HOST_MODEL = "HomoSecureBoostingTreeHost"
# tree decimal round to prevent float error
TREE_DECIMAL_ROUND = 10
# homo sbt backend
MEMORY_BACKEND = 'memory'
DISTRIBUTED_BACKEND = 'distributed'
# column_expand
MANUAL = 'manual'
# scorecard
CREDIT = 'credit'
# sample weight
BALANCED = 'balanced'
# min r base fraction
MIN_BASE_FRACTION = 0.01
MAX_BASE_FRACTION = 0.99
MAX_SAMPLE_OUTPUT_LIMIT = 10 ** 6
# Hetero NN Selective BP Strategy
SELECTIVE_SIZE = 1024
# intersect join methods
INNER_JOIN = "inner_join"
LEFT_JOIN = "left_join"
DEFAULT_KEY_LENGTH = 1024
MIN_HASH_FUNC_COUNT = 4
MAX_HASH_FUNC_COUNT = 32
EINI_TREE_COMPLEXITY = 1000000000
pytorch_backend = 'pytorch'
keras_backend = 'keras'
CURVE25519 = 'curve25519'
# HOMO NN Framework
FEDAVG_TRAINER = 'fedavg_trainer'
# DEEPSPEED
DEEPSPEED_MODEL_DIR = "EGGROLL_CONTAINER_MODELS_DIR"
FLOW_MODEL_SYNC_PATH = "MODEL_PATH"
# positive unlabeled
PROBABILITY = "probability"
QUANTITY = "quantity"
PROPORTION = "proportion"
DISTRIBUTION = "distribution"
| 8,740 | 22.882514 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/util/fate_operator.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Iterable
import numpy as np
from scipy.sparse import csr_matrix
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.util import paillier_check
def _one_dimension_dot(X, w):
res = 0
# LOGGER.debug("_one_dimension_dot, len of w: {}, len of X: {}".format(len(w), len(X)))
# If all weights are in one single IPCL encrypted number
if paillier_check.is_single_ipcl_encrypted_number(w):
if isinstance(X, csr_matrix):
res = w.item(0).dot(X.data)
else:
res = w.item(0).dot(X)
return res
if isinstance(X, csr_matrix):
for idx, value in zip(X.indices, X.data):
res += value * w[idx]
else:
for i in range(len(X)):
if np.fabs(X[i]) < 1e-5:
continue
res += w[i] * X[i]
if res == 0:
if paillier_check.is_paillier_encrypted_number(w[0]):
res = 0 * w[0]
return res
def dot(value, w):
w_ndim = np.ndim(w)
if paillier_check.is_single_ipcl_encrypted_number(w):
w_ndim += 1
if isinstance(value, Instance):
X = value.features
else:
X = value
# # dot(a, b)[i, j, k, m] = sum(a[i, j, :] * b[k, :, m])
# # One-dimension dot, which is the inner product of these two arrays
if np.ndim(X) == w_ndim == 1:
return _one_dimension_dot(X, w)
elif np.ndim(X) == 2 and w_ndim == 1:
res = []
for x in X:
res.append(_one_dimension_dot(x, w))
res = np.array(res)
else:
res = np.dot(X, w)
return res
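# e.g. dot(np.array([1.0, 2.0]), np.array([3.0, 4.0])) -> 11.0 (1-d inner-product path);
#      dot(np.ones((2, 3)), np.ones(3)) -> array([3., 3.]) (row-wise path)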
def vec_dot(x, w):
new_data = 0
if isinstance(x, SparseVector):
for idx, v in x.get_all_data():
# if idx < len(w):
new_data += v * w[idx]
else:
new_data = np.dot(x, w)
return new_data
def reduce_add(x, y):
if x is None and y is None:
return None
if x is None:
return y
if y is None:
return x
if not isinstance(x, Iterable):
result = x + y
elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
result = x + y
else:
result = []
for idx, acc in enumerate(x):
if acc is None:
result.append(acc)
continue
result.append(acc + y[idx])
return result
def norm(vector, p=2):
"""
Get p-norm of this vector
Parameters
----------
vector : numpy array, Input vector
p: int, p-norm
"""
if p < 1:
raise ValueError('p should larger or equal to 1 in p-norm')
if type(vector).__name__ != 'ndarray':
vector = np.array(vector)
return np.linalg.norm(vector, p)
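# e.g. norm([3, 4]) == 5.0 (Euclidean norm), norm([3, 4], p=1) == 7.0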
# def generate_anonymous(fid, party_id=None, role=None, model=None):
# if model is None:
# if party_id is None or role is None:
# raise ValueError("party_id or role should be provided when generating"
# "anonymous.")
# if party_id is None:
# party_id = model.component_properties.local_partyid
# if role is None:
# role = model.role
#
# party_id = str(party_id)
# fid = str(fid)
# return "_".join([role, party_id, fid])
#
#
# def reconstruct_fid(encoded_name):
# try:
# col_index = int(encoded_name.split('_')[-1])
# except IndexError or ValueError:
# raise RuntimeError(f"Decode name: {encoded_name} is not a valid value")
# return col_index
| 4,144 | 26.269737 | 91 |
py
|
FATE
|
FATE-master/python/federatedml/util/conversion.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def int_to_bytes(integer):
"""
Convert an int to bytes
:param integer:
:return: bytes
"""
return integer.to_bytes((integer.bit_length() + 7) // 8, 'big')
# alternatively
# return bytes.fromhex(hex(integer)[2:])
def bytes_to_int(bytes_arr):
"""
Convert bytes to an int
:param bytes_arr:
:return: int
"""
return int.from_bytes(bytes_arr, byteorder='big', signed=False)
def bytes_to_bin(bytes_arr):
"""
Convert bytes to a binary number
:param bytes_arr:
:return: str, whose length must be a multiple of 8
"""
res = bin(bytes_to_int(bytes_arr))[2:]
return bin_compensate(res)
def int_to_binary_representation(integer):
"""
integer = 2^e1 + 2^e2 + ... + 2^ek, e1 > ... > ek
:param integer: int
:return: [e1, e2, ..., ek]
"""
bin_str = bin(integer)[2:]
bin_len = len(bin_str)
exponent_list = []
for i in range(bin_len):
if bin_str[i] == '1':
exponent_list.append(bin_len - i - 1)
return exponent_list
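# e.g. int_to_binary_representation(22) -> [4, 2, 1], since 22 = 2**4 + 2**2 + 2**1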
def str_to_bin(str_arr):
"""
Convert a string to a binary number in string
:param str_arr: str
:return: str
"""
res = ''
for st in str_arr:
char = bin(ord(st))[2:]
res += bin_compensate(char)
return res
def bin_to_str(bin_str_arr):
"""
    Convert a binary-number string back to the original string
:param bin_str_arr: str, whose length must be a multiple of 8
:return: str
"""
res = ''
for i in range(0, len(bin_str_arr), 8):
res += chr(int(bin_str_arr[i:i + 8], 2))
return res
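# Illustrative round-trip sketch (not part of the original module): each character is
# encoded as an 8-bit block by str_to_bin and decoded back by bin_to_str.
# Hypothetical value:
#     >>> str_to_bin('A')
#     '01000001'
#     >>> bin_to_str('01000001')
#     'A'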
def bin_compensate(bin_arr):
"""
    Pad a binary-number string with leading zeros so that its length is a multiple of 8
:param bin_arr: str
:return: str
"""
return '0' * (8 - len(bin_arr) % 8) + bin_arr
def str_to_int(str_arr):
"""
:param str_arr: str
:return: int
"""
return int(str_to_bin(str_arr), 2)
def int_to_str(integer):
"""
:param integer: int
:return: str
"""
return bin_to_str(bin_compensate(bin(integer)[2:]))
def str_to_bytes(str_arr):
"""
'hello' -> b'hello'
:param str_arr: str
:return: bytes
"""
return bytes(str_arr, 'utf-8')
def bytes_to_str(byte_arr):
"""
b'hello' -> 'hello'
:param byte_arr: bytes
:return: str
"""
return str(byte_arr, 'utf-8')
# return str(byte_arr, 'utf-8')
| 3,085 | 21.525547 | 88 |
py
|
FATE
|
FATE-master/python/federatedml/util/abnormal_detection.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import functools
import copy
from federatedml.statistic import data_overview
from federatedml.util import LOGGER
def empty_table_detection(data_instances):
num_data = data_instances.count()
if num_data == 0:
raise ValueError(f"Count of data_instance is 0: {data_instances}")
def empty_feature_detection(data_instances):
is_empty_feature = data_overview.is_empty_feature(data_instances)
if is_empty_feature:
raise ValueError(f"Number of features of Table is 0: {data_instances}")
def column_gathering(iterable, ):
non_empty_columns = set()
for k, v in iterable:
features = v.features
if isinstance(features.dtype, (np.int, np.int64, np.int32, np.float, np.float32, np.float64, np.long)):
non_empty_columns.update(np.where(~np.isnan(features))[0])
else:
for col_idx, col_v in enumerate(features):
if col_v != col_v or col_v == "":
continue
else:
non_empty_columns.add(col_idx)
return non_empty_columns
def merge_column_sets(v1: set, v2: set):
v1_copy = copy.deepcopy(v1)
v2_copy = copy.deepcopy(v2)
v1_copy.update(v2_copy)
return v1_copy
def empty_column_detection(data_instance):
contains_empty_columns = False
lost_feat = []
is_sparse = data_overview.is_sparse_data(data_instance)
if is_sparse:
raise ValueError('sparse format empty column detection is not supported for now')
map_func = functools.partial(column_gathering, )
map_rs = data_instance.applyPartitions(map_func)
reduce_rs = map_rs.reduce(merge_column_sets)
# transform col index to col name
reduce_rs = np.array(data_instance.schema['header'])[list(reduce_rs)]
reduce_rs = set(reduce_rs)
if reduce_rs != set(data_instance.schema['header']):
lost_feat = list(set(data_instance.schema['header']).difference(reduce_rs))
contains_empty_columns = True
if contains_empty_columns:
raise ValueError('column(s) {} contain(s) no values'.format(lost_feat))
def check_legal_schema(schema):
# check for repeated header & illegal/non-printable chars except for space
# allow non-ascii chars
LOGGER.debug(f"schema is {schema}")
if schema is None:
return
header = schema.get("header", None)
LOGGER.debug(f"header is {header}")
if header is not None:
for col_name in header:
if not col_name.isprintable():
raise ValueError(f"non-printable char found in header column {col_name}, please check.")
header_set = set(header)
if len(header_set) != len(header):
raise ValueError(f"data header contains repeated names, please check.")
sid_name = schema.get("sid", None)
LOGGER.debug(f"sid is {sid_name}")
if sid_name is not None and not sid_name.isprintable():
raise ValueError(f"non-printable char found in sid_name {sid_name}, please check.")
label_name = schema.get("label_name", None)
LOGGER.debug(f"label_name is {label_name}")
if label_name is not None and not label_name.isprintable():
raise ValueError(f"non-printable char found in label_name {label_name}, please check.")
| 3,920 | 34.645455 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/util/anonymous_generator_util.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from federatedml.util.data_format_preprocess import DataFormatPreProcess
ANONYMOUS_COLUMN_PREFIX = "x"
ANONYMOUS_LABEL = "y"
SPLICES = "_"
class Anonymous(object):
def __init__(self, role=None, party_id=None, migrate_mapping=None):
self._role = role
self._party_id = party_id
self._migrate_mapping = migrate_mapping
def migrate_schema_anonymous(self, schema):
if "anonymous_header" in schema:
schema["anonymous_header"] = self.migrate_anonymous(schema["anonymous_header"])
if "anonymous_label" in schema:
schema["anonymous_label"] = self.migrate_anonymous(schema['anonymous_label'])
return schema
def migrate_anonymous(self, anonymous_header):
ret_list = True
if not isinstance(anonymous_header, list):
ret_list = False
anonymous_header = [anonymous_header]
migrate_anonymous_header = []
for column in anonymous_header:
role, party_id, suf = column.split(SPLICES, 2)
try:
migrate_party_id = self._migrate_mapping[role][int(party_id)]
except KeyError:
migrate_party_id = self._migrate_mapping[role][party_id]
except BaseException:
migrate_party_id = None
if migrate_party_id is not None:
migrate_anonymous_header.append(self.generate_anonymous_column(role, migrate_party_id, suf))
else:
migrate_anonymous_header.append(column)
if not ret_list:
migrate_anonymous_header = migrate_anonymous_header[0]
return migrate_anonymous_header
def is_anonymous(self, column):
splits = self.get_anonymous_column_splits(column)
if len(splits) < 3:
return False
role, party_id = splits[0], splits[1]
return role in self._migrate_mapping and int(party_id) in self._migrate_mapping[role]
def extend_columns(self, original_anonymous_header, extend_header):
extend_anonymous_header = []
exp_start_idx = 0
for anonymous_col_name in original_anonymous_header:
if not self.is_expand_column(anonymous_col_name):
continue
exp_start_idx = max(exp_start_idx, self.get_expand_idx(anonymous_col_name) + 1)
for i in range(len(extend_header)):
extend_anonymous_header.append(self.__generate_expand_anonymous_column(exp_start_idx + i))
return original_anonymous_header + extend_anonymous_header
@staticmethod
def get_party_id_from_anonymous_column(anonymous_column):
splits = Anonymous.get_anonymous_column_splits(anonymous_column)
if len(splits) < 3:
raise ValueError("This is not a anonymous_column")
return splits[1]
@staticmethod
def get_role_from_anonymous_column(anonymous_column):
splits = Anonymous.get_anonymous_column_splits(anonymous_column)
if len(splits) < 3:
raise ValueError("This is not a anonymous_column")
return splits[0]
@staticmethod
def get_suffix_from_anonymous_column(anonymous_column):
splits = Anonymous.get_anonymous_column_splits(anonymous_column, num=2)
if len(splits) < 3:
raise ValueError("This is not a anonymous_column")
return splits[-1]
@staticmethod
def get_anonymous_header(schema):
return schema["anonymous_header"]
@staticmethod
def filter_anonymous_header(schema, filter_ins):
return schema["anonymous_header"][np.array(filter_ins)]
@staticmethod
def reset_anonymous_header(schema, anonymous_header):
new_schema = copy.deepcopy(schema)
new_schema["anonymous_header"] = anonymous_header
return new_schema
@staticmethod
def generate_derived_header(original_header, original_anonymous_header, derived_dict):
new_anonymous_header = []
for column, anonymous_column in zip(original_header, original_anonymous_header):
if column not in derived_dict:
new_anonymous_header.append(anonymous_column)
else:
for i in range(len(derived_dict[column])):
new_anonymous_column = SPLICES.join([anonymous_column, str(i)])
new_anonymous_header.append(new_anonymous_column)
return new_anonymous_header
def __generate_expand_anonymous_column(self, fid):
return SPLICES.join(map(str, [self._role, self._party_id, "exp", fid]))
@staticmethod
def generate_anonymous_column(role, party_id, suf):
return SPLICES.join([role, str(party_id), suf])
@staticmethod
def get_anonymous_column_splits(column, num=-1):
return column.split(SPLICES, num)
@staticmethod
def is_expand_column(column_name):
splits = Anonymous.get_anonymous_column_splits(column_name)
return splits[-2] == "exp"
@staticmethod
def get_expand_idx(column_name):
return int(Anonymous.get_anonymous_column_splits(column_name)[-1])
@staticmethod
def update_anonymous_header_with_role(schema, role, party_id):
party_id = str(party_id)
new_schema = copy.deepcopy(schema)
if "anonymous_header" in schema:
old_anonymous_header = schema["anonymous_header"]
new_anonymous_header = [Anonymous.generate_anonymous_column(role, party_id, col_name)
for col_name in old_anonymous_header]
new_schema["anonymous_header"] = new_anonymous_header
if "label_name" in schema:
new_schema["anonymous_label"] = Anonymous.generate_anonymous_column(role, party_id, ANONYMOUS_LABEL)
return new_schema
def generate_anonymous_header(self, schema):
new_schema = copy.deepcopy(schema)
header = schema["header"]
if self._role:
anonymous_header = [Anonymous.generate_anonymous_column(self._role,
self._party_id,
ANONYMOUS_COLUMN_PREFIX + str(i))
for i in range(len(header))]
else:
anonymous_header = [ANONYMOUS_COLUMN_PREFIX + str(i) for i in range(len(header))]
new_schema["anonymous_header"] = anonymous_header
if "label_name" in schema:
if self._role:
new_schema["anonymous_label"] = self.generate_anonymous_column(self._role,
self._party_id,
ANONYMOUS_LABEL)
else:
new_schema["anonymous_label"] = ANONYMOUS_LABEL
return new_schema
def generated_compatible_anonymous_header_with_old_version(self, header):
if self._role is None or self._party_id is None:
raise ValueError("Please init anonymous generator with role & party_id")
return [SPLICES.join([self._role, str(self._party_id), str(idx)]) for idx in range(len(header))]
@staticmethod
def is_old_version_anonymous_header(anonymous_header):
for anonymous_col in anonymous_header:
splits = anonymous_col.split(SPLICES, -1)
if len(splits) != 3:
return False
try:
index = int(splits[2])
except ValueError:
return False
return True
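# Illustrative usage sketch (not part of the original module): generating anonymous
# column names for a local party. The role and party_id values below are hypothetical.
#     >>> anonymous = Anonymous(role="guest", party_id=9999)
#     >>> schema = anonymous.generate_anonymous_header({"header": ["age", "income"]})
#     >>> schema["anonymous_header"]
#     ['guest_9999_x0', 'guest_9999_x1']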
| 8,203 | 37.516432 | 112 |
py
|
FATE
|
FATE-master/python/federatedml/util/io_check.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing import is_table
from federatedml.util import LOGGER
def assert_io_num_rows_equal(func):
def _func(*args, **kwargs):
input_count = None
all_args = []
all_args.extend(args)
all_args.extend(kwargs.values())
for arg in all_args:
if is_table(arg):
input_count = arg.count()
break
result = func(*args, **kwargs)
if input_count is not None and is_table(result):
output_count = result.count()
LOGGER.debug(f"num row of input: {input_count} -> num row of output: {output_count}")
if input_count != output_count:
raise EnvironmentError(
f"num row of input({input_count}) not equals to num row of output({output_count})")
return result
return _func
def check_with_inst_id(data_instances):
instance = data_instances.first()[1]
if type(instance).__name__ == "Instance" and instance.with_inst_id:
return True
return False
def check_is_instance(data_instances):
instance = data_instances.first()[1]
if type(instance).__name__ == "Instance":
return True
return False
def assert_match_id_consistent(func):
def _func(*args, **kwargs):
input_with_inst_id = None
all_args = []
all_args.extend(args)
all_args.extend(kwargs.values())
for arg in all_args:
if is_table(arg):
input_with_inst_id = check_with_inst_id(arg)
break
result = func(*args, **kwargs)
if input_with_inst_id is not None and is_table(result):
if check_is_instance(result):
result_with_inst_id = check_with_inst_id(result)
LOGGER.debug(
f"Input with match id: {input_with_inst_id} -> output with match id: {result_with_inst_id}")
if input_with_inst_id and not result_with_inst_id:
raise EnvironmentError(
f"Input with match id: {input_with_inst_id} -> output with match id: {result_with_inst_id},"
f"func: {func}")
return result
return _func
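# Illustrative usage sketch (not part of the original module): these decorators are
# meant to wrap methods that take and return a Table, e.g. a transform step. The class
# and method below are hypothetical.
#
#     class HypotheticalTransformer:
#         @assert_io_num_rows_equal
#         @assert_match_id_consistent
#         def transform(self, data_instances):
#             # must return a Table with the same row count and consistent match ids
#             return data_instances.mapValues(lambda inst: inst)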
| 2,834 | 33.156627 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/util/component_properties.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import numpy as np
from fate_arch.computing import is_table
from federatedml.util import LOGGER
class RunningFuncs(object):
def __init__(self):
self.todo_func_list = []
self.todo_func_params = []
self.save_result = []
self.use_previews_result = []
def add_func(self, func, params, save_result=False, use_previews=False):
self.todo_func_list.append(func)
self.todo_func_params.append(params)
self.save_result.append(save_result)
self.use_previews_result.append(use_previews)
def __iter__(self):
for func, params, save_result, use_previews in zip(
self.todo_func_list,
self.todo_func_params,
self.save_result,
self.use_previews_result,
):
yield func, params, save_result, use_previews
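# Illustrative usage sketch (not part of the original module): RunningFuncs queues
# callables with their arguments and replays them in insertion order. Hypothetical
# callables:
#     >>> funcs = RunningFuncs()
#     >>> funcs.add_func(print, ["step one"])
#     >>> funcs.add_func(print, ["step two"], save_result=True)
#     >>> for func, params, save_result, use_previews in funcs:
#     ...     func(*params)
#     step one
#     step two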
class DSLConfigError(ValueError):
pass
class ComponentProperties(object):
def __init__(self):
self.need_cv = False
self.need_run = False
self.need_stepwise = False
self.has_model = False
self.has_isometric_model = False
self.has_train_data = False
self.has_eval_data = False
self.has_validate_data = False
self.has_test_data = False
self.has_normal_input_data = False
self.role = None
self.host_party_idlist = []
self.local_partyid = -1
self.guest_partyid = -1
self.input_data_count = 0
self.input_eval_data_count = 0
self.caches = None
self.is_warm_start = False
self.has_arbiter = False
def parse_caches(self, caches):
self.caches = caches
def parse_component_param(self, roles, param):
try:
need_cv = param.cv_param.need_cv
except AttributeError:
need_cv = False
self.need_cv = need_cv
try:
need_run = param.need_run
except AttributeError:
need_run = True
self.need_run = need_run
LOGGER.debug("need_run: {}, need_cv: {}".format(self.need_run, self.need_cv))
try:
need_stepwise = param.stepwise_param.need_stepwise
except AttributeError:
need_stepwise = False
self.need_stepwise = need_stepwise
self.has_arbiter = roles["role"].get("arbiter") is not None
self.role = roles["local"]["role"]
self.host_party_idlist = roles["role"].get("host")
self.local_partyid = roles["local"].get("party_id")
self.guest_partyid = roles["role"].get("guest")
if self.guest_partyid is not None:
self.guest_partyid = self.guest_partyid[0]
return self
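    # Illustrative sketch of the expected inputs (not part of the original module),
    # inferred from the lookups above; party ids and the `param` object are hypothetical:
    #     roles = {
    #         "role": {"guest": [9999], "host": [10000], "arbiter": [10000]},
    #         "local": {"role": "guest", "party_id": 9999},
    #     }
    #     component_properties = ComponentProperties().parse_component_param(roles, param)
    #     component_properties.role            # "guest"
    #     component_properties.local_partyid   # 9999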
def parse_dsl_args(self, datasets, model):
if "model" in model and model["model"] is not None:
self.has_model = True
if "isometric_model" in model and model["isometric_model"] is not None:
self.has_isometric_model = True
LOGGER.debug(f"parse_dsl_args data_sets: {datasets}")
if datasets is None:
return self
for data_key, data_dicts in datasets.items():
data_keys = list(data_dicts.keys())
for data_type in ["train_data", "eval_data", "validate_data", "test_data"]:
if data_type in data_keys:
setattr(self, f"has_{data_type}", True)
data_keys.remove(data_type)
LOGGER.debug(
f"[Data Parser], has_{data_type}:"
f" {getattr(self, f'has_{data_type}')}"
)
if len(data_keys) > 0:
self.has_normal_input_data = True
LOGGER.debug(
"[Data Parser], has_normal_data: {}".format(self.has_normal_input_data)
)
if self.has_eval_data:
if self.has_validate_data or self.has_test_data:
raise DSLConfigError(
"eval_data input should not be configured simultaneously"
" with validate_data or test_data"
)
# self._abnormal_dsl_config_detect()
if self.has_model and self.has_train_data:
self.is_warm_start = True
return self
def _abnormal_dsl_config_detect(self):
if self.has_validate_data:
if not self.has_train_data:
raise DSLConfigError(
"validate_data should be configured simultaneously"
" with train_data"
)
if self.has_train_data:
if self.has_normal_input_data or self.has_test_data:
raise DSLConfigError(
"train_data input should not be configured simultaneously"
" with data or test_data"
)
if self.has_normal_input_data:
if self.has_train_data or self.has_validate_data or self.has_test_data:
raise DSLConfigError(
"When data input has been configured, train_data, "
"validate_data or test_data should not be configured."
)
if self.has_test_data:
if not self.has_model:
raise DSLConfigError(
"When test_data input has been configured, model "
"input should be configured too."
)
if self.need_cv or self.need_stepwise:
if not self.has_train_data:
raise DSLConfigError(
"Train_data should be configured in cross-validate "
"task or stepwise task"
)
if (
self.has_validate_data
or self.has_normal_input_data
or self.has_test_data
):
raise DSLConfigError(
"Train_data should be set only if it is a cross-validate "
"task or a stepwise task"
)
if self.has_model or self.has_isometric_model:
raise DSLConfigError(
"In cross-validate task or stepwise task, model "
"or isometric_model should not be configured"
)
def extract_input_data(self, datasets, model):
model_data = {}
data = {}
LOGGER.debug(f"Input data_sets: {datasets}")
for cpn_name, data_dict in datasets.items():
for data_type in ["train_data", "eval_data", "validate_data", "test_data"]:
if data_type in data_dict:
d_table = data_dict.get(data_type)
if data_type in model_data:
if isinstance(model_data[data_type], list):
model_data[data_type].append(model.obtain_data(d_table))
else:
model_data[data_type] = [model_data[data_type], model.obtain_data(d_table)]
else:
model_data[data_type] = model.obtain_data(d_table)
del data_dict[data_type]
if len(data_dict) > 0:
LOGGER.debug(f"data_dict: {data_dict}")
for k, v in data_dict.items():
data_list = model.obtain_data(v)
LOGGER.debug(f"data_list: {data_list}")
if isinstance(data_list, list):
for i, data_i in enumerate(data_list):
data[".".join([cpn_name, k, str(i)])] = data_i
else:
data[".".join([cpn_name, k])] = data_list
train_data = model_data.get("train_data")
validate_data = None
if self.has_train_data:
if self.has_eval_data:
validate_data = model_data.get("eval_data")
elif self.has_validate_data:
validate_data = model_data.get("validate_data")
test_data = None
if self.has_test_data:
test_data = model_data.get("test_data")
self.has_test_data = True
elif self.has_eval_data and not self.has_train_data:
test_data = model_data.get("eval_data")
self.has_test_data = True
if validate_data or (self.has_train_data and self.has_eval_data):
self.has_validate_data = True
if self.has_train_data and is_table(train_data):
self.input_data_count = train_data.count()
elif self.has_normal_input_data:
for data_key, data_table in data.items():
if is_table(data_table):
self.input_data_count = data_table.count()
if self.has_validate_data and is_table(validate_data):
self.input_eval_data_count = validate_data.count()
self._abnormal_dsl_config_detect()
LOGGER.debug(
f"train_data: {train_data}, validate_data: {validate_data}, "
f"test_data: {test_data}, data: {data}"
)
return train_data, validate_data, test_data, data
def warm_start_process(self, running_funcs, model, train_data, validate_data, schema=None):
if schema is None:
for d in [train_data, validate_data]:
if d is not None:
schema = d.schema
break
running_funcs = self._train_process(running_funcs, model, train_data, validate_data,
test_data=None, schema=schema)
return running_funcs
def _train_process(self, running_funcs, model, train_data, validate_data, test_data, schema):
if self.has_train_data and self.has_validate_data:
running_funcs.add_func(model.set_flowid, ['fit'])
running_funcs.add_func(model.fit, [train_data, validate_data])
running_funcs.add_func(model.set_flowid, ['validate'])
running_funcs.add_func(model.predict, [train_data], save_result=True)
running_funcs.add_func(model.set_flowid, ['predict'])
running_funcs.add_func(model.predict, [validate_data], save_result=True)
running_funcs.add_func(self.union_data, ["train", "validate"], use_previews=True, save_result=True)
running_funcs.add_func(model.set_predict_data_schema, [schema],
use_previews=True, save_result=True)
elif self.has_train_data:
running_funcs.add_func(model.set_flowid, ['fit'])
running_funcs.add_func(model.fit, [train_data])
running_funcs.add_func(model.set_flowid, ['validate'])
running_funcs.add_func(model.predict, [train_data], save_result=True)
running_funcs.add_func(self.union_data, ["train"], use_previews=True, save_result=True)
running_funcs.add_func(model.set_predict_data_schema, [schema],
use_previews=True, save_result=True)
elif self.has_test_data:
running_funcs.add_func(model.set_flowid, ['predict'])
running_funcs.add_func(model.predict, [test_data], save_result=True)
running_funcs.add_func(self.union_data, ["predict"], use_previews=True, save_result=True)
running_funcs.add_func(model.set_predict_data_schema, [schema],
use_previews=True, save_result=True)
return running_funcs
def extract_running_rules(self, datasets, models, cpn):
# train_data, eval_data, data = self.extract_input_data(args)
train_data, validate_data, test_data, data = self.extract_input_data(
datasets, cpn
)
running_funcs = RunningFuncs()
schema = None
for d in [train_data, validate_data, test_data]:
if isinstance(d, list):
if d[0] is not None:
schema = d[0].schema
break
elif d is not None:
schema = d.schema
break
if not self.need_run:
running_funcs.add_func(cpn.pass_data, [data], save_result=True)
return running_funcs
if self.need_cv:
running_funcs.add_func(cpn.cross_validation, [train_data], save_result=True)
return running_funcs
if self.need_stepwise:
running_funcs.add_func(cpn.stepwise, [train_data], save_result=True)
running_funcs.add_func(self.union_data, ["train"], use_previews=True, save_result=True)
running_funcs.add_func(cpn.set_predict_data_schema, [schema],
use_previews=True, save_result=True)
return running_funcs
if self.has_model or self.has_isometric_model:
running_funcs.add_func(cpn.load_model, [models])
if self.is_warm_start:
return self.warm_start_process(running_funcs, cpn, train_data, validate_data, schema)
running_funcs = self._train_process(running_funcs, cpn, train_data, validate_data, test_data, schema)
if self.has_normal_input_data and not self.has_model:
running_funcs.add_func(cpn.extract_data, [data], save_result=True)
running_funcs.add_func(cpn.set_flowid, ['fit'])
running_funcs.add_func(cpn.fit, [], use_previews=True, save_result=True)
if self.has_normal_input_data and self.has_model:
running_funcs.add_func(cpn.extract_data, [data], save_result=True)
running_funcs.add_func(cpn.set_flowid, ['transform'])
running_funcs.add_func(cpn.transform, [], use_previews=True, save_result=True)
return running_funcs
@staticmethod
def union_data(previews_data, name_list):
if len(previews_data) == 0:
return None
if any([x is None for x in previews_data]):
return None
assert len(previews_data) == len(name_list)
def _append_name(value, name):
inst = copy.deepcopy(value)
if isinstance(inst.features, list):
inst.features.append(name)
else:
inst.features = np.append(inst.features, name)
return inst
result_data = None
for data, name in zip(previews_data, name_list):
# LOGGER.debug("before mapValues, one data: {}".format(data.first()))
f = functools.partial(_append_name, name=name)
data = data.mapValues(f)
# LOGGER.debug("after mapValues, one data: {}".format(data.first()))
if result_data is None:
result_data = data
else:
LOGGER.debug(
f"Before union, t1 count: {result_data.count()}, t2 count: {data.count()}"
)
result_data = result_data.union(data)
LOGGER.debug(f"After union, result count: {result_data.count()}")
# LOGGER.debug("before out loop, one data: {}".format(result_data.first()))
return result_data
def set_union_func(self, func):
self.union_data = func
| 15,748 | 39.17602 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/util/classify_label_checker.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# Label Checker
# =============================================================================
from federatedml.util import consts
class ClassifyLabelChecker(object):
def __init__(self):
pass
@staticmethod
def validate_label(data_inst):
"""
Label Checker in classification task.
        Check whether the number of distinct labels is no more than MAX_CLASSNUM defined in consts,
also get all distinct labels
Parameters
----------
data_inst : Table,
            values are in data instance format defined in federatedml/feature/instance.py
Returns
-------
num_class : int, the number of distinct labels
        labels : list, the distinct labels
"""
class_set = data_inst.applyPartitions(ClassifyLabelChecker.get_all_class).reduce(lambda x, y: x | y)
num_class = len(class_set)
if len(class_set) > consts.MAX_CLASSNUM:
raise ValueError("In Classfy Proble, max dif classes should no more than %d" % (consts.MAX_CLASSNUM))
return num_class, list(class_set)
@staticmethod
def get_all_class(kv_iterator):
class_set = set()
for _, inst in kv_iterator:
class_set.add(inst.label)
if len(class_set) > consts.MAX_CLASSNUM:
raise ValueError("In Classify Task, max dif classes should no more than %d" % (consts.MAX_CLASSNUM))
return class_set
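# Illustrative usage sketch (not part of the original module): get_all_class works on
# any iterator of (key, instance) pairs where the instance exposes a `label` attribute.
# SimpleNamespace stands in for federatedml Instance objects here (hypothetical data):
#     >>> from types import SimpleNamespace
#     >>> ClassifyLabelChecker.get_all_class(
#     ...     [(0, SimpleNamespace(label=1)), (1, SimpleNamespace(label=0))])
#     {0, 1}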
class RegressionLabelChecker(object):
@staticmethod
def validate_label(data_inst):
"""
Label Checker in regression task.
        Check whether all labels are numeric (can be cast to float).
Parameters
----------
data_inst : Table,
            values are in data instance format defined in federatedml/feature/instance.py
"""
data_inst.mapValues(RegressionLabelChecker.test_numeric_data)
@staticmethod
def test_numeric_data(value):
try:
label = float(value.label)
except BaseException:
raise ValueError("In Regression Task, all label should be numeric!!")
| 3,026 | 30.863158 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/util/data_io.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
import copy
import functools
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.protobuf.generated.data_io_meta_pb2 import DataIOMeta
from federatedml.protobuf.generated.data_io_meta_pb2 import ImputerMeta
from federatedml.protobuf.generated.data_io_meta_pb2 import OutlierMeta
from federatedml.protobuf.generated.data_io_param_pb2 import DataIOParam
from federatedml.protobuf.generated.data_io_param_pb2 import ImputerParam
from federatedml.protobuf.generated.data_io_param_pb2 import OutlierParam
from federatedml.statistic import data_overview
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
"""
# =============================================================================
# DenseFeatureReader
# =============================================================================
class DenseFeatureReader(object):
def __init__(self, data_io_param):
self.delimitor = data_io_param.delimitor
self.data_type = data_io_param.data_type
self.exclusive_data_type = data_io_param.exclusive_data_type
self.missing_fill = data_io_param.missing_fill
self.default_value = data_io_param.default_value
self.missing_fill_method = data_io_param.missing_fill_method
self.missing_impute = data_io_param.missing_impute
self.outlier_replace = data_io_param.outlier_replace
self.outlier_replace_method = data_io_param.outlier_replace_method
self.outlier_impute = data_io_param.outlier_impute
self.outlier_replace_value = data_io_param.outlier_replace_value
self.with_label = data_io_param.with_label
self.label_name = data_io_param.label_name if self.with_label else None
self.label_type = data_io_param.label_type if self.with_label else None
self.output_format = data_io_param.output_format
self.missing_impute_rate = None
self.outlier_replace_rate = None
self.label_idx = None
self.header = None
self.sid_name = None
self.exclusive_data_type_fid_map = {}
def generate_header(self, input_data, mode="fit"):
header = input_data.schema["header"]
sid_name = input_data.schema["sid"]
LOGGER.debug("header is {}".format(header))
LOGGER.debug("sid_name is {}".format(sid_name))
if not header and not sid_name:
raise ValueError("dense input-format should have header schema")
header_gen = None
if self.with_label:
if mode == "fit":
if not header:
raise ValueError("dense input-format for fit stage should not be None if with_label is true")
self.label_idx = header.split(self.delimitor, -1).index(self.label_name)
header_gen = header.split(self.delimitor, -1)[: self.label_idx] + \
header.split(self.delimitor, -1)[self.label_idx + 1:] or None
elif header:
header_list = header.split(self.delimitor, -1)
if self.label_name in header_list:
self.label_idx = header_list.index(self.label_name)
header_gen = header.split(self.delimitor, -1)[: self.label_idx] + \
header.split(self.delimitor, -1)[self.label_idx + 1:] or None
else:
self.label_idx = None
header_gen = header.split(self.delimitor, -1)
elif header:
header_gen = header.split(self.delimitor, -1)
self.header = header_gen
self.sid_name = sid_name
if header_gen:
for i in range(len(header_gen)):
col_name = header_gen[i]
if self.exclusive_data_type is not None and col_name in self.exclusive_data_type:
self.exclusive_data_type_fid_map[i] = self.exclusive_data_type[col_name]
def get_schema(self):
schema = make_schema(self.header, self.sid_name, self.label_name)
return schema
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read dense data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
input_data_labels = None
fit_header = None
if mode == "transform":
fit_header = self.header
self.generate_header(input_data, mode=mode)
if self.label_idx is not None:
data_shape = data_overview.get_data_shape(input_data)
if not data_shape or self.label_idx >= data_shape:
raise ValueError("input data's value is empty, it does not contain a label")
input_data_features = input_data.mapValues(
lambda value: [] if data_shape == 1 else value.split(self.delimitor, -1)[:self.label_idx] + value.split(
self.delimitor, -1)[self.label_idx + 1:])
input_data_labels = input_data.mapValues(lambda value: value.split(self.delimitor, -1)[self.label_idx])
else:
input_data_features = input_data.mapValues(
lambda value: [] if not self.header else value.split(self.delimitor, -1))
if mode == "fit":
data_instance = self.fit(input_data, input_data_features, input_data_labels)
else:
data_instance = self.transform(input_data_features, input_data_labels)
# data_instance = ModelBase.align_data_header(data_instance, fit_header)
data_instance = data_overview.header_alignment(data_instance, fit_header)
return data_instance
def fit(self, input_data, input_data_features, input_data_labels):
raise ValueError("In Fate-v1.9 or later version, DataIO is deprecated, use DataTransform instead.")
schema = self.get_schema()
set_schema(input_data_features, schema)
input_data_features = self.fill_missing_value(input_data_features, "fit")
input_data_features = self.replace_outlier_value(input_data_features, "fit")
data_instance = self.gen_data_instance(input_data_features, input_data_labels)
set_schema(data_instance, schema)
return data_instance
@assert_io_num_rows_equal
def transform(self, input_data_features, input_data_labels):
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(input_data_features, schema)
input_data_features = self.fill_missing_value(input_data_features, "transform")
input_data_features = self.replace_outlier_value(input_data_features, "transform")
data_instance = self.gen_data_instance(input_data_features, input_data_labels)
set_schema(data_instance, schema)
return data_instance
def fill_missing_value(self, input_data_features, mode="fit"):
if self.missing_fill:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.missing_impute)
if mode == "fit":
input_data_features, self.default_value = imputer_processor.fit(input_data_features,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def replace_outlier_value(self, input_data_features, mode="fit"):
if self.outlier_replace:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.outlier_impute)
if mode == "fit":
input_data_features, self.outlier_replace_value = \
imputer_processor.fit(input_data_features,
replace_method=self.outlier_replace_method,
replace_value=self.outlier_replace_value)
if self.outlier_impute is None:
self.outlier_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.outlier_replace_value)
self.outlier_replace_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def gen_data_instance(self, input_data_features, input_data_labels):
if self.label_idx is not None:
data_instance = input_data_features.join(input_data_labels,
lambda features, label:
self.to_instance(features, label))
else:
data_instance = input_data_features.mapValues(lambda features: self.to_instance(features))
return data_instance
def to_instance(self, features, label=None):
if self.header is None and len(features) != 0:
raise ValueError("features shape {} not equal to header shape 0".format(len(features)))
elif self.header is not None and len(self.header) != len(features):
raise ValueError("features shape {} not equal to header shape {}".format(len(features), len(self.header)))
if self.label_idx is not None:
if self.label_type == 'int':
label = int(label)
elif self.label_type in ["float", "float64"]:
label = float(label)
format_features = DenseFeatureReader.gen_output_format(
features,
self.data_type,
self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
else:
format_features = DenseFeatureReader.gen_output_format(
features,
self.data_type,
self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
return Instance(inst_id=None,
features=format_features,
label=label)
@staticmethod
def gen_output_format(features, data_type='float', exclusive_data_type_fid_map=None,
output_format='dense', missing_impute=None):
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
if output_format == "dense":
format_features = copy.deepcopy(features)
if data_type in ["int", "int64", "long", "float", "float64", "double"]:
for i in range(len(features)):
if (missing_impute is not None and features[i] in missing_impute) or \
(missing_impute is None and features[i] in ['', 'NULL', 'null', "NA"]):
format_features[i] = np.nan
if exclusive_data_type_fid_map:
for fid in range(len(features)):
if fid in exclusive_data_type_fid_map:
dtype = exclusive_data_type_fid_map[fid]
else:
dtype = data_type
format_features[fid] = getattr(np, dtype)(features[fid])
return np.asarray(format_features, dtype=object)
else:
return np.asarray(format_features, dtype=data_type)
indices = []
data = []
column_shape = len(features)
non_zero = 0
for i in range(column_shape):
if (missing_impute is not None and features[i] in missing_impute) or \
(missing_impute is None and features[i] in ['', 'NULL', 'null', "NA"]):
indices.append(i)
data.append(np.nan)
non_zero += 1
elif data_type in ['float', 'float64', "double"]:
if np.fabs(float(features[i])) < consts.FLOAT_ZERO:
continue
indices.append(i)
data.append(float(features[i]))
non_zero += 1
elif data_type in ['int', "int64", "long"]:
if int(features[i]) == 0:
continue
indices.append(i)
data.append(int(features[i]))
else:
indices.append(i)
data.append(features[i])
return SparseVector(indices, data, column_shape)
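    # Illustrative usage sketch (not part of the original module): with the dense
    # output format, recognized missing tokens become NaN before the dtype cast.
    # Hypothetical feature strings:
    #     >>> DenseFeatureReader.gen_output_format(["1.0", "", "3"], data_type="float")
    #     array([ 1., nan,  3.])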
def get_summary(self):
if not self.missing_fill and not self.outlier_replace:
return {}
summary_buf = {}
if self.missing_fill:
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf["missing_fill_info"] = missing_summary
if self.outlier_replace:
outlier_replace_summary = dict()
outlier_replace_summary["outlier_value"] = list(self.outlier_impute)
outlier_replace_summary["outlier_replace_value"] = dict(zip(self.header, self.outlier_replace_value))
outlier_replace_summary["outlier_replace_rate"] = dict(zip(self.header, self.outlier_replace_rate))
summary_buf["outlier_replace_rate"] = outlier_replace_summary
return summary_buf
def save_model(self):
dataio_meta, dataio_param = save_data_io_model(input_format="dense",
delimitor=self.delimitor,
data_type=self.data_type,
exclusive_data_type=self.exclusive_data_type,
with_label=self.with_label,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="DenseFeatureReader")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
dataio_meta.imputer_meta.CopyFrom(missing_imputer_meta)
dataio_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(self.outlier_replace,
self.outlier_replace_method,
self.outlier_impute,
self.outlier_replace_value,
self.outlier_replace_rate,
self.header,
"Outlier")
dataio_meta.outlier_meta.CopyFrom(outlier_meta)
dataio_param.outlier_param.CopyFrom(outlier_param)
return {"DataIOMeta": dataio_meta,
"DataIOParam": dataio_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, self.exclusive_data_type, _1, _2, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name = \
load_data_io_model("DenseFeatureReader", model_meta, model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
self.outlier_replace, self.outlier_replace_method, \
self.outlier_impute, self.outlier_replace_value = load_outlier_model(self.header,
"Outlier",
model_meta.outlier_meta,
model_param.outlier_param)
# =============================================================================
# SparseFeatureReader: mainly for libsvm input format
# =============================================================================
class SparseFeatureReader(object):
def __init__(self, data_io_param):
self.delimitor = data_io_param.delimitor
self.data_type = data_io_param.data_type
self.label_type = data_io_param.label_type
self.output_format = data_io_param.output_format
self.header = None
self.sid_name = "sid"
self.label_name = data_io_param.label_name
def get_max_feature_index(self, line, delimitor=' '):
if line.strip() == '':
raise ValueError("find an empty line, please check!!!")
cols = line.split(delimitor, -1)
if len(cols) <= 1:
return -1
return max([int(fid_value.split(":", -1)[0]) for fid_value in cols[1:]])
def generate_header(self, max_feature):
self.header = [str(i) for i in range(max_feature + 1)]
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
if not data_overview.get_data_shape(input_data):
raise ValueError("input data's value is empty, it does not contain a label")
if mode == "fit":
data_instance = self.fit(input_data)
else:
data_instance = self.transform(input_data)
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(data_instance, schema)
return data_instance
def fit(self, input_data):
get_max_fid = functools.partial(self.get_max_feature_index, delimitor=self.delimitor)
max_feature = input_data.mapValues(get_max_fid).reduce(lambda max_fid1, max_fid2: max(max_fid1, max_fid2))
if max_feature == -1:
raise ValueError("no feature value in input data, please check!")
self.generate_header(max_feature)
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def transform(self, input_data):
max_feature = len(self.header)
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def gen_data_instance(self, input_data, max_feature):
params = [self.delimitor, self.data_type,
self.label_type,
self.output_format, max_feature]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
@staticmethod
def to_instance(param_list, value):
delimitor = param_list[0]
data_type = param_list[1]
label_type = param_list[2]
output_format = param_list[3]
max_fid = param_list[4]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
label = cols[0]
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
fid_value = []
for i in range(1, len(cols)):
fid, val = cols[i].split(":", -1)
fid = int(fid)
if data_type in ["float", "float64"]:
val = float(val)
elif data_type in ["int", "int64"]:
val = int(val)
fid_value.append((fid, val))
if output_format == "dense":
features = [0 for i in range(max_fid + 1)]
for fid, val in fid_value:
features[fid] = val
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fid, val in fid_value:
indices.append(fid)
data.append(val)
features = SparseVector(indices, data, max_fid + 1)
return Instance(inst_id=None,
features=features,
label=label)
def save_model(self):
dataio_meta, dataio_param = save_data_io_model(input_format="sparse",
delimitor=self.delimitor,
data_type=self.data_type,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="SparseFeatureReader")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(missing_fill=False,
model_name="Imputer")
dataio_meta.imputer_meta.CopyFrom(missing_imputer_meta)
dataio_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
dataio_meta.outlier_meta.CopyFrom(outlier_meta)
dataio_param.outlier_param.CopyFrom(outlier_param)
return {"DataIOMeta": dataio_meta,
"DataIOParam": dataio_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, _0, _1, _2, _3, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name = load_data_io_model(
"SparseFeatureReader",
model_meta,
model_param)
# =============================================================================
# SparseTagReader: mainly for tag data
# =============================================================================
class SparseTagReader(object):
def __init__(self, data_io_param):
self.delimitor = data_io_param.delimitor
self.data_type = data_io_param.data_type
self.tag_with_value = data_io_param.tag_with_value
self.tag_value_delimitor = data_io_param.tag_value_delimitor
self.with_label = data_io_param.with_label
self.label_type = data_io_param.label_type if self.with_label else None
self.output_format = data_io_param.output_format
self.header = None
self.sid_name = "sid"
self.label_name = data_io_param.label_name if self.with_label else None
self.missing_fill = data_io_param.missing_fill
self.missing_fill_method = data_io_param.missing_fill_method
self.default_value = data_io_param.default_value
self.missing_impute_rate = None
self.missing_impute = None
@staticmethod
def agg_tag(kvs, delimitor=' ', with_label=True, tag_with_value=False, tag_value_delimitor=":"):
tags_set = set()
for key, value in kvs:
if with_label:
cols = value.split(delimitor, -1)[1:]
else:
cols = value.split(delimitor, -1)[0:]
if tag_with_value is False:
tags = cols
else:
tags = [fea_value.split(tag_value_delimitor, -1)[0] for fea_value in cols]
tags_set |= set(tags)
return tags_set
def generate_header(self, tags):
self.header = tags
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
if mode == "fit":
data_instance = self.fit(input_data)
if self.with_label:
self.label_name = "label"
else:
data_instance = self.transform(input_data)
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(data_instance, schema)
return data_instance
@staticmethod
def change_tag_to_str(value, tags_dict=None, delimitor=",", with_label=False, tag_value_delimitor=":"):
vals = value.split(delimitor, -1)
ret = [''] * len(tags_dict)
if with_label:
vals = vals[1:]
for i in range(len(vals)):
tag, value = vals[i].split(tag_value_delimitor, -1)
idx = tags_dict.get(tag, None)
if idx is not None:
ret[idx] = value
return ret
@staticmethod
def change_str_to_tag(value, tags_dict=None, delimitor=",", tag_value_delimitor=":"):
ret = [None] * len(tags_dict)
tags = sorted(list(tags_dict.keys()))
for i in range(len(value)):
tag, val = tags[i], value[i]
ret[i] = tag_value_delimitor.join([tag, val])
return delimitor.join(ret)
def fill_missing_value(self, input_data, tags_dict, mode="fit"):
str_trans_method = functools.partial(self.change_tag_to_str,
tags_dict=tags_dict,
delimitor=self.delimitor,
with_label=self.with_label,
tag_value_delimitor=self.tag_value_delimitor)
input_data = input_data.mapValues(str_trans_method)
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(input_data, schema)
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer()
if mode == "fit":
data, self.default_value = imputer_processor.fit(input_data,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
LOGGER.debug("self.default_value is {}".format(self.default_value))
else:
data = imputer_processor.transform(input_data,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
LOGGER.debug("self.missing_impute is {}".format(self.missing_impute))
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
str_trans_tag_method = functools.partial(self.change_str_to_tag,
tags_dict=tags_dict,
delimitor=self.delimitor,
tag_value_delimitor=self.tag_value_delimitor)
data = data.mapValues(str_trans_tag_method)
return data
def fit(self, input_data):
tag_aggregator = functools.partial(SparseTagReader.agg_tag,
delimitor=self.delimitor,
with_label=self.with_label,
tag_with_value=self.tag_with_value,
tag_value_delimitor=self.tag_value_delimitor)
tags_set_list = list(input_data.applyPartitions(tag_aggregator).collect())
tags_set = set()
for _, _tags_set in tags_set_list:
tags_set |= _tags_set
tags = list(tags_set)
tags = sorted(tags)
tags_dict = dict(zip(tags, range(len(tags))))
self.generate_header(tags)
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, mode="fit")
data_instance = self.gen_data_instance(input_data, tags_dict)
return data_instance
def transform(self, input_data):
tags_dict = dict(zip(self.header, range(len(self.header))))
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, mode="transform")
data_instance = self.gen_data_instance(input_data, tags_dict)
return data_instance
def gen_data_instance(self, input_data, tags_dict):
params = [self.delimitor,
self.data_type,
self.tag_with_value,
self.tag_value_delimitor,
self.with_label,
self.label_type,
self.output_format,
tags_dict]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
def get_summary(self):
if not self.missing_fill:
return {}
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf = {"missing_fill_info": missing_summary}
return summary_buf
@staticmethod
def to_instance(param_list, value):
delimitor = param_list[0]
data_type = param_list[1]
tag_with_value = param_list[2]
tag_value_delimitor = param_list[3]
with_label = param_list[4]
label_type = param_list[5]
output_format = param_list[6]
tags_dict = param_list[7]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
start_pos = 0
label = None
if with_label:
start_pos = 1
label = cols[0]
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
if output_format == "dense":
features = [0 for i in range(len(tags_dict))]
for fea in cols[start_pos:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
if _tag in tags_dict:
features[tags_dict.get(_tag)] = _val
else:
if fea in tags_dict:
features[tags_dict.get(fea)] = 1
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fea in cols[start_pos:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
else:
_tag = fea
_val = 1
if _tag not in tags_dict:
continue
indices.append(tags_dict.get(_tag))
if data_type in ["float", "float64"]:
_val = float(_val)
elif data_type in ["int", "int64", "long"]:
_val = int(_val)
elif data_type == "str":
_val = str(_val)
data.append(_val)
features = SparseVector(indices, data, len(tags_dict))
return Instance(inst_id=None,
features=features,
label=label)
def save_model(self):
dataio_meta, dataio_param = save_data_io_model(input_format="tag",
delimitor=self.delimitor,
data_type=self.data_type,
tag_with_value=self.tag_with_value,
tag_value_delimitor=self.tag_value_delimitor,
with_label=self.with_label,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="Reader")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
dataio_meta.imputer_meta.CopyFrom(missing_imputer_meta)
dataio_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
dataio_meta.outlier_meta.CopyFrom(outlier_meta)
dataio_param.outlier_param.CopyFrom(outlier_param)
return {"DataIOMeta": dataio_meta,
"DataIOParam": dataio_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, _0, self.tag_with_value, self.tag_value_delimitor, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name = load_data_io_model(
"SparseTagReader",
model_meta,
model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
class DataIO(ModelBase):
def __init__(self):
super(DataIO, self).__init__()
self.reader = None
from federatedml.param.dataio_param import DataIOParam
self.model_param = DataIOParam()
def _init_model(self, model_param):
LOGGER.warning('DataIO is deprecated, and will be removed in 1.7, use DataTransform module instead')
if model_param.input_format == "dense":
self.reader = DenseFeatureReader(self.model_param)
elif model_param.input_format == "sparse":
self.reader = SparseFeatureReader(self.model_param)
elif model_param.input_format == "tag":
self.reader = SparseTagReader(self.model_param)
self.model_param = model_param
def load_model(self, model_dict):
input_model_param = None
input_model_meta = None
for _, value in model_dict["model"].items():
for model in value:
if model.endswith("Meta"):
input_model_meta = value[model]
if model.endswith("Param"):
input_model_param = value[model]
if input_model_meta.input_format == "dense":
self.reader = DenseFeatureReader(self.model_param)
elif input_model_meta.input_format == "sparse":
self.reader = SparseFeatureReader(self.model_param)
elif input_model_meta.input_format == "tag":
self.reader = SparseTagReader(self.model_param)
self.reader.load_model(input_model_meta, input_model_param)
def fit(self, data_inst):
data_inst = self.reader.read_data(data_inst, "fit")
if isinstance(self.reader, (DenseFeatureReader, SparseTagReader)):
summary_buf = self.reader.get_summary()
if summary_buf:
self.set_summary(summary_buf)
return data_inst
def transform(self, data_inst):
return self.reader.read_data(data_inst, "transform")
def export_model(self):
model_dict = self.reader.save_model()
model_dict["DataIOMeta"].need_run = self.need_run
return model_dict
def make_schema(header=None, sid_name=None, label_name=None):
schema = {}
if header:
schema["header"] = header
if sid_name:
schema["sid_name"] = sid_name
if label_name:
schema["label_name"] = label_name
ModelBase.check_schema_content(schema)
return schema
def set_schema(data_instance, schema):
data_instance.schema = schema
def save_data_io_model(input_format="dense",
delimitor=",",
data_type="str",
exclusive_data_type=None,
tag_with_value=False,
tag_value_delimitor=":",
with_label=False,
label_name='',
label_type="int",
output_format="dense",
header=None,
sid_name=None,
model_name="DataIO"):
model_meta = DataIOMeta()
model_param = DataIOParam()
model_meta.input_format = input_format
model_meta.delimitor = delimitor
model_meta.data_type = data_type
model_meta.tag_with_value = tag_with_value
model_meta.tag_value_delimitor = tag_value_delimitor
model_meta.with_label = with_label
if with_label:
model_meta.label_name = label_name
model_meta.label_type = label_type
model_meta.output_format = output_format
if header is not None:
model_param.header.extend(header)
if sid_name:
model_param.sid_name = sid_name
if label_name:
model_param.label_name = label_name
if exclusive_data_type is not None:
model_meta.exclusive_data_type.update(exclusive_data_type)
return model_meta, model_param
def load_data_io_model(model_name="DataIO",
model_meta=None,
model_param=None):
delimitor = model_meta.delimitor
data_type = model_meta.data_type
tag_with_value = model_meta.tag_with_value
tag_value_delimitor = model_meta.tag_value_delimitor
with_label = model_meta.with_label
label_name = model_meta.label_name if with_label else None
label_type = model_meta.label_type if with_label else None
output_format = model_meta.output_format
header = list(model_param.header) or None
sid_name = None
if model_param.sid_name:
sid_name = model_param.sid_name
exclusive_data_type = None
if model_meta.exclusive_data_type:
exclusive_data_type = {}
for col_name in model_meta.exclusive_data_type:
exclusive_data_type[col_name] = model_meta.exclusive_data_type.get(col_name)
return delimitor, data_type, exclusive_data_type, tag_with_value, tag_value_delimitor, with_label, \
label_type, output_format, header, sid_name, label_name
def save_missing_imputer_model(missing_fill=False,
missing_replace_method=None,
missing_impute=None,
missing_fill_value=None,
missing_replace_rate=None,
header=None,
model_name="Imputer"):
model_meta = ImputerMeta()
model_param = ImputerParam()
model_meta.is_imputer = missing_fill
if missing_fill:
if missing_replace_method:
model_meta.strategy = str(missing_replace_method)
if missing_impute is not None:
model_meta.missing_value.extend(map(str, missing_impute))
if missing_fill_value is not None:
feature_value_dict = dict(zip(header, map(str, missing_fill_value)))
model_param.missing_replace_value.update(feature_value_dict)
if missing_replace_rate is not None:
missing_replace_rate_dict = dict(zip(header, missing_replace_rate))
model_param.missing_value_ratio.update(missing_replace_rate_dict)
return model_meta, model_param
def load_missing_imputer_model(header=None,
model_name="Imputer",
model_meta=None,
model_param=None):
missing_fill = model_meta.is_imputer
missing_replace_method = model_meta.strategy
missing_value = model_meta.missing_value
missing_fill_value = model_param.missing_replace_value
if missing_fill:
if not missing_replace_method:
missing_replace_method = None
if not missing_value:
missing_value = None
else:
missing_value = list(missing_value)
if missing_fill_value:
missing_fill_value = [missing_fill_value.get(head) for head in header]
else:
missing_fill_value = None
else:
missing_replace_method = None
missing_value = None
missing_fill_value = None
return missing_fill, missing_replace_method, missing_value, missing_fill_value
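# Illustrative sketch (not part of the original module): when missing_fill is enabled,
# the per-column replace values stored in the protobuf map are re-ordered to follow
# the header on load, e.g.
#   header = ["x0", "x1"]; missing_replace_value = {"x1": "0", "x0": "1"}
#   [missing_replace_value.get(h) for h in header]  ->  ["1", "0"]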
def save_outlier_model(outlier_replace=False,
outlier_replace_method=None,
outlier_impute=None,
outlier_replace_value=None,
outlier_replace_rate=None,
header=None,
model_name="Outlier"):
model_meta = OutlierMeta()
model_param = OutlierParam()
model_meta.is_outlier = outlier_replace
if outlier_replace:
if outlier_replace_method:
model_meta.strategy = str(outlier_replace_method)
if outlier_impute:
model_meta.outlier_value.extend(map(str, outlier_impute))
if outlier_replace_value:
outlier_value_dict = dict(zip(header, map(str, outlier_replace_value)))
model_param.outlier_replace_value.update(outlier_value_dict)
if outlier_replace_rate:
outlier_value_ratio_dict = dict(zip(header, outlier_replace_rate))
model_param.outlier_value_ratio.update(outlier_value_ratio_dict)
return model_meta, model_param
def load_outlier_model(header=None,
model_name="Outlier",
model_meta=None,
model_param=None):
outlier_replace = model_meta.is_outlier
outlier_replace_method = model_meta.strategy
outlier_value = model_meta.outlier_value
outlier_replace_value = model_param.outlier_replace_value
if outlier_replace:
if not outlier_replace_method:
outlier_replace_method = None
if not outlier_value:
outlier_value = None
else:
outlier_value = list(outlier_value)
if outlier_replace_value:
outlier_replace_value = [outlier_replace_value.get(head) for head in header]
else:
outlier_replace_value = None
else:
outlier_replace_method = None
outlier_value = None
outlier_replace_value = None
return outlier_replace, outlier_replace_method, outlier_value, outlier_replace_value
"""
class DataIO(ModelBase):
def __init__(self):
super(DataIO, self).__init__()
from federatedml.param.data_transform_param import DataTransformParam
from federatedml.util.data_transform import DataTransform
self.model_param = DataTransformParam()
self._transformer = DataTransform()
def _init_model(self, model_param):
LOGGER.warning('DataIO is deprecated, use DataTransform module instead')
self._transformer._init_model(model_param)
def load_model(self, model_dict):
self._transformer.load_model(model_dict)
def fit(self, data_inst):
raise ValueError("In Fate-v1.9 or later version, DataIO is deprecated, use DataTransform instead.")
def transform(self, data_inst):
self._transformer.role = self.role
self._transformer.component_properties = self.component_properties
return self._transformer.transform(data_inst)
def export_model(self):
        return self._transformer.export_model()
"""
| 47,723 | 40.716783 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/util/sample_weight.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from federatedml.model_base import Metric, MetricMeta
from federatedml.model_base import ModelBase
from federatedml.statistic import data_overview
from federatedml.param.sample_weight_param import SampleWeightParam
from federatedml.protobuf.generated.sample_weight_model_meta_pb2 import SampleWeightModelMeta
from federatedml.protobuf.generated.sample_weight_model_param_pb2 import SampleWeightModelParam
from federatedml.statistic.data_overview import get_label_count, check_negative_sample_weight
from federatedml.util import consts, LOGGER
class SampleWeight(ModelBase):
def __init__(self):
super().__init__()
self.model_param = SampleWeightParam()
self.metric_name = "sample_weight"
self.metric_namespace = "train"
self.metric_type = "SAMPLE_WEIGHT"
self.model_meta_name = "SampleWeightModelMeta"
self.model_param_name = "SampleWeightModelParam"
self.weight_mode = None
self.header = None
self.class_weight_dict = None
def _init_model(self, params):
self.model_param = params
self.class_weight = params.class_weight
self.sample_weight_name = params.sample_weight_name
self.normalize = params.normalize
self.need_run = params.need_run
@staticmethod
def get_class_weight(data_instances):
class_weight = get_label_count(data_instances)
n_samples = data_instances.count()
n_classes = len(class_weight.keys())
res_class_weight = {str(k): n_samples / (n_classes * v) for k, v in class_weight.items()}
return res_class_weight
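    # Illustrative sketch (not part of the original module): the "balanced" scheme
    # above follows the usual n_samples / (n_classes * class_count) rule, e.g.
    #   counts = {0: 80, 1: 20}; n, c = 100, 2
    #   {str(k): n / (c * v) for k, v in counts.items()}  ->  {'0': 0.625, '1': 2.5}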
@staticmethod
def replace_weight(data_instance, class_weight, weight_loc=None, weight_base=None):
weighted_data_instance = copy.copy(data_instance)
original_features = weighted_data_instance.features
if weight_loc is not None:
if weight_base is not None:
inst_weight = original_features[weight_loc] / weight_base
else:
inst_weight = original_features[weight_loc]
weighted_data_instance.set_weight(inst_weight)
weighted_data_instance.features = original_features[np.arange(original_features.shape[0]) != weight_loc]
else:
weighted_data_instance.set_weight(class_weight.get(str(data_instance.label), 1))
return weighted_data_instance
@staticmethod
def assign_sample_weight(data_instances, class_weight, weight_loc, normalize):
weight_base = None
if weight_loc is not None and normalize:
def sum_sample_weight(kv_iterator):
sample_weight = 0
for _, inst in kv_iterator:
sample_weight += inst.features[weight_loc]
return sample_weight
weight_sum = data_instances.mapPartitions(sum_sample_weight).reduce(lambda x, y: x + y)
# LOGGER.debug(f"weight_sum is {weight_sum}")
weight_base = weight_sum / data_instances.count()
# LOGGER.debug(f"weight_base is {weight_base}")
return data_instances.mapValues(lambda v: SampleWeight.replace_weight(v, class_weight, weight_loc, weight_base))
@staticmethod
def get_weight_loc(data_instances, sample_weight_name):
weight_loc = None
if sample_weight_name:
try:
weight_loc = data_instances.schema["header"].index(sample_weight_name)
except ValueError:
return
return weight_loc
def transform_weighted_instance(self, data_instances, weight_loc):
if self.class_weight and self.class_weight == 'balanced':
self.class_weight_dict = SampleWeight.get_class_weight(data_instances)
else:
if self.class_weight_dict is None:
self.class_weight_dict = self.class_weight
return SampleWeight.assign_sample_weight(data_instances, self.class_weight_dict, weight_loc, self.normalize)
def callback_info(self):
class_weight = None
classes = None
if self.class_weight_dict:
class_weight = {str(k): v for k, v in self.class_weight_dict.items()}
classes = sorted([str(k) for k in self.class_weight_dict.keys()])
# LOGGER.debug(f"callback class weight is: {class_weight}")
metric_meta = MetricMeta(name='train',
metric_type=self.metric_type,
extra_metas={
"weight_mode": self.weight_mode,
"class_weight": class_weight,
"classes": classes,
"sample_weight_name": self.sample_weight_name
})
self.callback_metric(metric_name=self.metric_name,
metric_namespace=self.metric_namespace,
metric_data=[Metric(self.metric_name, 0)])
self.tracker.set_metric_meta(metric_namespace=self.metric_namespace,
metric_name=self.metric_name,
metric_meta=metric_meta)
def export_model(self):
meta_obj = SampleWeightModelMeta(sample_weight_name=self.sample_weight_name,
normalize=self.normalize,
need_run=self.need_run)
param_obj = SampleWeightModelParam(header=self.header,
weight_mode=self.weight_mode,
class_weight=self.class_weight_dict)
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
return result
def load_model(self, model_dict):
param_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
self.header = list(param_obj.header)
self.need_run = meta_obj.need_run
self.weight_mode = param_obj.weight_mode
if self.weight_mode == "class weight":
self.class_weight_dict = {k: v for k, v in param_obj.class_weight.items()}
elif self.weight_mode == "sample weight name":
self.sample_weight_name = meta_obj.sample_weight_name
self.normalize = meta_obj.normalize
else:
raise ValueError(f"Unknown weight mode {self.weight_mode} loaded. "
f"Only support 'class weight' and 'sample weight name'")
def transform(self, data_instances):
LOGGER.info(f"Enter Sample Weight Transform")
new_schema = copy.deepcopy(data_instances.schema)
new_schema["sample_weight"] = "weight"
weight_loc = None
if self.weight_mode == "sample weight name":
weight_loc = SampleWeight.get_weight_loc(data_instances, self.sample_weight_name)
if weight_loc is not None:
new_schema["header"].pop(weight_loc)
else:
LOGGER.warning(f"Cannot find weight column of given sample_weight_name '{self.sample_weight_name}'."
f"Original input data returned")
return data_instances
result_instances = self.transform_weighted_instance(data_instances, weight_loc)
result_instances.schema = new_schema
self.callback_info()
if result_instances.mapPartitions(check_negative_sample_weight).reduce(lambda x, y: x or y):
LOGGER.warning(f"Negative weight found in weighted instances.")
return result_instances
def fit(self, data_instances):
if self.sample_weight_name is None and self.class_weight is None:
return data_instances
self.header = data_overview.get_header(data_instances)
if self.class_weight:
self.weight_mode = "class weight"
if self.sample_weight_name and self.class_weight:
LOGGER.warning(f"Both 'sample_weight_name' and 'class_weight' provided. "
f"Only weight from 'sample_weight_name' is used.")
new_schema = copy.deepcopy(data_instances.schema)
new_schema["sample_weight"] = "weight"
weight_loc = None
if self.sample_weight_name:
self.weight_mode = "sample weight name"
weight_loc = SampleWeight.get_weight_loc(data_instances, self.sample_weight_name)
if weight_loc is not None:
new_schema["header"].pop(weight_loc)
else:
raise ValueError(f"Cannot find weight column of given sample_weight_name '{self.sample_weight_name}'.")
result_instances = self.transform_weighted_instance(data_instances, weight_loc)
result_instances.schema = new_schema
self.callback_info()
if result_instances.mapPartitions(check_negative_sample_weight).reduce(lambda x, y: x or y):
LOGGER.warning(f"Negative weight found in weighted instances.")
return result_instances
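# --- Illustrative usage sketch (not part of the original module) ---------------
# A FATE-free illustration of the two weighting modes above: "class weight"
# derives balanced weights from label counts, while "sample weight name" reads
# weights from a designated column and, when normalize=True, divides each value
# by the column mean (the weight_base computed in assign_sample_weight).
if __name__ == "__main__":
    raw_weights = [1.0, 2.0, 3.0, 6.0]
    weight_base = sum(raw_weights) / len(raw_weights)     # 3.0
    normalized = [w / weight_base for w in raw_weights]   # [0.33.., 0.66.., 1.0, 2.0]
    print(weight_base, normalized)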
| 9,813 | 44.859813 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/util/label_transform.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from federatedml.model_base import Metric, MetricMeta
from federatedml.model_base import ModelBase
from federatedml.param.label_transform_param import LabelTransformParam
from federatedml.protobuf.generated import label_transform_meta_pb2, label_transform_param_pb2
from federatedml.statistic.data_overview import get_label_count, get_predict_result_labels, \
predict_detail_dict_to_str, predict_detail_str_to_dict
from federatedml.util import LOGGER
class LabelTransformer(ModelBase):
def __init__(self):
super().__init__()
self.model_param = LabelTransformParam()
self.metric_name = "label_transform"
self.metric_namespace = "train"
self.metric_type = "LABEL_TRANSFORM"
self.model_param_name = 'LabelTransformParam'
self.model_meta_name = 'LabelTransformMeta'
self.weight_mode = None
self.encoder_key_type = None
self.encoder_value_type = None
self.label_encoder = None
self.label_list = None
def _init_model(self, params):
self.model_param = params
self.label_encoder = params.label_encoder
self.label_list = params.label_list
self.need_run = params.need_run
def update_label_encoder(self, data):
if self.label_encoder is not None:
LOGGER.info(f"label encoder provided")
LOGGER.info("count labels in data.")
data_type = data.schema.get("content_type")
if data_type is None:
label_count = get_label_count(data)
labels = sorted(label_count.keys())
# predict result
else:
labels = sorted(get_predict_result_labels(data))
if self.label_list is not None:
LOGGER.info(f"label list provided")
self.encoder_key_type = {str(v): type(v).__name__ for v in self.label_list}
else:
self.encoder_key_type = {str(v): type(v).__name__ for v in labels}
if len(labels) != len(self.label_encoder):
missing_values = [k for k in labels if str(k) not in self.label_encoder]
LOGGER.warning(f"labels: {missing_values} found in input data "
f"but are not matched in provided label_encoder. "
f"Note that unmatched labels will not be transformed.")
self.label_encoder.update(zip([str(k) for k in missing_values],
missing_values))
self.encoder_key_type.update(zip([str(k) for k in missing_values],
[type(v).__name__ for v in missing_values]))
else:
data_type = data.schema.get("content_type")
if data_type is None:
label_count = get_label_count(data)
labels = sorted(label_count.keys())
# predict result
else:
labels = sorted(get_predict_result_labels(data))
self.label_encoder = dict(zip(labels, range(len(labels))))
if self.encoder_key_type is None:
self.encoder_key_type = {str(k): type(k).__name__ for k in self.label_encoder.keys()}
self.encoder_value_type = {str(k): type(v).__name__ for k, v in self.label_encoder.items()}
self.label_encoder = {load_value_to_type(k,
self.encoder_key_type.get(str(k), None)): v for k,
v in self.label_encoder.items()}
for k, v in self.label_encoder.items():
if v is None:
raise ValueError(f"given encoder key {k} not found in data or provided label list, please check.")
def _get_meta(self):
meta = label_transform_meta_pb2.LabelTransformMeta(
need_run=self.need_run
)
return meta
def _get_param(self):
label_encoder = self.label_encoder
if self.label_encoder is not None:
label_encoder = {str(k): str(v) for k, v in self.label_encoder.items()}
param = label_transform_param_pb2.LabelTransformParam(
label_encoder=label_encoder,
encoder_key_type=self.encoder_key_type,
encoder_value_type=self.encoder_value_type)
return param
def export_model(self):
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
self.model_output = result
return result
def load_model(self, model_dict):
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
param_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
self.need_run = meta_obj.need_run
self.encoder_key_type = param_obj.encoder_key_type
self.encoder_value_type = param_obj.encoder_value_type
self.label_encoder = {
load_value_to_type(k, self.encoder_key_type[k]): load_value_to_type(v, self.encoder_value_type[k])
for k, v in param_obj.label_encoder.items()
}
return
def callback_info(self):
metric_meta = MetricMeta(name='train',
metric_type=self.metric_type,
extra_metas={
"label_encoder": self.label_encoder
})
self.callback_metric(metric_name=self.metric_name,
metric_namespace=self.metric_namespace,
metric_data=[Metric(self.metric_name, 0)])
self.tracker.set_metric_meta(metric_namespace=self.metric_namespace,
metric_name=self.metric_name,
metric_meta=metric_meta)
@staticmethod
def replace_instance_label(instance, label_encoder):
new_instance = copy.deepcopy(instance)
label_replace_val = label_encoder.get(instance.label)
if label_replace_val is None:
raise ValueError(f"{instance.label} not found in given label encoder")
new_instance.label = label_replace_val
return new_instance
@staticmethod
def replace_predict_label(predict_inst, label_encoder):
transform_predict_inst = copy.deepcopy(predict_inst)
true_label, predict_label, predict_score, predict_detail_str, result_type = transform_predict_inst.features
predict_detail = predict_detail_str_to_dict(predict_detail_str)
true_label_replace_val, predict_label_replace_val = label_encoder.get(
true_label), label_encoder.get(predict_label)
if true_label_replace_val is None:
raise ValueError(f"{true_label_replace_val} not found in given label encoder")
if predict_label_replace_val is None:
raise ValueError(f"{predict_label_replace_val} not found in given label encoder")
label_encoder_detail = {str(k): v for k, v in label_encoder.items()}
predict_detail_dict = {label_encoder_detail[label]: score for label, score in predict_detail.items()}
predict_detail = predict_detail_dict_to_str(predict_detail_dict)
transform_predict_inst.features = [true_label_replace_val, predict_label_replace_val, predict_score,
predict_detail, result_type]
return transform_predict_inst
@staticmethod
def replace_predict_label_cluster(predict_inst, label_encoder):
transform_predict_inst = copy.deepcopy(predict_inst)
true_label, predict_label = transform_predict_inst.features[0], transform_predict_inst.features[1]
true_label, predict_label = label_encoder[true_label], label_encoder[predict_label]
transform_predict_inst.features = [true_label, predict_label]
return transform_predict_inst
@staticmethod
def transform_data_label(data, label_encoder):
data_type = data.schema.get("content_type")
if data_type == "cluster_result":
return data.mapValues(lambda v: LabelTransformer.replace_predict_label_cluster(v, label_encoder))
elif data_type == "predict_result":
            predict_detail = predict_detail_str_to_dict(data.first()[1].features[3])
            if len(predict_detail) == 1 and list(predict_detail.keys())[0] == "label":
LOGGER.info(f"Regression prediction result provided. Original data returned.")
return data
return data.mapValues(lambda v: LabelTransformer.replace_predict_label(v, label_encoder))
elif data_type is None:
return data.mapValues(lambda v: LabelTransformer.replace_instance_label(v, label_encoder))
else:
raise ValueError(f"unknown data type: {data_type} encountered. Label transform aborted.")
def transform(self, data):
LOGGER.info(f"Enter Label Transformer Transform")
if self.label_encoder is None:
raise ValueError(f"Input Label Encoder is None. Label Transform aborted.")
label_encoder = self.label_encoder
data_type = data.schema.get("content_type")
# revert label encoding if predict result
if data_type is not None:
label_encoder = dict(zip(self.label_encoder.values(), self.label_encoder.keys()))
result_data = LabelTransformer.transform_data_label(data, label_encoder)
result_data.schema = data.schema
self.callback_info()
return result_data
def fit(self, data):
LOGGER.info(f"Enter Label Transform Fit")
self.update_label_encoder(data)
result_data = LabelTransformer.transform_data_label(data, self.label_encoder)
result_data.schema = data.schema
self.callback_info()
return result_data
# also used in feature imputation, to be moved to common util
def load_value_to_type(value, value_type):
if value is None:
loaded_value = None
elif value_type in ["int", "int64", "long", "float", "float64", "double"]:
loaded_value = getattr(np, value_type)(value)
elif value_type in ["str", "_str"]:
loaded_value = str(value)
else:
raise ValueError(f"unknown value type: {value_type}")
return loaded_value
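# Illustrative examples (not part of the original module) of the type restoration
# helper above, which is also reused by feature imputation:
#   load_value_to_type("3", "int64")      ->  numpy.int64(3)
#   load_value_to_type("0.5", "float64")  ->  numpy.float64(0.5)
#   load_value_to_type("yes", "str")      ->  "yes"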
| 11,018 | 43.792683 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/util/fixpoint_solver.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
class FixedPointEncoder(object):
def __init__(self, fixpoint_precision=2**23):
self._fixpoint_precision = fixpoint_precision
def encode(self, obj):
if isinstance(obj, np.ndarray):
if not np.issubdtype(obj.dtype, np.number):
obj = obj.astype(float)
fixed_obj = np.round(obj * self._fixpoint_precision, 0).astype(int)
elif isinstance(obj, list):
            fixed_obj = np.round(np.array(obj) * self._fixpoint_precision, 0).astype(int).tolist()
else:
raise ValueError("FixPointEncoder Not support type {}".format(type(obj)))
return fixed_obj
def decode(self, obj):
if isinstance(obj, np.ndarray):
decode_obj = obj / self._fixpoint_precision
elif isinstance(obj, list):
            decode_obj = (np.array(obj) / self._fixpoint_precision).tolist()
else:
raise ValueError("FixPointEncoder Not support type {}".format(type(obj)))
return decode_obj
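# --- Illustrative round-trip sketch (not part of the original module) ----------
# Floats are scaled by 2**23 and rounded to integers on encode, then rescaled on
# decode, so the per-element error is bounded by 0.5 / 2**23.
if __name__ == "__main__":
    encoder = FixedPointEncoder()
    original = np.array([0.1, -2.5, 3.141592653589793])
    encoded = encoder.encode(original)    # integer ndarray
    restored = encoder.decode(encoded)    # float ndarray close to original
    assert np.allclose(restored, original, atol=1.0 / (2 ** 23))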
| 1,692 | 35.021277 | 99 |
py
|
FATE
|
FATE-master/python/federatedml/util/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.util._log import LOGGER
__all__ = ["LOGGER"]
| 680 | 33.05 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/util/data_transform.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
import copy
import functools
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformMeta
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformImputerMeta
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformOutlierMeta
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformParam
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformImputerParam
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformOutlierParam
from federatedml.statistic import data_overview
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.util.data_format_preprocess import DataFormatPreProcess
from federatedml.util.anonymous_generator_util import Anonymous
# =============================================================================
# DenseFeatureTransformer
# =============================================================================
class DenseFeatureTransformer(object):
def __init__(self, data_transform_param):
self.delimitor = data_transform_param.delimitor
self.data_type = data_transform_param.data_type
self.missing_fill = data_transform_param.missing_fill
self.default_value = data_transform_param.default_value
self.missing_fill_method = data_transform_param.missing_fill_method
self.missing_impute = data_transform_param.missing_impute
self.outlier_replace = data_transform_param.outlier_replace
self.outlier_replace_method = data_transform_param.outlier_replace_method
self.outlier_impute = data_transform_param.outlier_impute
self.outlier_replace_value = data_transform_param.outlier_replace_value
self.with_label = data_transform_param.with_label
self.label_name = data_transform_param.label_name.lower() if self.with_label else None
self.label_type = data_transform_param.label_type if self.with_label else None
self.output_format = data_transform_param.output_format
self.missing_impute_rate = None
self.outlier_replace_rate = None
self.header = None
self.sid_name = None
self.exclusive_data_type_fid_map = {}
self.match_id_name = data_transform_param.match_id_name
self.match_id_index = 0
self.with_match_id = data_transform_param.with_match_id
self.anonymous_generator = None
self.anonymous_header = None
if data_transform_param.exclusive_data_type:
self.exclusive_data_type = dict([(k.lower(), v)
for k, v in data_transform_param.exclusive_data_type.items()])
else:
self.exclusive_data_type = None
def _update_param(self, schema):
meta = schema["meta"]
self.delimitor = meta.get("delimiter", ",")
self.data_type = meta.get("data_type")
self.with_label = meta.get("with_label", False)
if self.with_label:
self.label_type = meta.get("label_type", "int")
self.label_name = meta.get("label_name", '')
self.with_match_id = meta.get("with_match_id", False)
if self.with_match_id:
match_id_name = schema.get("match_id_name", [])
if not self.match_id_name:
                if isinstance(match_id_name, list) and len(match_id_name) > 1:
raise ValueError("Multiple Match ID exist, please specified the one to use")
self.match_id_name = match_id_name[0] if isinstance(match_id_name, list) else match_id_name
self.match_id_index = schema["original_index_info"]["match_id_index"][0]
else:
try:
idx = match_id_name.index(self.match_id_name)
except ValueError:
raise ValueError(f"Can not find {self.match_id_name} in {match_id_name}")
self.match_id_index = schema["original_index_info"]["match_id_index"][idx]
schema["match_id_name"] = self.match_id_name
header = schema["header"]
exclusive_data_type = meta.get("exclusive_data_type", None)
if exclusive_data_type:
self._init_exclusive_data_type(exclusive_data_type, header)
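    # Illustrative note (not part of the original module): a dense schema meta consumed
    # by _update_param() above typically carries keys like the following (values here
    # are assumptions for illustration):
    #   schema["meta"] = {"input_format": "dense", "delimiter": ",", "data_type": "float",
    #                     "with_label": True, "label_name": "y", "with_match_id": False}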
def _init_exclusive_data_type(self, exclusive_data_type, header):
self.exclusive_data_type = dict([(k.lower(), v) for k, v in exclusive_data_type.items()])
for idx, col_name in enumerate(header):
if col_name in self.exclusive_data_type:
self.exclusive_data_type_fid_map[idx] = self.exclusive_data_type[col_name]
def extract_feature_value(self, value, header_index=None):
if not header_index:
return []
value = value.split(self.delimitor, -1)
if len(value) <= header_index[-1]:
raise ValueError("Feature shape is smaller than header shape")
feature_values = []
for idx in header_index:
feature_values.append(value[idx])
return feature_values
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read dense data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
schema = copy.deepcopy(input_data.schema)
if not schema.get("meta"):
LOGGER.warning("Data meta is supported to be set with data uploading or binding, "
"please refer to data transform using guides.")
meta = dict(input_format="dense",
delimiter=self.delimitor,
with_label=self.with_label,
label_name=self.label_name,
with_match_id=self.with_match_id,
data_type=self.data_type,
)
if mode == "transform" and self.with_label \
and self.label_name not in schema["header"].split(self.delimitor, -1):
del meta["label_name"]
del meta["with_label"]
schema["meta"] = meta
generated_header = DataFormatPreProcess.generate_header(input_data, schema)
schema.update(generated_header)
schema = self.anonymous_generator.generate_anonymous_header(schema)
set_schema(input_data, schema)
if self.exclusive_data_type:
self._init_exclusive_data_type(self.exclusive_data_type, schema["header"])
else:
self._update_param(schema)
header = schema["header"]
anonymous_header = schema["anonymous_header"]
training_header = self.header
if mode == "transform":
if (set(self.header) & set(header)) != set(self.header):
raise ValueError(f"Transform Data's header is {header}, expect {self.header}")
self.header = header
if not self.anonymous_header:
self.anonymous_header = anonymous_header
else:
self.header = header
self.anonymous_header = anonymous_header
header_index = schema["original_index_info"]["header_index"]
extract_feature_func = functools.partial(self.extract_feature_value,
header_index=header_index)
input_data_features = input_data.mapValues(extract_feature_func)
# input_data_features.schema = input_data.schema
input_data_features.schema = schema
input_data_labels = None
input_data_match_id = None
if "label_name" in schema:
label_index = schema["original_index_info"]["label_index"]
input_data_labels = input_data.mapValues(lambda value: value.split(self.delimitor, -1)[label_index])
if self.with_match_id:
input_data_match_id = input_data.mapValues(
lambda value: value.split(self.delimitor, -1)[self.match_id_index])
if mode == "fit":
data_instance = self.fit(input_data, input_data_features, input_data_labels, input_data_match_id)
set_schema(data_instance, schema)
else:
data_instance = self.transform(input_data_features, input_data_labels, input_data_match_id)
data_instance = data_overview.header_alignment(data_instance, training_header, self.anonymous_header)
self.header = training_header
return data_instance
def fit(self, input_data, input_data_features, input_data_labels, input_data_match_id):
input_data_features = self.fill_missing_value(input_data_features, "fit")
input_data_features = self.replace_outlier_value(input_data_features, "fit")
data_instance = self.gen_data_instance(input_data_features, input_data_labels, input_data_match_id)
return data_instance
@assert_io_num_rows_equal
def transform(self, input_data_features, input_data_labels, input_data_match_id):
schema = input_data_features.schema
input_data_features = self.fill_missing_value(input_data_features, "transform")
input_data_features = self.replace_outlier_value(input_data_features, "transform")
data_instance = self.gen_data_instance(input_data_features, input_data_labels, input_data_match_id)
data_instance.schema = schema
return data_instance
def fill_missing_value(self, input_data_features, mode="fit"):
if self.missing_fill:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.missing_impute)
if mode == "fit":
input_data_features, self.default_value = imputer_processor.fit(input_data_features,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def replace_outlier_value(self, input_data_features, mode="fit"):
if self.outlier_replace:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.outlier_impute)
if mode == "fit":
input_data_features, self.outlier_replace_value = \
imputer_processor.fit(input_data_features,
replace_method=self.outlier_replace_method,
replace_value=self.outlier_replace_value)
if self.outlier_impute is None:
self.outlier_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.outlier_replace_value)
self.outlier_replace_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def gen_data_instance(self, input_data_features, input_data_labels, input_data_match_id):
if input_data_labels:
data_instance = input_data_features.join(input_data_labels,
lambda features, label: self.to_instance(features, label))
else:
data_instance = input_data_features.mapValues(lambda features: self.to_instance(features))
if self.with_match_id:
data_instance = data_instance.join(input_data_match_id, self.append_match_id)
return data_instance
def append_match_id(self, inst, match_id):
inst.inst_id = match_id
return inst
def to_instance(self, features, label=None):
if self.header is None and len(features) != 0:
raise ValueError("features shape {} not equal to header shape 0".format(len(features)))
elif self.header is not None and len(self.header) != len(features):
raise ValueError("features shape {} not equal to header shape {}".format(len(features), len(self.header)))
if label is not None:
if self.label_type == 'int':
label = int(label)
elif self.label_type in ["float", "float64"]:
label = float(label)
format_features = DenseFeatureTransformer.gen_output_format(features, self.data_type,
self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
else:
format_features = DenseFeatureTransformer.gen_output_format(features, self.data_type,
self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
return Instance(inst_id=None,
features=format_features,
label=label)
@staticmethod
def gen_output_format(features, data_type='float', exclusive_data_type_fid_map=None,
output_format='dense', missing_impute=None):
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
missing_impute_dtype_set = {"int", "int64", "long", "float", "float64", "double"}
missing_impute_value_set = {'', 'NULL', 'null', "NA"}
type_mapping = dict()
if output_format == "dense":
# format_features = copy.deepcopy(features)
format_features = [None] * len(features)
for fid in range(len(features)):
if exclusive_data_type_fid_map is not None and fid in exclusive_data_type_fid_map:
dtype = exclusive_data_type_fid_map[fid]
else:
dtype = data_type
if dtype in missing_impute_dtype_set:
if (missing_impute is not None and features[fid] in missing_impute) or \
(missing_impute is None and features[fid] in missing_impute_value_set):
format_features[fid] = np.nan
continue
format_features[fid] = features[fid]
if exclusive_data_type_fid_map:
if dtype not in type_mapping:
np_type = getattr(np, dtype)
type_mapping[dtype] = np_type
format_features[fid] = type_mapping[dtype](format_features[fid])
if exclusive_data_type_fid_map:
return np.asarray(format_features, dtype=object)
else:
return np.asarray(format_features, dtype=data_type)
indices = []
data = []
column_shape = len(features)
non_zero = 0
for i in range(column_shape):
if (missing_impute is not None and features[i] in missing_impute) or \
(missing_impute is None and features[i] in missing_impute_value_set):
indices.append(i)
data.append(np.nan)
non_zero += 1
elif data_type in ['float', 'float64', "double"]:
if np.fabs(float(features[i])) < consts.FLOAT_ZERO:
continue
indices.append(i)
data.append(float(features[i]))
non_zero += 1
elif data_type in ['int', "int64", "long"]:
if int(features[i]) == 0:
continue
indices.append(i)
data.append(int(features[i]))
else:
indices.append(i)
data.append(features[i])
return SparseVector(indices, data, column_shape)
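    # Illustrative example (not part of the original module): with the default dense
    # output, recognised missing tokens are replaced by np.nan before the numeric cast.
    #   DenseFeatureTransformer.gen_output_format(['1.0', '', '2.5'], data_type='float')
    #   ->  array([1. , nan, 2.5])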
def get_summary(self):
if not self.missing_fill and not self.outlier_replace:
return {}
summary_buf = {}
if self.missing_fill:
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf["missing_fill_info"] = missing_summary
if self.outlier_replace:
outlier_replace_summary = dict()
outlier_replace_summary["outlier_value"] = list(self.outlier_impute)
outlier_replace_summary["outlier_replace_value"] = dict(zip(self.header, self.outlier_replace_value))
outlier_replace_summary["outlier_replace_rate"] = dict(zip(self.header, self.outlier_replace_rate))
summary_buf["outlier_replace_rate"] = outlier_replace_summary
return summary_buf
def save_model(self):
transform_meta, transform_param = save_data_transform_model(input_format="dense",
delimitor=self.delimitor,
data_type=self.data_type,
exclusive_data_type=self.exclusive_data_type,
with_label=self.with_label,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
with_match_id=self.with_match_id,
model_name="DenseFeatureTransformer",
anonymous_header=self.anonymous_header)
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
transform_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(self.outlier_replace,
self.outlier_replace_method,
self.outlier_impute,
self.outlier_replace_value,
self.outlier_replace_rate,
self.header,
"Outlier")
transform_meta.outlier_meta.CopyFrom(outlier_meta)
transform_param.outlier_param.CopyFrom(outlier_param)
return {"DataTransformMeta": transform_meta,
"DataTransformParam": transform_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, self.exclusive_data_type, _1, _2, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id, self.anonymous_header = \
load_data_transform_model("DenseFeatureTransformer", model_meta, model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
self.outlier_replace, self.outlier_replace_method, \
self.outlier_impute, self.outlier_replace_value = load_outlier_model(self.header,
"Outlier",
model_meta.outlier_meta,
model_param.outlier_param)
# =============================================================================
# SparseFeatureTransformer: mainly for libsvm input format
# =============================================================================
class SparseFeatureTransformer(object):
def __init__(self, data_transform_param):
self.delimitor = data_transform_param.delimitor
self.data_type = data_transform_param.data_type
self.label_type = data_transform_param.label_type
self.output_format = data_transform_param.output_format
self.header = None
self.sid_name = "sid"
self.with_match_id = data_transform_param.with_match_id
self.match_id_name = "match_id" if self.with_match_id else None
self.match_id_index = data_transform_param.match_id_index
self.with_label = data_transform_param.with_label
self.label_name = data_transform_param.label_name.lower() if self.with_label else None
self.anonymous_generator = None
self.anonymous_header = None
def _update_param(self, schema):
meta = schema["meta"]
self.delimitor = meta.get("delimiter", ",")
self.data_type = meta.get("data_type")
self.with_label = meta.get("with_label", False)
if self.with_label:
self.label_type = meta.get("label_type", "int")
self.label_name = meta.get("label_name", "")
self.with_match_id = meta.get("with_match_id", False)
if self.with_match_id:
match_id_name = schema.get("match_id_name")
if isinstance(match_id_name, list):
self.match_id_name = match_id_name[self.match_id_index]
else:
self.match_id_name = match_id_name
schema["match_id_name"] = self.match_id_name
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
schema = copy.deepcopy(input_data.schema)
if not schema.get("meta", {}):
LOGGER.warning("Data meta is supported to be set with data uploading or binding, "
"please refer to data transform using guides.")
meta = dict(input_format="sparse",
delimiter=self.delimitor,
with_label=self.with_label,
with_match_id=self.with_match_id,
data_type=self.data_type)
schema["meta"] = meta
generated_header = DataFormatPreProcess.generate_header(input_data, schema)
schema.update(generated_header)
schema = self.anonymous_generator.generate_anonymous_header(schema)
set_schema(input_data, schema)
else:
self._update_param(schema)
if mode == "fit":
self.header = schema["header"]
self.anonymous_header = schema["anonymous_header"]
data_instance = self.fit(input_data)
else:
if not self.anonymous_header:
header_set = set(self.header)
self.anonymous_header = []
for column, anonymous_column in zip(schema["header"], schema["anonymous_header"]):
if column not in header_set:
continue
self.anonymous_header.append(anonymous_column)
schema["header"] = self.header
schema["anonymous_header"] = self.anonymous_header
set_schema(input_data, schema)
data_instance = self.transform(input_data)
set_schema(data_instance, schema)
return data_instance
def fit(self, input_data):
max_feature = len(self.header)
if max_feature == 0:
raise ValueError("no feature value in input data, please check!")
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def transform(self, input_data):
max_feature = len(self.header)
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def gen_data_instance(self, input_data, max_feature):
id_range = input_data.schema["meta"].get("id_range", 0)
params = [self.delimitor, self.data_type,
self.label_type, self.with_match_id,
self.match_id_index, id_range,
self.output_format,
self.with_label, max_feature]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
@staticmethod
def to_instance(param_list, value):
delimitor = param_list[0]
data_type = param_list[1]
label_type = param_list[2]
with_match_id = param_list[3]
match_id_index = param_list[4]
id_range = param_list[5]
output_format = param_list[6]
with_label = param_list[7]
max_fid = param_list[8]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
offset = 0
if with_match_id:
offset = id_range if id_range else 1
match_id = cols[match_id_index]
else:
match_id = None
label = None
if with_label:
label = cols[offset]
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
offset += 1
fid_value = []
for i in range(offset, len(cols)):
fid, val = cols[i].split(":", -1)
fid = int(fid)
if data_type in ["float", "float64"]:
val = float(val)
elif data_type in ["int", "int64"]:
val = int(val)
fid_value.append((fid, val))
if output_format == "dense":
features = [0 for i in range(max_fid)]
for fid, val in fid_value:
features[fid] = val
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fid, val in fid_value:
indices.append(fid)
data.append(val)
features = SparseVector(indices, data, max_fid)
return Instance(inst_id=match_id,
features=features,
label=label)
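    # Illustrative example (not part of the original module): parsing one libsvm-style
    # row with a leading label, no match id and max_feature = 4; the parameter order
    # matches the list packed in gen_data_instance above.
    #   params = [" ", "float", "int", False, 0, 0, "dense", True, 4]
    #   inst = SparseFeatureTransformer.to_instance(params, "1 0:1.2 3:4.5")
    #   inst.label -> 1; inst.features -> array([1.2, 0. , 0. , 4.5])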
def save_model(self):
transform_meta, transform_param = save_data_transform_model(input_format="sparse",
delimitor=self.delimitor,
data_type=self.data_type,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
with_match_id=self.with_match_id,
with_label=self.with_label,
model_name="SparseFeatureTransformer",
anonymous_header=self.anonymous_header)
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(missing_fill=False,
model_name="Imputer")
transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
transform_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
transform_meta.outlier_meta.CopyFrom(outlier_meta)
transform_param.outlier_param.CopyFrom(outlier_param)
return {"DataTransformMeta": transform_meta,
"DataTransformParam": transform_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, _0, _1, _2, self.with_label, self.label_type, self.output_format, \
self.header, self.sid_name, self.label_name, self.with_match_id, self.anonymous_header = \
load_data_transform_model(
"SparseFeatureTransformer",
model_meta,
model_param)
# =============================================================================
# SparseTagTransformer: mainly for tag data
# =============================================================================
class SparseTagTransformer(object):
def __init__(self, data_transform_param):
self.delimitor = data_transform_param.delimitor
self.data_type = data_transform_param.data_type
self.tag_with_value = data_transform_param.tag_with_value
self.tag_value_delimitor = data_transform_param.tag_value_delimitor
self.with_label = data_transform_param.with_label
self.label_type = data_transform_param.label_type if self.with_label else None
self.output_format = data_transform_param.output_format
self.header = None
self.sid_name = "sid"
self.label_name = data_transform_param.label_name.lower() if data_transform_param.label_name else None
self.missing_fill = data_transform_param.missing_fill
self.missing_fill_method = data_transform_param.missing_fill_method
self.default_value = data_transform_param.default_value
self.with_match_id = data_transform_param.with_match_id
self.match_id_index = data_transform_param.match_id_index
self.match_id_name = "match_id" if self.with_match_id else None
self.missing_impute_rate = None
self.missing_impute = None
self.anonymous_generator = None
self.anonymous_header = None
def _update_param(self, schema):
meta = schema["meta"]
self.delimitor = meta.get("delimiter", ",")
self.data_type = meta.get("data_type")
self.tag_with_value = meta.get("tag_with_value")
self.tag_value_delimitor = meta.get("tag_value_delimiter", ":")
self.with_label = meta.get("with_label", False)
if self.with_label:
self.label_type = meta.get("label_type", "int")
self.label_name = meta.get("label_name")
self.with_match_id = meta.get("with_match_id", False)
if self.with_match_id:
match_id_name = schema.get("match_id_name")
if isinstance(match_id_name, list):
if not isinstance(self.match_id_index, int) or self.match_id_index >= len(match_id_name):
raise ValueError(f"match id index should between 0 and {len(match_id_name) - 1}, "
f"but {self.match_id_index} is given")
self.match_id_name = match_id_name[self.match_id_index]
else:
if self.match_id_index != 0:
raise ValueError("Only one match_id exist, match_id_index should be 0")
self.match_id_name = match_id_name
schema["match_id_name"] = self.match_id_name
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
schema = copy.deepcopy(input_data.schema)
if not schema.get("meta", {}):
LOGGER.warning("Data meta is supported to be set with data uploading or binding, "
"please refer to data transform using guides.")
meta = dict(input_format="tag",
delimiter=self.delimitor,
with_label=self.with_label,
with_match_id=self.with_match_id,
tag_with_value=self.tag_with_value,
tag_value_delimiter=self.tag_value_delimitor,
data_type=self.data_type)
schema["meta"] = meta
generated_header = DataFormatPreProcess.generate_header(input_data, schema)
schema.update(generated_header)
schema = self.anonymous_generator.generate_anonymous_header(schema)
set_schema(input_data, schema)
else:
self._update_param(schema)
if mode == "fit":
self.header = schema["header"]
self.anonymous_header = schema["anonymous_header"]
data_instance = self.fit(input_data)
else:
if not self.anonymous_header:
header_set = set(self.header)
self.anonymous_header = []
for column, anonymous_column in zip(schema["header"], schema["anonymous_header"]):
if column not in header_set:
continue
self.anonymous_header.append(anonymous_column)
schema["header"] = self.header
schema["anonymous_header"] = self.anonymous_header
set_schema(input_data, schema)
data_instance = self.transform(input_data)
set_schema(data_instance, schema)
return data_instance
@staticmethod
def change_tag_to_str(value, tags_dict=None, delimitor=",", feature_offset=0,
tag_value_delimitor=":"):
vals = value.split(delimitor, -1)
ret = [''] * len(tags_dict)
vals = vals[feature_offset:]
for i in range(len(vals)):
tag, value = vals[i].split(tag_value_delimitor, -1)
idx = tags_dict.get(tag, None)
if idx is not None:
ret[idx] = value
return ret
@staticmethod
def change_str_to_tag(value, tags_dict=None, delimitor=",", tag_value_delimitor=":"):
ret = [None] * len(tags_dict)
tags = sorted(list(tags_dict.keys()))
for i in range(len(value)):
tag, val = tags[i], value[i]
ret[i] = tag_value_delimitor.join([tag, val])
return delimitor.join(ret)
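    # Illustrative example (not part of the original module): tag rows are widened to
    # the full tag vocabulary for imputation and converted back afterwards.
    #   tags = {"x1": 0, "x2": 1, "x3": 2}
    #   SparseTagTransformer.change_tag_to_str("x1:0.5,x3:2", tags_dict=tags)
    #   ->  ['0.5', '', '2']
    #   SparseTagTransformer.change_str_to_tag(['0.5', '1', '2'], tags_dict=tags)
    #   ->  'x1:0.5,x2:1,x3:2'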
def fill_missing_value(self, input_data, tags_dict, schema, mode="fit"):
feature_offset = DataFormatPreProcess.get_feature_offset(schema)
str_trans_method = functools.partial(self.change_tag_to_str,
tags_dict=tags_dict,
delimitor=self.delimitor,
feature_offset=feature_offset,
tag_value_delimitor=self.tag_value_delimitor)
input_data = input_data.mapValues(str_trans_method)
set_schema(input_data, schema)
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer()
if mode == "fit":
data, self.default_value = imputer_processor.fit(input_data,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
LOGGER.debug("self.default_value is {}".format(self.default_value))
else:
data = imputer_processor.transform(input_data,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
LOGGER.debug("self.missing_impute is {}".format(self.missing_impute))
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
str_trans_tag_method = functools.partial(self.change_str_to_tag,
tags_dict=tags_dict,
delimitor=self.delimitor,
tag_value_delimitor=self.tag_value_delimitor)
data = data.mapValues(str_trans_tag_method)
return data
def fit(self, input_data):
schema = input_data.schema
tags_dict = dict(zip(schema["header"], range(len(schema["header"]))))
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, schema, mode="fit")
data_instance = self.gen_data_instance(input_data, schema["meta"], tags_dict)
return data_instance
def transform(self, input_data):
schema = input_data.schema
tags_dict = dict(zip(self.header, range(len(self.header))))
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, schema, mode="transform")
data_instance = self.gen_data_instance(input_data, schema["meta"], tags_dict)
return data_instance
def gen_data_instance(self, input_data, meta, tags_dict):
params = [self.delimitor,
self.data_type,
self.tag_with_value,
self.tag_value_delimitor,
self.with_label,
self.with_match_id,
self.match_id_index,
meta.get("id_range", 0),
self.label_type,
self.output_format,
tags_dict]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
def get_summary(self):
if not self.missing_fill:
return {}
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf = {"missing_fill_info": missing_summary}
return summary_buf
@staticmethod
def to_instance(param_list, value):
delimitor = param_list[0]
data_type = param_list[1]
tag_with_value = param_list[2]
tag_value_delimitor = param_list[3]
with_label = param_list[4]
with_match_id = param_list[5]
match_id_index = param_list[6]
id_range = param_list[7]
label_type = param_list[8]
output_format = param_list[9]
tags_dict = param_list[10]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
offset = 0
label = None
match_id = None
if with_match_id:
offset = id_range if id_range else 1
if offset == 0:
offset = 1
match_id = cols[match_id_index]
if with_label:
label = cols[offset]
offset += 1
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
if output_format == "dense":
features = [0 for i in range(len(tags_dict))]
for fea in cols[offset:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
if _tag in tags_dict:
features[tags_dict.get(_tag)] = _val
else:
if fea in tags_dict:
features[tags_dict.get(fea)] = 1
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fea in cols[offset:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
else:
_tag = fea
_val = 1
if _tag not in tags_dict:
continue
indices.append(tags_dict.get(_tag))
if data_type in ["float", "float64"]:
_val = float(_val)
elif data_type in ["int", "int64", "long"]:
_val = int(_val)
elif data_type == "str":
_val = str(_val)
data.append(_val)
features = SparseVector(indices, data, len(tags_dict))
return Instance(inst_id=match_id,
features=features,
label=label)
def save_model(self):
transform_meta, transform_param = save_data_transform_model(input_format="tag",
delimitor=self.delimitor,
data_type=self.data_type,
tag_with_value=self.tag_with_value,
tag_value_delimitor=self.tag_value_delimitor,
with_label=self.with_label,
label_type=self.label_type,
with_match_id=self.with_match_id,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="Transformer",
anonymous_header=self.anonymous_header)
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
transform_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
transform_meta.outlier_meta.CopyFrom(outlier_meta)
transform_param.outlier_param.CopyFrom(outlier_param)
return {"DataTransformMeta": transform_meta,
"DataTransformParam": transform_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, _0, self.tag_with_value, self.tag_value_delimitor, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id, \
self.anonymous_header = load_data_transform_model(
"SparseTagTransformer",
model_meta,
model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
class DataTransform(ModelBase):
def __init__(self):
super(DataTransform, self).__init__()
self.transformer = None
from federatedml.param.data_transform_param import DataTransformParam
self.model_param = DataTransformParam()
self._input_model_meta = None
self._input_model_param = None
def _load_reader(self, schema=None):
if schema is None or not schema.get("meta", {}):
input_format = self.model_param.input_format
else:
input_format = schema["meta"].get("input_format")
if input_format == "dense":
self.transformer = DenseFeatureTransformer(self.model_param)
elif input_format == "sparse" or input_format == "svmlight":
self.transformer = SparseFeatureTransformer(self.model_param)
elif input_format == "tag":
self.transformer = SparseTagTransformer(self.model_param)
else:
raise ValueError("Cannot recognize input format")
if self._input_model_meta:
self.transformer.load_model(self._input_model_meta, self._input_model_param)
self._input_model_meta, self._input_model_param = None, None
self.transformer.anonymous_generator = Anonymous(self.role, self.component_properties.local_partyid)
def _init_model(self, model_param):
self.model_param = model_param
def load_model(self, model_dict):
for _, value in model_dict["model"].items():
for model in value:
if model.endswith("Meta"):
self._input_model_meta = value[model]
if model.endswith("Param"):
self._input_model_param = value[model]
def fit(self, data):
self._load_reader(data.schema)
data_inst = self.transformer.read_data(data, "fit")
if isinstance(self.transformer, (DenseFeatureTransformer, SparseTagTransformer)):
summary_buf = self.transformer.get_summary()
if summary_buf:
self.set_summary(summary_buf)
clear_schema(data_inst)
return data_inst
def transform(self, data):
self._load_reader(data.schema)
data_inst = self.transformer.read_data(data, "transform")
clear_schema(data_inst)
return data_inst
def export_model(self):
if not self.need_run:
model_meta = DataTransformMeta()
model_meta.need_run = False
model_param = DataTransformParam()
            model_dict = dict(DataTransformMeta=model_meta,
                              DataTransformParam=model_param)
else:
model_dict = self.transformer.save_model()
return model_dict
def clear_schema(data_inst):
ret_schema = copy.deepcopy(data_inst.schema)
key_words = {"sid", "header", "anonymous_header", "label_name",
"anonymous_label", "match_id_name"}
for key in data_inst.schema:
if key not in key_words:
del ret_schema[key]
data_inst.schema = ret_schema
def set_schema(data_instance, schema):
data_instance.schema = schema
def save_data_transform_model(input_format="dense",
delimitor=",",
data_type="str",
exclusive_data_type=None,
tag_with_value=False,
tag_value_delimitor=":",
with_label=False,
label_name='',
label_type="int",
output_format="dense",
header=None,
sid_name=None,
with_match_id=False,
model_name="DataTransform",
anonymous_header=None):
model_meta = DataTransformMeta()
model_param = DataTransformParam()
model_meta.input_format = input_format
model_meta.delimitor = delimitor
model_meta.data_type = data_type
model_meta.tag_with_value = tag_with_value
model_meta.tag_value_delimitor = tag_value_delimitor
model_meta.with_label = with_label
if with_label:
model_meta.label_name = label_name
model_meta.label_type = label_type
model_meta.output_format = output_format
model_meta.with_match_id = with_match_id
if header is not None:
model_param.header.extend(header)
if anonymous_header is not None:
model_param.anonymous_header.extend(anonymous_header)
if sid_name:
model_param.sid_name = sid_name
if label_name:
model_param.label_name = label_name
if exclusive_data_type is not None:
model_meta.exclusive_data_type.update(exclusive_data_type)
return model_meta, model_param
def load_data_transform_model(model_name="DataTransform",
model_meta=None,
model_param=None):
delimitor = model_meta.delimitor
data_type = model_meta.data_type
tag_with_value = model_meta.tag_with_value
tag_value_delimitor = model_meta.tag_value_delimitor
with_label = model_meta.with_label
label_name = model_meta.label_name if with_label else None
label_type = model_meta.label_type if with_label else None
try:
with_match_id = model_meta.with_match_id
except AttributeError:
with_match_id = False
output_format = model_meta.output_format
header = list(model_param.header) or None
try:
anonymous_header = list(model_param.anonymous_header)
except AttributeError:
anonymous_header = None
sid_name = None
if model_param.sid_name:
sid_name = model_param.sid_name
exclusive_data_type = None
if model_meta.exclusive_data_type:
exclusive_data_type = {}
for col_name in model_meta.exclusive_data_type:
exclusive_data_type[col_name] = model_meta.exclusive_data_type.get(col_name)
return delimitor, data_type, exclusive_data_type, tag_with_value, tag_value_delimitor, with_label, \
label_type, output_format, header, sid_name, label_name, with_match_id, anonymous_header
def save_missing_imputer_model(missing_fill=False,
missing_replace_method=None,
missing_impute=None,
missing_fill_value=None,
missing_replace_rate=None,
header=None,
model_name="Imputer"):
model_meta = DataTransformImputerMeta()
model_param = DataTransformImputerParam()
model_meta.is_imputer = missing_fill
if missing_fill:
if missing_replace_method:
model_meta.strategy = str(missing_replace_method)
if missing_impute is not None:
model_meta.missing_value.extend(map(str, missing_impute))
if missing_fill_value is not None:
feature_value_dict = dict(zip(header, map(str, missing_fill_value)))
model_param.missing_replace_value.update(feature_value_dict)
if missing_replace_rate is not None:
missing_replace_rate_dict = dict(zip(header, missing_replace_rate))
model_param.missing_value_ratio.update(missing_replace_rate_dict)
return model_meta, model_param
def load_missing_imputer_model(header=None,
model_name="Imputer",
model_meta=None,
model_param=None):
missing_fill = model_meta.is_imputer
missing_replace_method = model_meta.strategy
missing_value = model_meta.missing_value
missing_fill_value = model_param.missing_replace_value
if missing_fill:
if not missing_replace_method:
missing_replace_method = None
if not missing_value:
missing_value = None
else:
missing_value = list(missing_value)
if missing_fill_value:
missing_fill_value = [missing_fill_value.get(head) for head in header]
else:
missing_fill_value = None
else:
missing_replace_method = None
missing_value = None
missing_fill_value = None
return missing_fill, missing_replace_method, missing_value, missing_fill_value
def save_outlier_model(outlier_replace=False,
outlier_replace_method=None,
outlier_impute=None,
outlier_replace_value=None,
outlier_replace_rate=None,
header=None,
model_name="Outlier"):
model_meta = DataTransformOutlierMeta()
model_param = DataTransformOutlierParam()
model_meta.is_outlier = outlier_replace
if outlier_replace:
if outlier_replace_method:
model_meta.strategy = str(outlier_replace_method)
if outlier_impute:
model_meta.outlier_value.extend(map(str, outlier_impute))
if outlier_replace_value:
outlier_value_dict = dict(zip(header, map(str, outlier_replace_value)))
model_param.outlier_replace_value.update(outlier_value_dict)
if outlier_replace_rate:
outlier_value_ratio_dict = dict(zip(header, outlier_replace_rate))
model_param.outlier_value_ratio.update(outlier_value_ratio_dict)
return model_meta, model_param
def load_outlier_model(header=None,
model_name="Outlier",
model_meta=None,
model_param=None):
outlier_replace = model_meta.is_outlier
outlier_replace_method = model_meta.strategy
outlier_value = model_meta.outlier_value
outlier_replace_value = model_param.outlier_replace_value
if outlier_replace:
if not outlier_replace_method:
outlier_replace_method = None
if not outlier_value:
outlier_value = None
else:
outlier_value = list(outlier_value)
if outlier_replace_value:
outlier_replace_value = [outlier_replace_value.get(head) for head in header]
else:
outlier_replace_value = None
else:
outlier_replace_method = None
outlier_value = None
outlier_replace_value = None
return outlier_replace, outlier_replace_method, outlier_value, outlier_replace_value
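# Minimal usage sketch: parsing one tag:value record into a dense Instance.
# The parameter list mirrors the order built in gen_data_instance; demo_tags_dict
# and the sample record are made-up values for illustration only.
if __name__ == "__main__":
    demo_tags_dict = {"age": 0, "income": 1, "score": 2}
    demo_params = [",",  # delimitor
                   "float",  # data_type
                   True,  # tag_with_value
                   ":",  # tag_value_delimitor
                   False,  # with_label
                   False,  # with_match_id
                   0,  # match_id_index
                   0,  # id_range
                   "int",  # label_type
                   "dense",  # output_format
                   demo_tags_dict]
    demo_instance = SparseTagTransformer.to_instance(demo_params, "age:30,score:0.7")
    # tags absent from the record keep the default value 0: [30.0, 0.0, 0.7]
    print(demo_instance.features)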
| 58,078 | 43.744992 | 139 |
py
|
FATE
|
FATE-master/python/federatedml/util/param_extract.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
# Param Exact Class
# =============================================================================
import builtins
from federatedml.util import consts
class ParamExtract(object):
def __init__(self):
self.builtin_types = dir(builtins)
def parse_param_from_config(
self, param, config_json, valid_check=False, module=None, cpn=None
):
if config_json is None or type(config_json).__name__ != "dict":
raise Exception(
"config file is not a valid dict type, please have a check!"
)
# default_section = type(param).__name__
if "ComponentParam" not in config_json:
return param
"""
if default_section not in config_json:
return param
"""
param = self.recursive_parse_param_from_config(
param,
config_json.get("ComponentParam"),
param_parse_depth=0,
valid_check=valid_check,
name=f"{module}#{cpn}",
)
return param
def recursive_parse_param_from_config(
self, param, config_json, param_parse_depth, valid_check, name
):
if param_parse_depth > consts.PARAM_MAXDEPTH:
raise ValueError("Param define nesting too deep!!!, can not parse it")
inst_variables = param.__dict__
for variable in inst_variables:
attr = getattr(param, variable)
if type(attr).__name__ in self.builtin_types or attr is None:
if variable in config_json:
option = config_json[variable]
setattr(param, variable, option)
elif variable in config_json:
sub_params = self.recursive_parse_param_from_config(
attr,
config_json.get(variable),
param_parse_depth + 1,
valid_check,
name,
)
setattr(param, variable, sub_params)
if valid_check:
redundant = []
for var in config_json:
if var not in inst_variables:
redundant.append(var)
if redundant and name is not None:
raise ValueError(f"cpn `{name}` has redundant parameters {redundant}")
return param
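# Minimal usage sketch: copying matching keys from a job config onto a param
# object. DemoParam is a made-up stand-in for a real federatedml param class.
if __name__ == "__main__":
    class DemoParam:
        def __init__(self):
            self.learning_rate = 0.1
            self.max_iter = 100
    demo_config = {"ComponentParam": {"max_iter": 10}}
    demo_param = ParamExtract().parse_param_from_config(DemoParam(), demo_config)
    print(demo_param.learning_rate, demo_param.max_iter)  # 0.1 10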
| 3,079 | 32.478261 | 86 |
py
|
FATE
|
FATE-master/python/federatedml/util/reduce_by_key.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def reduce(table, func, key_func=None):
if key_func is None:
return table.reduce(func)
it = table.collect()
ret = {}
for k, v in it:
agg_key = key_func(k)
if agg_key in ret:
ret[agg_key] = func(ret[agg_key], v)
else:
ret[agg_key] = v
return ret
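# Minimal usage sketch: grouping values by a derived key. It assumes a running
# FATE computing session, mirroring how tables are built in the unit tests;
# "reduce_by_key_demo" and the toy table are illustrative only.
if __name__ == "__main__":
    from fate_arch.session import computing_session as session
    session.init("reduce_by_key_demo")
    demo_table = session.parallelize([(i, 1) for i in range(10)],
                                     include_key=True, partition=2)
    # aggregate the ones by key parity: {0: 5, 1: 5}
    print(reduce(demo_table, lambda x, y: x + y, key_func=lambda k: k % 2))
    session.stop()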
| 938 | 30.3 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/util/data_format_preprocess.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import functools
import numpy as np
DEFAULT_LABEL_NAME = "label"
DEFAULT_MATCH_ID_PREFIX = "match_id"
SVMLIGHT_COLUMN_PREFIX = "x"
DEFAULT_SID_NAME = "sid"
DELIMITER = ","
class DataFormatPreProcess(object):
@staticmethod
def get_feature_offset(meta):
"""
works for sparse/svmlight/tag value data
"""
with_label = meta.get("with_label", False)
with_match_id = meta.get("with_match_id", False)
id_range = meta.get("id_range", 0)
if with_match_id:
if not id_range:
id_range = 1
offset = id_range
if with_label:
offset += 1
return offset
@staticmethod
def agg_partition_tags(kvs, delimiter=",", offset=0, tag_with_value=True, tag_value_delimiter=":"):
tag_set = set()
for _, value in kvs:
cols = value.split(delimiter, -1)[offset:]
if tag_with_value:
tag_set |= set([col.split(tag_value_delimiter, -1)[0] for col in cols])
else:
tag_set |= set(cols)
return tag_set
@staticmethod
def get_tag_list(data, schema):
if "meta" not in schema:
raise ValueError("Meta not in schema")
meta = schema["meta"]
if meta["input_format"] != "tag":
raise ValueError("Input DataFormat Should Be Tag Or Tag Value")
delimiter = meta["delimiter"]
tag_with_value = meta["tag_with_value"]
if not isinstance(tag_with_value, bool):
raise ValueError(f"tag with value should be bool, bug {tag_with_value} find")
tag_value_delimiter = meta["tag_value_delimiter"]
offset = DataFormatPreProcess.get_feature_offset(meta)
agg_func = functools.partial(DataFormatPreProcess.agg_partition_tags,
delimiter=delimiter,
offset=offset,
tag_with_value=tag_with_value,
tag_value_delimiter=tag_value_delimiter)
agg_tags = data.applyPartitions(agg_func).reduce(lambda tag_set1, tag_set2: tag_set1 | tag_set2)
return sorted(agg_tags)
@staticmethod
def get_lib_svm_dim(data, schema):
if "meta" not in schema:
raise ValueError("Meta not in schema")
meta = schema["meta"]
if "input_format" == ["sparse", "svmlight"]:
raise ValueError("Input DataFormat Should Be SVMLight")
delimiter = meta.get("delimiter", " ")
offset = DataFormatPreProcess.get_feature_offset(meta)
max_dim = data.\
mapValues(
lambda value:
max([int(fid_value.split(":", -1)[0]) for fid_value in value.split(delimiter, -1)[offset:]])).\
reduce(lambda x, y: max(x, y))
return max_dim
@staticmethod
def generate_header(data, schema):
if not schema.get('meta'):
raise ValueError("Meta not in schema")
meta = schema["meta"]
generated_header = dict(original_index_info=dict(), meta=meta)
input_format = meta.get("input_format")
delimiter = meta.get("delimiter", ",")
if not input_format:
raise ValueError("InputFormat should be configured.")
if input_format == "dense":
if "header" not in schema:
raise ValueError("Dense input data must have schema")
header = schema["header"].strip().split(delimiter, -1)
header = list(map(lambda col: col.strip(), header))
header_index_mapping = dict(zip(header, range(len(header))))
with_label = meta.get("with_label", False)
if not isinstance(with_label, bool):
raise ValueError("with_label should be True or False")
id_list = meta.get("id_list", [])
if not isinstance(id_list, (type(None), list)):
raise ValueError("id_list should be list type or None")
with_match_id = meta.get("with_match_id", False)
filter_ids = set()
if with_match_id:
if not id_list:
match_id_name = header[0]
match_id_index = [0]
filter_ids.add(0)
else:
match_id_name = []
match_id_index = []
for _id in id_list:
if _id in header_index_mapping:
match_id_name.append(_id)
match_id_index.append(header_index_mapping[_id])
filter_ids.add(match_id_index[-1])
else:
raise ValueError(f"Can not find {_id} in id_list in data's header")
generated_header["match_id_name"] = match_id_name
generated_header["original_index_info"]["match_id_index"] = match_id_index
if with_label:
label_name = meta["label_name"]
label_index = header_index_mapping[label_name]
generated_header["label_name"] = label_name
generated_header["original_index_info"]["label_index"] = label_index
filter_ids.add(label_index)
header_ids = list(filter(lambda ids: ids not in filter_ids, range(len(header))))
generated_header["original_index_info"]["header_index"] = header_ids
generated_header["header"] = np.array(header)[header_ids].tolist()
else:
if input_format == "tag":
sorted_tag_list = DataFormatPreProcess.get_tag_list(data, schema)
generated_header["header"] = sorted_tag_list
elif input_format in ["sparse", "svmlight"]:
max_dim = DataFormatPreProcess.get_lib_svm_dim(data, schema)
generated_header["header"] = [SVMLIGHT_COLUMN_PREFIX + str(i) for i in range(max_dim + 1)]
else:
raise NotImplementedError(f"InputFormat {input_format} is not implemented")
with_label = meta.get("with_label", False)
with_match_id = meta.get("with_match_id", False)
id_range = meta.get("id_range", 0)
if id_range and not with_match_id:
raise ValueError(f"id_range {id_range} != 0, with_match_id should be true")
if with_match_id:
if not id_range:
id_range = 1
if id_range == 1:
generated_header["match_id_name"] = DEFAULT_MATCH_ID_PREFIX
else:
generated_header["match_id_name"] = [DEFAULT_MATCH_ID_PREFIX + str(i) for i in range(id_range)]
if with_label:
generated_header["label_name"] = DEFAULT_LABEL_NAME
if id_range:
generated_header["meta"]["id_range"] = id_range
generated_header["is_display"] = False
sid = schema.get("sid")
if sid is None or sid == "":
sid = DEFAULT_SID_NAME
generated_header["sid"] = sid.strip()
return generated_header
@staticmethod
def reconstruct_header(schema):
original_index_info = schema.get("original_index_info")
if not original_index_info:
return schema["header"]
header_index_mapping = dict()
if "header_index" in original_index_info and original_index_info["header_index"]:
for idx, col_name in zip(original_index_info["header_index"], schema["header"]):
header_index_mapping[idx] = col_name
if original_index_info.get("match_id_index") is not None:
match_id_name = schema["match_id_name"]
match_id_index = original_index_info["match_id_index"]
if isinstance(match_id_name, str):
header_index_mapping[match_id_index[0]] = match_id_name
else:
for idx, col_name in zip(match_id_index, match_id_name):
header_index_mapping[idx] = col_name
if original_index_info.get("label_index") is not None:
header_index_mapping[original_index_info["label_index"]] = schema["label_name"]
original_header = [None] * len(header_index_mapping)
for idx, col_name in header_index_mapping.items():
original_header[idx] = col_name
return original_header
@staticmethod
def extend_header(schema, columns):
schema = copy.deepcopy(schema)
original_index_info = schema.get("original_index_info")
columns = list(map(lambda column: column.strip(), columns))
header = schema["header"]
if isinstance(header, list):
header.extend(columns)
schema["header"] = header
if original_index_info and "header_index" in original_index_info:
header_index = original_index_info["header_index"]
if header_index:
pre_max_col_idx = max(header_index)
else:
pre_max_col_idx = -1
if original_index_info.get("label_index") is not None:
pre_max_col_idx = max(original_index_info["label_index"], pre_max_col_idx)
if original_index_info.get("match_id_index") is not None:
pre_max_col_idx = max(max(original_index_info["match_id_index"]), pre_max_col_idx)
append_header_index = [i + pre_max_col_idx + 1 for i in range(len(columns))]
schema["original_index_info"]["header_index"] = header_index + append_header_index
else:
if len(header) == 0:
new_header = DELIMITER.join(columns)
else:
new_header = DELIMITER.join(header.split(DELIMITER, -1) + columns)
schema["header"] = new_header
if schema.get("sid") is not None:
schema["sid"] = schema["sid"].strip()
return schema
@staticmethod
def clean_header(schema):
schema = copy.deepcopy(schema)
header = schema["header"]
if "label_name" in schema:
del schema["label_name"]
if "anonymous_header" in schema:
del schema["anonymous_header"]
if "anonymous_label" in schema:
del schema["anonymous_label"]
if isinstance(header, list):
schema["header"] = []
original_index_info = schema.get("original_index_info")
if original_index_info:
del schema["original_index_info"]
if "match_id_name" in schema:
del schema["match_id_name"]
if "match_id_index" in schema:
del schema["match_id_index"]
else:
schema["header"] = ""
return schema
@staticmethod
def recover_schema(schema):
if not schema.get('meta'):
raise ValueError("Meta not in schema, can not recover meta")
recovery_schema = copy.deepcopy(schema)
meta = schema["meta"]
input_format = meta.get("input_format", "dense")
if input_format == "dense":
"""schema has not been processed yet"""
if "original_index_info" not in schema:
return recovery_schema
header_list = DataFormatPreProcess.reconstruct_header(schema)
del recovery_schema["original_index_info"]
delimiter = schema.get("delimiter", ",")
header = "" if not header_list else delimiter.join(header_list)
recovery_schema["header"] = header
if "label_name" in recovery_schema:
del recovery_schema["label_name"]
if meta.get("with_match_id"):
del recovery_schema["match_id_name"]
else:
recovery_schema["header"] = ""
if "label_name" in recovery_schema:
del recovery_schema["label_name"]
if meta.get("id_range"):
recovery_schema["meta"]["id_range"] = 0
if meta.get("with_label"):
del recovery_schema["meta"]["label_name"]
del recovery_schema["is_display"]
if meta.get("with_match_id"):
del recovery_schema["match_id_name"]
if "anonymous_header" in schema:
del recovery_schema["anonymous_header"]
if "anonymous_label" in schema:
del recovery_schema["anonymous_label"]
return recovery_schema
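# Minimal usage sketch: generating a header for dense input. The dense branch
# only inspects the schema, so no data table is passed here; the column names
# in demo_schema are made up for illustration.
if __name__ == "__main__":
    demo_schema = {"sid": "id",
                   "header": "x0,x1,y",
                   "meta": {"input_format": "dense", "delimiter": ",",
                            "with_label": True, "label_name": "y",
                            "with_match_id": False}}
    generated = DataFormatPreProcess.generate_header(None, demo_schema)
    print(generated["header"], generated["label_name"])  # ['x0', 'x1'] y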
| 13,235 | 36.179775 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/util/_log.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common.log import getLogger
LOGGER = getLogger()
| 681 | 34.894737 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/util/test/anonymous_generator_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import random
from federatedml.util.anonymous_generator_util import Anonymous
class TestAnonymousGenerator(unittest.TestCase):
def setUp(self):
pass
def test_extend_columns(self):
anonymous_generator = Anonymous(role="guest", party_id=10000)
schema = dict(header=["feature" + str(i) for i in range(100)])
new_schema = anonymous_generator.generate_anonymous_header(schema)
anonymous_header = new_schema["anonymous_header"]
extend_columns1 = ["e0", "e1", "e3"]
anonymous_header1 = anonymous_generator.extend_columns(anonymous_header, extend_columns1)
self.assertTrue(len(anonymous_header1) == len(anonymous_header) + len(extend_columns1))
for i in range(len(extend_columns1)):
idx = i + len(anonymous_header)
self.assertTrue(anonymous_header1[idx] == "_".join(["guest", "10000", "exp", str(i)]))
extend_columns2 = ["f0", "f1", "f2", "f3"]
anonymous_header2 = anonymous_generator.extend_columns(anonymous_header1, extend_columns2)
self.assertTrue(len(anonymous_header2) == len(anonymous_header) + len(extend_columns1) + len(extend_columns2))
for i in range(len(extend_columns2)):
idx = i + len(anonymous_header) + len(extend_columns1)
self.assertTrue(anonymous_header2[idx] == "_".join(
["guest", "10000", "exp", str(i + len(extend_columns1))]))
def test_anonymous_header_generate_with_party_id(self):
anonymous_generator = Anonymous(role="guest", party_id=10000)
schema = dict()
schema["header"] = ["feature" + str(i) for i in range(100)]
new_schema = anonymous_generator.generate_anonymous_header(schema)
anonymous_header = new_schema["anonymous_header"]
self.assertTrue(len(anonymous_header) == 100)
for i in range(100):
self.assertTrue(anonymous_header[i] == "_".join(["guest", "10000", "x" + str(i)]))
def test_anonymous_header_generate_without_party_id(self):
schema = dict(header=["feature" + str(i) for i in range(100)])
new_schema = Anonymous().generate_anonymous_header(schema)
anonymous_header = new_schema["anonymous_header"]
self.assertTrue(len(anonymous_header) == 100)
for i in range(100):
self.assertTrue(anonymous_header[i] == "x" + str(i))
def test_generate_derived_header_without_extend(self):
schema = dict(header=["feature" + str(i) for i in range(10)])
new_schema = Anonymous(role="guest", party_id=10000).generate_anonymous_header(schema)
anonymous_header = new_schema["anonymous_header"]
derived_dict = {"feature5": ["feature5_f0", "feature5_f1", "feature5_f2", "feature5_f3"],
"feature6": ["feature6_e1", "feature6_e2", "feature6_e3"]}
derived_anonymous_header = Anonymous().generate_derived_header(original_header=schema["header"],
original_anonymous_header=anonymous_header,
derived_dict=derived_dict)
for i in range(5, 9):
self.assertTrue(derived_anonymous_header[i] == anonymous_header[5] + "_" + str(i - 5))
for i in range(9, 12):
self.assertTrue(derived_anonymous_header[i] == anonymous_header[6] + "_" + str(i - 9))
def test_generate_derived_header_with_extend(self):
anonymous_generator = Anonymous(role="guest", party_id=10000)
schema = dict(header=["feature" + str(i) for i in range(100)])
new_schema = anonymous_generator.generate_anonymous_header(schema)
anonymous_header = new_schema["anonymous_header"]
extend_columns1 = ["e0", "e1", "e3"]
extend_header = schema["header"] + extend_columns1
anonymous_header1 = anonymous_generator.extend_columns(anonymous_header, extend_columns1)
derived_dict = {"e0": ["feature5_f0", "feature5_f1", "feature5_f2", "feature5_f3"],
"e3": ["feature6_e1", "feature6_e2", "feature6_e3"]}
derived_anonymous_header = anonymous_generator.generate_derived_header(
original_header=extend_header,
original_anonymous_header=anonymous_header1,
derived_dict=derived_dict)
for i in range(100, 104):
self.assertTrue(derived_anonymous_header[i] == anonymous_header1[100] + "_" + str(i - 100))
        for i in range(105, 108):
self.assertTrue(derived_anonymous_header[i] == anonymous_header1[102] + "_" + str(i - 105))
def test_update_anonymous_header_with_role(self):
schema = dict(header=["feature" + str(i) for i in range(100)])
anonymous_header_without_role = Anonymous().generate_anonymous_header(schema)
schema["anonymous_header"] = anonymous_header_without_role["anonymous_header"]
schema = Anonymous.update_anonymous_header_with_role(schema, "guest", 10000)
anonymous_header = schema["anonymous_header"]
for i in range(100):
self.assertTrue(anonymous_header[i] == "_".join(["guest", "10000", "x" + str(i)]))
if __name__ == '__main__':
unittest.main()
| 5,864 | 46.298387 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/util/test/classify_label_checker_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import random
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.util.classify_label_checker import ClassifyLabelChecker, RegressionLabelChecker
class TestClassifyLabelChecker(unittest.TestCase):
def setUp(self):
session.init("test_label_checker")
self.small_label_set = [Instance(label=i % 5) for i in range(100)]
self.classify_inst = session.parallelize(self.small_label_set, include_key=False, partition=16)
self.regression_label = [Instance(label=random.random()) for i in range(100)]
self.regression_inst = session.parallelize(self.regression_label, partition=16, include_key=False)
self.classify_checker = ClassifyLabelChecker()
self.regression_checker = RegressionLabelChecker()
    def test_classify_label_checker(self):
num_class, classes = self.classify_checker.validate_label(self.classify_inst)
self.assertTrue(num_class == 5)
self.assertTrue(sorted(classes) == [0, 1, 2, 3, 4])
def test_regression_label_checker(self):
self.regression_checker.validate_label(self.regression_inst)
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| 1,901 | 37.04 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/util/test/sample_weight_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import unittest
import uuid
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.util.sample_weight import SampleWeight
class TestSampleWeight(unittest.TestCase):
def setUp(self):
session.init("test_sample_weight_" + str(uuid.uuid1()))
self.class_weight = {"0": 2, "1": 3}
data = []
for i in range(1, 11):
label = 1 if i % 5 == 0 else 0
instance = Instance(inst_id=i, features=np.random.random(3), label=label)
data.append((i, instance))
schema = {"header": ["x0", "x1", "x2"],
"sid": "id", "label_name": "y"}
self.table = session.parallelize(data, include_key=True, partition=8)
self.table.schema = schema
self.sample_weight_obj = SampleWeight()
def test_get_class_weight(self):
class_weight = self.sample_weight_obj.get_class_weight(self.table)
c_class_weight = {"1": 10 / 4, "0": 10 / 16}
self.assertDictEqual(class_weight, c_class_weight)
def test_replace_weight(self):
instance = self.table.first()
weighted_instance = self.sample_weight_obj.replace_weight(instance[1], self.class_weight)
self.assertEqual(weighted_instance.weight, self.class_weight[str(weighted_instance.label)])
def test_assign_sample_weight(self):
weighted_table = self.sample_weight_obj.assign_sample_weight(self.table, self.class_weight, None, False)
weighted_table.mapValues(lambda v: self.assertEqual(v.weight, self.class_weight[str(v.label)]))
def test_get_weight_loc(self):
c_loc = 2
loc = self.sample_weight_obj.get_weight_loc(self.table, "x2")
self.assertEqual(loc, c_loc)
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| 2,509 | 35.376812 | 112 |
py
|
FATE
|
FATE-master/python/federatedml/util/test/early_stop_test.py
|
import unittest
from federatedml.callbacks.validation_strategy import ValidationStrategy
import numpy as np
from federatedml.util import consts
from federatedml.param.evaluation_param import EvaluateParam
class TestValidationStrategy(unittest.TestCase):
def setUp(self) -> None:
self.role = 'guest'
self.mode = 'hetero'
self.early_stopping_round = 1
self.use_first_metric_only = False
@staticmethod
def generate_fake_eval_metrics(total_rounds, decrease_round, metrics=['ks', 'auc'], start_val=0.8):
assert total_rounds >= decrease_round
eval_result_list = []
start_decrease_round = total_rounds - decrease_round
for i in range(total_rounds):
if i < start_decrease_round:
start_val += 0.01
else:
start_val -= 0.01
eval_dict = {metric: start_val for metric in metrics}
eval_result_list.append(eval_dict)
return eval_result_list
def test_early_stopping(self):
test_rounds = [i for i in range(10, 100)]
decrease_rounds = [np.random.randint(i) for i in test_rounds]
for test_round, decrease_round in zip(test_rounds, decrease_rounds):
eval_dicts = self.generate_fake_eval_metrics(test_round, decrease_round, )
self.early_stopping_round = decrease_round - 1
if self.early_stopping_round <= 0:
continue
validation_strategy = ValidationStrategy(
self.role,
self.mode,
early_stopping_rounds=self.early_stopping_round,
use_first_metric_only=self.use_first_metric_only)
for idx, eval_res in enumerate(eval_dicts):
validation_strategy.performance_recorder.update(eval_res)
check_rs = validation_strategy.check_early_stopping()
if check_rs:
self.assertTrue((test_round - decrease_round + self.early_stopping_round - 1) == idx)
print('test checking passed')
break
def test_use_first_metric_only(self):
def evaluate(param, early_stopping_rounds, use_first_metric_only):
eval_type = param.eval_type
metric_list = param.metrics
first_metric = None
if early_stopping_rounds and use_first_metric_only and len(metric_list) != 0:
single_metric_list = None
if eval_type == consts.BINARY:
single_metric_list = consts.BINARY_SINGLE_VALUE_METRIC
elif eval_type == consts.REGRESSION:
single_metric_list = consts.REGRESSION_SINGLE_VALUE_METRICS
elif eval_type == consts.MULTY:
single_metric_list = consts.MULTI_SINGLE_VALUE_METRIC
for metric in metric_list:
if metric in single_metric_list:
first_metric = metric
break
return first_metric
param_0 = EvaluateParam(metrics=['roc', 'lift', 'ks', 'auc', 'gain'], eval_type='binary')
param_1 = EvaluateParam(metrics=['acc', 'precision', 'auc'], eval_type='binary')
param_2 = EvaluateParam(metrics=['acc', 'precision', 'gain', 'recall', 'lift'], eval_type='binary')
param_3 = EvaluateParam(metrics=['acc', 'precision', 'gain', 'auc', 'recall'], eval_type='multi')
print(evaluate(param_0, 10, True))
print(evaluate(param_1, 10, True))
print(evaluate(param_2, 10, True))
print(evaluate(param_3, 10, True))
def test_best_iter(self):
test_rounds = [i for i in range(10, 100)]
decrease_rounds = [np.random.randint(i) for i in test_rounds]
for test_round, decrease_round in zip(test_rounds, decrease_rounds):
eval_dicts = self.generate_fake_eval_metrics(test_round, decrease_round, )
self.early_stopping_round = decrease_round - 1
if self.early_stopping_round <= 0:
continue
validation_strategy = ValidationStrategy(self.role, self.mode,
early_stopping_rounds=self.early_stopping_round,
use_first_metric_only=self.use_first_metric_only)
for idx, eval_res in enumerate(eval_dicts):
validation_strategy.performance_recorder.update(eval_res)
check_rs = validation_strategy.check_early_stopping()
if check_rs:
best_perform = validation_strategy.performance_recorder.cur_best_performance
self.assertDictEqual(best_perform, eval_dicts[test_round - decrease_round - 1])
print('best iter checking passed')
break
def test_homo_checking(self):
try:
validation_strategy = ValidationStrategy(self.role, mode='homo',
early_stopping_rounds=1)
except Exception as e:
# throwing an error is expected
print(e)
print('error detected {}, homo checking passed'.format(e))
if __name__ == '__main__':
tvs = TestValidationStrategy()
tvs.setUp()
tvs.test_use_first_metric_only()
# tvs.test_early_stopping()
# tvs.test_best_iter()
# tvs.test_homo_checking() # expect checking error !!!
| 5,471 | 39.235294 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/util/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/util/test/param_extract_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import os
from federatedml.util.param_extract import ParamExtract
from federatedml.param import InitParam
from federatedml.param.boosting_param import HeteroSecureBoostParam
class TestParamExtract(unittest.TestCase):
def setUp(self):
self.init_param = InitParam()
self.boosting_tree_param = HeteroSecureBoostParam()
self.config_dict = \
{"ComponentParam": {
"init_param": {"init_method": "test_init", "fit_intercept": False},
"tree_param": {"criterion_method": "test_decisiontree"},
"task_type": "test_boostingtree",
"test_variable": "test"}
}
def test_directly_extract(self):
boosting_tree_param = HeteroSecureBoostParam()
extractor = ParamExtract()
boosting_tree_param = extractor.parse_param_from_config(boosting_tree_param, self.config_dict)
self.assertTrue(boosting_tree_param.task_type == "test_boostingtree")
def test_undefine_variable_extract(self):
boosting_tree_param = HeteroSecureBoostParam()
extractor = ParamExtract()
boosting_tree_param = extractor.parse_param_from_config(boosting_tree_param, self.config_dict)
self.assertTrue(not hasattr(boosting_tree_param, "test_variable"))
def test_param_embedding(self):
boosting_tree_param = HeteroSecureBoostParam()
extractor = ParamExtract()
boosting_tree_param = extractor.parse_param_from_config(boosting_tree_param, self.config_dict)
print("boosting_tree_param.tree_param.criterion_method {}".format(
boosting_tree_param.tree_param.criterion_method))
self.assertTrue(boosting_tree_param.tree_param.criterion_method == "test_decisiontree")
if __name__ == '__main__':
unittest.main()
| 2,433 | 39.566667 | 102 |
py
|
FATE
|
FATE-master/python/federatedml/util/test/label_transform_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import uuid
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.statistic.data_overview import predict_detail_dict_to_str
from federatedml.util.label_transform import LabelTransformer
class TestLabelTransform(unittest.TestCase):
def setUp(self):
session.init("test_label_transform_" + str(uuid.uuid1()))
self.label_encoder = {"yes": 1, "no": 0}
self.predict_label_encoder = {1: "yes", 0: "no"}
data = []
for i in range(1, 11):
label = "yes" if i % 5 == 0 else "no"
instance = Instance(inst_id=i, features=np.random.random(3), label=label)
data.append((i, instance))
schema = {"header": ["x0", "x1", "x2"],
"sid": "id", "label_name": "y"}
self.table = session.parallelize(data, include_key=True, partition=8)
self.table.schema = schema
self.label_transformer_obj = LabelTransformer()
def test_get_label_encoder(self):
self.label_transformer_obj.update_label_encoder(self.table)
c_label_encoder = {"yes": 1, "no": 0}
self.assertDictEqual(self.label_transformer_obj.label_encoder, c_label_encoder)
def test_replace_instance_label(self):
instance = self.table.first()[1]
replaced_instance = self.label_transformer_obj.replace_instance_label(instance, self.label_encoder)
self.assertEqual(replaced_instance.label, self.label_encoder[instance.label])
def test_transform_data_label(self):
replaced_data = self.label_transformer_obj.transform_data_label(self.table, self.label_encoder)
replaced_data.join(self.table, lambda x, y: self.assertEqual(x.label, self.label_encoder[y.label]))
def test_replace_predict_label(self):
true_label, predict_label, predict_score, predict_detail, predict_type = 1, 0, 0.1, {
"1": 0.1, "0": 0.9}, "train"
predict_detail = predict_detail_dict_to_str(predict_detail)
predict_result = Instance(inst_id=0,
features=[true_label, predict_label, predict_score, predict_detail, predict_type])
r_predict_instance = self.label_transformer_obj.replace_predict_label(
predict_result, self.predict_label_encoder)
r_predict_result = r_predict_instance.features
c_predict_detail = predict_detail_dict_to_str({"yes": 0.1, "no": 0.9})
c_predict_result = ["yes", "no", predict_score, c_predict_detail, predict_type]
self.assertEqual(r_predict_result, c_predict_result)
if __name__ == '__main__':
unittest.main()
| 3,293 | 42.92 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/test/spdz_test/spdz_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import time
from prettytable import PrettyTable, ORGMODE
from fate_arch.session import computing_session as session, get_parties
from federatedml.secureprotol.spdz import SPDZ
from federatedml.model_base import ModelBase, ComponentOutput
from federatedml.test.spdz_test.spdz_test_param import SPDZTestParam
from federatedml.util import LOGGER
from federatedml.secureprotol.spdz.tensor.fixedpoint_table import FixedPointTensor as TableTensor
from federatedml.secureprotol.spdz.tensor.fixedpoint_numpy import FixedPointTensor as NumpyTensor
class SPDZTest(ModelBase):
def __init__(self):
super(SPDZTest, self).__init__()
self.data_num = None
self.data_partition = None
self.seed = None
self.test_round = None
self.tracker = None
"""plaintest data"""
self.int_data_x = None
self.float_data_x = None
self.int_data_y = None
self.float_data_y = None
self.model_param = SPDZTestParam()
self.parties = None
self.local_party = None
self.other_party = None
self._set_parties()
self.metric = None
self.operation = None
self.test_count = None
self.op_test_list = ["float_add", "int_add", "float_sub", "int_sub", "float_dot", "int_dot"]
self._summary = {"op_test_list": self.op_test_list,
"tensor_type": ["numpy", "table"],
"numpy": {},
"table": {}}
def _init_runtime_parameters(self, cpn_input):
self.model_param.update(cpn_input.parameters)
self.tracker = cpn_input.tracker
self._init_model()
def _init_model(self):
self.data_num = self.model_param.data_num
self.data_partition = self.model_param.data_partition
self.seed = self.model_param.seed
self.test_round = self.model_param.test_round
self.data_lower_bound = self.model_param.data_lower_bound
self.data_upper_bound = self.model_param.data_upper_bound
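        # note: the configured bounds read above are immediately overridden with a fixed [0, 100) range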
self.data_lower_bound = 0
self.data_upper_bound = 100
def _set_parties(self):
parties = []
guest_parties = get_parties().roles_to_parties(["guest"])
host_parties = get_parties().roles_to_parties(["host"])
parties.extend(guest_parties)
parties.extend(host_parties)
local_party = get_parties().local_party
other_party = parties[0] if parties[0] != local_party else parties[1]
self.parties = parties
self.local_party = local_party
self.other_party = other_party
def _init_data(self):
np.random.seed(self.seed)
self.int_data_x = np.random.randint(int(self.data_lower_bound), int(self.data_upper_bound), size=self.data_num)
self.float_data_x = np.random.uniform(self.data_lower_bound, self.data_upper_bound, size=self.data_num)
self.int_data_y = np.random.randint(int(self.data_lower_bound), int(self.data_upper_bound), size=self.data_num)
self.float_data_y = np.random.uniform(self.data_lower_bound, self.data_upper_bound, size=self.data_num)
def _test_spdz(self):
table_list = []
table_int_data_x, table_float_data_x = None, None
table_int_data_y, table_float_data_y = None, None
if self.local_party.role == "guest":
table_int_data_x = session.parallelize(self.int_data_x,
include_key=False,
partition=self.data_partition)
table_int_data_x = table_int_data_x.mapValues(lambda x: np.array([x]))
table_float_data_x = session.parallelize(self.float_data_x,
include_key=False,
partition=self.data_partition)
table_float_data_x = table_float_data_x.mapValues(lambda x: np.array([x]))
else:
table_int_data_y = session.parallelize(self.int_data_y,
include_key=False,
partition=self.data_partition)
table_int_data_y = table_int_data_y.mapValues(lambda y: np.array([y]))
table_float_data_y = session.parallelize(self.float_data_y,
include_key=False,
partition=self.data_partition)
table_float_data_y = table_float_data_y.mapValues(lambda y: np.array([y]))
for tensor_type in ["numpy", "table"]:
table = PrettyTable()
table.set_style(ORGMODE)
field_name = ["DataType", "One time consumption", f"{self.data_num} times consumption",
"relative acc", "log2 acc", "operations per second"]
self._summary["field_name"] = field_name
table.field_names = field_name
with SPDZ(local_party=self.local_party, all_parties=self.parties) as spdz:
for op_type in self.op_test_list:
start_time = time.time()
for epoch in range(self.test_round):
LOGGER.info(f"test spdz, tensor_type: {tensor_type}, op_type: {op_type}, epoch: {epoch}")
tag = "_".join([tensor_type, op_type, str(epoch)])
spdz.set_flowid(tag)
if self.local_party.role == "guest":
if tensor_type == "table":
if op_type.startswith("int"):
fixed_point_x = TableTensor.from_source("int_x_" + tag, table_int_data_x)
fixed_point_y = TableTensor.from_source("int_y_" + tag, self.other_party)
else:
fixed_point_x = TableTensor.from_source("float_x_" + tag, table_float_data_x)
fixed_point_y = TableTensor.from_source("float_y_" + tag, self.other_party)
else:
if op_type.startswith("int"):
fixed_point_x = NumpyTensor.from_source("int_x_" + tag, self.int_data_x)
fixed_point_y = NumpyTensor.from_source("int_y_" + tag, self.other_party)
else:
fixed_point_x = NumpyTensor.from_source("float_x_" + tag, self.float_data_x)
fixed_point_y = NumpyTensor.from_source("float_y_" + tag, self.other_party)
else:
if tensor_type == "table":
if op_type.startswith("int"):
fixed_point_y = TableTensor.from_source("int_y_" + tag, table_int_data_y)
fixed_point_x = TableTensor.from_source("int_x_" + tag, self.other_party)
else:
fixed_point_y = TableTensor.from_source("float_y_" + tag, table_float_data_y)
fixed_point_x = TableTensor.from_source("float_x_" + tag, self.other_party)
else:
if op_type.startswith("int"):
fixed_point_y = NumpyTensor.from_source("int_y_" + tag, self.int_data_y)
fixed_point_x = NumpyTensor.from_source("int_x_" + tag, self.other_party)
else:
fixed_point_y = NumpyTensor.from_source("float_y_" + tag, self.float_data_y)
fixed_point_x = NumpyTensor.from_source("float_x_" + tag, self.other_party)
ret = self.calculate_ret(op_type, tensor_type, fixed_point_x, fixed_point_y)
total_time = time.time() - start_time
self.output_table(op_type, table, tensor_type, total_time, ret)
table_list.append(table)
self.tracker.log_component_summary(self._summary)
for table in table_list:
LOGGER.info(table)
def calculate_ret(self, op_type, tensor_type,
fixed_point_x, fixed_point_y,
):
if op_type.endswith("add"):
ret = (fixed_point_x + fixed_point_y).get()
elif op_type.endswith("sub"):
ret = (fixed_point_x - fixed_point_y).get()
else:
ret = (fixed_point_x.dot(fixed_point_y)).get()[0]
if tensor_type == "table":
ret = ret[0]
if tensor_type == "table" and not op_type.endswith("dot"):
arr = [None] * self.data_num
for k, v in ret.collect():
arr[k] = v[0]
ret = np.array(arr)
return ret
def output_table(self, op_type, table, tensor_type, total_time, spdz_ret):
if op_type.startswith("int"):
data_x = self.int_data_x
data_y = self.int_data_y
else:
data_x = self.float_data_x
data_y = self.float_data_y
numpy_ret = None
if op_type.endswith("add") or op_type.endswith("sub"):
start = time.time()
for i in range(self.test_round):
if op_type.endswith("add"):
numpy_ret = data_x + data_y
else:
numpy_ret = data_x - data_y
plain_text_time = time.time() - start
relative_acc = 0
for np_x, spdz_x in zip(numpy_ret, spdz_ret):
relative_acc += abs(np_x - spdz_x) / max(abs(np_x), abs(spdz_x) + 1e-15)
else:
start = time.time()
for i in range(self.test_round):
numpy_ret = np.dot(data_x, data_y)
plain_text_time = time.time() - start
relative_acc = abs(numpy_ret - spdz_ret) / max(abs(numpy_ret), abs(spdz_ret))
relative_acc /= self.data_num
log2_acc = -np.log2(relative_acc) if relative_acc != 0 else 0
row_info = [op_type, total_time / self.data_num / self.test_round, total_time / self.test_round,
relative_acc, log2_acc, int(self.data_num * self.test_round / total_time)]
table.add_row(row_info)
self._summary[tensor_type][op_type] = row_info
return table.get_string(title=f"SPDZ {tensor_type} Computational performance")
def run(self, cpn_input):
LOGGER.info("begin to init parameters of secure add example")
self._init_runtime_parameters(cpn_input)
LOGGER.info("begin to make data")
self._init_data()
self._test_spdz()
return ComponentOutput(self.save_data(), self.export_model(), self.save_cache())
| 11,617 | 46.036437 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/test/spdz_test/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/test/spdz_test/spdz_test_param.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.param.base_param import BaseParam
class SPDZTestParam(BaseParam):
def __init__(self, data_num=10000, test_round=1, data_partition=4, seed=123,
data_lower_bound=-1000000000, data_upper_bound=1000000000):
self.data_num = data_num
self.test_round = test_round
self.seed = seed
self.data_partition = data_partition
self.data_lower_bound = data_lower_bound
self.data_upper_bound = data_upper_bound
def check(self):
if self.seed is None or not isinstance(self.seed, int):
raise ValueError("random seed should be integer")
if not isinstance(self.test_round, int) or self.test_round < 1:
raise ValueError("test_round should be positive integer")
if not isinstance(self.data_num, int) or self.data_num < 1:
raise ValueError("data_num should be positive integer")
if not isinstance(self.data_partition, int) or self.data_partition < 1:
raise ValueError("data partition should be positive integer")
if not isinstance(self.data_upper_bound, (int, float)) or not isinstance(self.data_lower_bound, (int, float)):
raise ValueError("bound of data should be numeric")
| 1,911 | 39.680851 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/optim/activation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
def hard_sigmoid(x):
y = 0.2 * x + 0.5
return np.clip(y, 0, 1)
def softmax(x, axis=-1):
y = np.exp(x - np.max(x, axis, keepdims=True))
return y / np.sum(y, axis, keepdims=True)
def sigmoid(x):
if x <= 0:
a = np.exp(x)
a /= (1. + a)
else:
a = 1. / (1. + np.exp(-x))
return a
def softplus(x):
return np.log(1. + np.exp(x))
def softsign(x):
return x / (1 + np.abs(x))
def tanh(x):
return np.tanh(x)
def log_logistic(x):
if x <= 0:
a = x - np.log(1 + np.exp(x))
else:
a = - np.log(1 + np.exp(-x))
return a
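# Minimal numeric sketch of the activations defined above.
if __name__ == "__main__":
    print(sigmoid(0.0))  # 0.5
    print(tanh(np.array([-1.0, 0.0, 1.0])))  # approximately [-0.762, 0., 0.762]
    probs = softmax(np.array([1.0, 2.0, 3.0]))
    print(probs, probs.sum())  # probabilities sum to 1.0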
| 1,244 | 20.842105 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/optim/initialize.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Iterable
import numpy as np
from federatedml.statistic import statics
from federatedml.util import LOGGER
class Initializer(object):
def zeros(self, data_shape, fit_intercept, data_instances):
"""
        If fit_intercept is set, initializing b with the following formula gives a faster convergence rate:
b = log(P(1)/P(0))
"""
inits = np.zeros(data_shape)
if fit_intercept and data_instances is not None:
static_obj = statics.MultivariateStatisticalSummary(data_instances, cols_index=-1)
            label_histogram = static_obj.get_label_histogram()
            LOGGER.debug("label_histogram is : {}".format(label_histogram))
            one_count = label_histogram.get(1)
            zero_count = label_histogram.get(0, 0) + label_histogram.get(-1, 0)
init_intercept = np.log((one_count / zero_count))
inits[-1] = init_intercept
return inits
def random_normal(self, data_shape):
if isinstance(data_shape, Iterable):
inits = np.random.randn(*data_shape)
else:
inits = np.random.randn(data_shape)
return inits
def random_uniform(self, data_shape):
if isinstance(data_shape, Iterable):
inits = np.random.rand(*data_shape)
else:
inits = np.random.rand(data_shape)
return inits
def constant(self, data_shape, const):
inits = np.ones(data_shape) * const
return inits
def ones(self, data_shape):
inits = np.ones(data_shape)
return inits
def init_model(self, model_shape, init_params, data_instance=None):
init_method = init_params.init_method
fit_intercept = init_params.fit_intercept
random_seed = init_params.random_seed
np.random.seed(random_seed)
if fit_intercept:
if isinstance(model_shape, int):
model_shape += 1
else:
new_shape = []
for ds in model_shape:
new_shape.append(ds + 1)
model_shape = tuple(new_shape)
if init_method == 'random_normal':
w = self.random_normal(model_shape)
elif init_method == 'random_uniform':
w = self.random_uniform(model_shape)
elif init_method == 'ones':
w = self.ones(model_shape)
elif init_method == 'zeros':
w = self.zeros(model_shape, fit_intercept, data_instance)
elif init_method == 'const':
init_const = init_params.init_const
w = self.constant(model_shape, const=init_const)
else:
raise NotImplementedError("Initial method cannot be recognized: {}".format(init_method))
# LOGGER.debug("Inited model is :{}".format(w))
return w
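# --- Illustrative sketch (editor's addition, not part of the original module) ---
# With fit_intercept enabled, Initializer.zeros seeds the bias with
# b = log(P(y=1) / P(y=0)), i.e. the log-odds of the label prior. The helper
# below is hypothetical and only reproduces that arithmetic on plain counts.
def _demo_intercept_from_label_counts(one_count=30, zero_count=70):
    import numpy as np
    return np.log(one_count / zero_count)   # e.g. log(30 / 70) ≈ -0.847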
| 3,459 | 35.041667 | 100 |
py
|
FATE
|
FATE-master/python/federatedml/optim/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/optim/convergence.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.util import LOGGER
from federatedml.util import fate_operator
class _ConvergeFunction:
def __init__(self, eps):
self.eps = eps
def is_converge(self, loss): pass
class _DiffConverge(_ConvergeFunction):
"""
Judge convergence by the difference between two iterations.
If the difference is smaller than eps, converge flag will be provided.
"""
def __init__(self, eps):
super().__init__(eps=eps)
self.pre_loss = None
def is_converge(self, loss):
LOGGER.debug("In diff converge function, pre_loss: {}, current_loss: {}".format(self.pre_loss, loss))
converge_flag = False
if self.pre_loss is None:
pass
elif abs(self.pre_loss - loss) < self.eps:
converge_flag = True
self.pre_loss = loss
return converge_flag
class _AbsConverge(_ConvergeFunction):
"""
    Judge convergence by the absolute loss value. When the loss value is smaller than eps,
    the converge flag will be provided.
"""
def is_converge(self, loss):
if loss <= self.eps:
converge_flag = True
else:
converge_flag = False
return converge_flag
class _WeightDiffConverge(_ConvergeFunction):
"""
    Use the 2-norm of the weight difference to judge whether training has converged.
"""
def __init__(self, eps):
super().__init__(eps=eps)
self.pre_weight = None
def is_converge(self, weight):
if self.pre_weight is None:
self.pre_weight = weight
return False
weight_diff = fate_operator.norm(self.pre_weight - weight)
self.pre_weight = weight
if weight_diff < self.eps * np.max([fate_operator.norm(weight), 1]):
return True
return False
def converge_func_factory(early_stop, tol):
# try:
# converge_func = param.converge_func
# eps = param.eps
# except AttributeError:
# raise AttributeError("Converge Function parameters has not been totally set")
if early_stop == 'diff':
return _DiffConverge(tol)
elif early_stop == 'weight_diff':
return _WeightDiffConverge(tol)
elif early_stop == 'abs':
return _AbsConverge(tol)
else:
raise NotImplementedError("Converge Function method cannot be recognized: {}".format(early_stop))
| 2,988 | 28.303922 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/optim/optimizer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.util import LOGGER, consts, paillier_check, ipcl_operator
class _Optimizer(object):
def __init__(self, learning_rate, alpha, penalty, decay, decay_sqrt, mu=0):
self.learning_rate = learning_rate
self.iters = 0
self.alpha = alpha
self.penalty = penalty
self.decay = decay
self.decay_sqrt = decay_sqrt
self.mu = mu
def decay_learning_rate(self):
if self.decay_sqrt:
lr = self.learning_rate / np.sqrt(1 + self.decay * self.iters)
else:
lr = self.learning_rate / (1 + self.decay * self.iters)
return lr
@property
def shrinkage_val(self):
this_step_size = self.learning_rate / np.sqrt(self.iters)
return self.alpha * this_step_size
def set_iters(self, iters):
self.iters = iters
def apply_gradients(self, grad):
raise NotImplementedError("Should not call here")
def _l1_updator(self, model_weights: LinearModelWeights, gradient):
coef_ = model_weights.coef_
if model_weights.fit_intercept:
gradient_without_intercept = gradient[: -1]
else:
gradient_without_intercept = gradient
new_weights = np.sign(coef_ - gradient_without_intercept) * np.maximum(0, np.abs(
coef_ - gradient_without_intercept) - self.shrinkage_val)
if model_weights.fit_intercept:
new_weights = np.append(new_weights, model_weights.intercept_)
new_weights[-1] -= gradient[-1]
new_param = LinearModelWeights(new_weights, model_weights.fit_intercept, model_weights.raise_overflow_error)
# LOGGER.debug("In _l1_updator, original weight: {}, new_weights: {}".format(
# model_weights.unboxed, new_weights
# ))
return new_param
def _l2_updator(self, lr_weights: LinearModelWeights, gradient):
"""
        For L2 regularization, the regularization term has already been added to the gradients.
"""
new_weights = lr_weights.unboxed - gradient
new_param = LinearModelWeights(new_weights, lr_weights.fit_intercept, lr_weights.raise_overflow_error)
return new_param
def add_regular_to_grad(self, grad, lr_weights):
if self.penalty == consts.L2_PENALTY:
if paillier_check.is_single_ipcl_encrypted_number(lr_weights.unboxed):
grad_ct = ipcl_operator.merge_encrypted_number_array(grad)
grad_ct = np.array(grad_ct)
if lr_weights.fit_intercept:
alpha = np.append(np.ones(len(grad) - 1) * self.alpha, 0.0)
new_grad = grad_ct + lr_weights.unboxed.item(0) * alpha
else:
new_grad = grad_ct + self.alpha * lr_weights.coef_
else:
if lr_weights.fit_intercept:
gradient_without_intercept = grad[: -1]
gradient_without_intercept += self.alpha * lr_weights.coef_
new_grad = np.append(gradient_without_intercept, grad[-1])
else:
new_grad = grad + self.alpha * lr_weights.coef_
else:
new_grad = grad
return new_grad
def regularization_update(self, model_weights: LinearModelWeights, grad,
prev_round_weights: LinearModelWeights = None):
# LOGGER.debug(f"In regularization_update, input model_weights: {model_weights.unboxed}")
if self.penalty == consts.L1_PENALTY:
model_weights = self._l1_updator(model_weights, grad)
elif self.penalty == consts.L2_PENALTY:
model_weights = self._l2_updator(model_weights, grad)
else:
new_vars = model_weights.unboxed - grad
model_weights = LinearModelWeights(new_vars,
model_weights.fit_intercept,
model_weights.raise_overflow_error)
if prev_round_weights is not None: # additional proximal term
coef_ = model_weights.unboxed
if model_weights.fit_intercept:
coef_without_intercept = coef_[: -1]
else:
coef_without_intercept = coef_
coef_without_intercept -= self.mu * (model_weights.coef_ - prev_round_weights.coef_)
if model_weights.fit_intercept:
new_coef_ = np.append(coef_without_intercept, coef_[-1])
else:
new_coef_ = coef_without_intercept
model_weights = LinearModelWeights(new_coef_,
model_weights.fit_intercept,
model_weights.raise_overflow_error)
return model_weights
def __l1_loss_norm(self, model_weights: LinearModelWeights):
coef_ = model_weights.coef_
loss_norm = np.sum(self.alpha * np.abs(coef_))
return loss_norm
def __l2_loss_norm(self, model_weights: LinearModelWeights):
coef_ = model_weights.coef_
loss_norm = 0.5 * self.alpha * np.dot(coef_, coef_)
return loss_norm
def __add_proximal(self, model_weights, prev_round_weights):
prev_round_coef_ = prev_round_weights.coef_
coef_ = model_weights.coef_
diff = coef_ - prev_round_coef_
loss_norm = self.mu * 0.5 * np.dot(diff, diff)
return loss_norm
def loss_norm(self, model_weights: LinearModelWeights, prev_round_weights: LinearModelWeights = None):
proximal_term = None
if prev_round_weights is not None:
proximal_term = self.__add_proximal(model_weights, prev_round_weights)
if self.penalty == consts.L1_PENALTY:
loss_norm_value = self.__l1_loss_norm(model_weights)
elif self.penalty == consts.L2_PENALTY:
loss_norm_value = self.__l2_loss_norm(model_weights)
else:
loss_norm_value = None
# additional proximal term
if loss_norm_value is None:
loss_norm_value = proximal_term
elif proximal_term is not None:
loss_norm_value += proximal_term
return loss_norm_value
def hess_vector_norm(self, delta_s: LinearModelWeights):
if self.penalty == consts.L1_PENALTY:
return LinearModelWeights(np.zeros_like(delta_s.unboxed),
fit_intercept=delta_s.fit_intercept,
raise_overflow_error=delta_s.raise_overflow_error)
elif self.penalty == consts.L2_PENALTY:
return LinearModelWeights(self.alpha * np.array(delta_s.unboxed),
fit_intercept=delta_s.fit_intercept,
raise_overflow_error=delta_s.raise_overflow_error)
else:
return LinearModelWeights(np.zeros_like(delta_s.unboxed),
fit_intercept=delta_s.fit_intercept,
raise_overflow_error=delta_s.raise_overflow_error)
def update_model(self, model_weights: LinearModelWeights, grad, prev_round_weights: LinearModelWeights = None,
has_applied=True):
if not has_applied:
grad = self.add_regular_to_grad(grad, model_weights)
delta_grad = self.apply_gradients(grad)
else:
delta_grad = grad
model_weights = self.regularization_update(model_weights, delta_grad, prev_round_weights)
return model_weights
class _SgdOptimizer(_Optimizer):
def apply_gradients(self, grad):
learning_rate = self.decay_learning_rate()
delta_grad = learning_rate * grad
# LOGGER.debug("In sgd optimizer, learning_rate: {}, delta_grad: {}".format(learning_rate, delta_grad))
return delta_grad
class _RMSPropOptimizer(_Optimizer):
def __init__(self, learning_rate, alpha, penalty, decay, decay_sqrt, mu):
super().__init__(learning_rate, alpha, penalty, decay, decay_sqrt)
self.rho = 0.99
self.opt_m = None
def apply_gradients(self, grad):
learning_rate = self.decay_learning_rate()
if self.opt_m is None:
self.opt_m = np.zeros_like(grad)
self.opt_m = self.rho * self.opt_m + (1 - self.rho) * np.square(grad)
self.opt_m = np.array(self.opt_m, dtype=np.float64)
delta_grad = learning_rate * grad / np.sqrt(self.opt_m + 1e-6)
return delta_grad
class _AdaGradOptimizer(_Optimizer):
def __init__(self, learning_rate, alpha, penalty, decay, decay_sqrt, mu):
super().__init__(learning_rate, alpha, penalty, decay, decay_sqrt)
self.opt_m = None
def apply_gradients(self, grad):
learning_rate = self.decay_learning_rate()
if self.opt_m is None:
self.opt_m = np.zeros_like(grad)
self.opt_m = self.opt_m + np.square(grad)
self.opt_m = np.array(self.opt_m, dtype=np.float64)
delta_grad = learning_rate * grad / (np.sqrt(self.opt_m) + 1e-7)
return delta_grad
class _NesterovMomentumSGDOpimizer(_Optimizer):
def __init__(self, learning_rate, alpha, penalty, decay, decay_sqrt, mu):
super().__init__(learning_rate, alpha, penalty, decay, decay_sqrt)
self.nesterov_momentum_coeff = 0.9
self.opt_m = None
def apply_gradients(self, grad):
learning_rate = self.decay_learning_rate()
if self.opt_m is None:
self.opt_m = np.zeros_like(grad)
v = self.nesterov_momentum_coeff * self.opt_m - learning_rate * grad
delta_grad = self.nesterov_momentum_coeff * self.opt_m - (1 + self.nesterov_momentum_coeff) * v
self.opt_m = v
# LOGGER.debug('In nesterov_momentum, opt_m: {}, v: {}, delta_grad: {}'.format(
# self.opt_m, v, delta_grad
# ))
return delta_grad
class _AdamOptimizer(_Optimizer):
def __init__(self, learning_rate, alpha, penalty, decay, decay_sqrt, mu):
super().__init__(learning_rate, alpha, penalty, decay, decay_sqrt)
self.opt_beta1 = 0.9
self.opt_beta2 = 0.999
self.opt_beta1_decay = 1.0
self.opt_beta2_decay = 1.0
self.opt_m = None
self.opt_v = None
def apply_gradients(self, grad):
learning_rate = self.decay_learning_rate()
if self.opt_m is None:
self.opt_m = np.zeros_like(grad)
if self.opt_v is None:
self.opt_v = np.zeros_like(grad)
self.opt_beta1_decay = self.opt_beta1_decay * self.opt_beta1
self.opt_beta2_decay = self.opt_beta2_decay * self.opt_beta2
self.opt_m = self.opt_beta1 * self.opt_m + (1 - self.opt_beta1) * grad
self.opt_v = self.opt_beta2 * self.opt_v + (1 - self.opt_beta2) * np.square(grad)
opt_m_hat = self.opt_m / (1 - self.opt_beta1_decay)
opt_v_hat = self.opt_v / (1 - self.opt_beta2_decay)
opt_v_hat = np.array(opt_v_hat, dtype=np.float64)
delta_grad = learning_rate * opt_m_hat / (np.sqrt(opt_v_hat) + 1e-8)
return delta_grad
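# --- Illustrative sketch (editor's addition, not part of the original module) ---
# One step of the Adam update performed by _AdamOptimizer.apply_gradients above,
# written out on a toy gradient so the bias-corrected moments are visible.
# The numbers are made up; the constants mirror the class defaults.
def _demo_adam_single_step():
    import numpy as np
    grad = np.array([0.2, -0.4])
    lr, beta1, beta2, eps = 0.1, 0.9, 0.999, 1e-8
    m = (1 - beta1) * grad                    # first moment after step 1 (m0 = 0)
    v = (1 - beta2) * np.square(grad)         # second moment after step 1 (v0 = 0)
    m_hat = m / (1 - beta1)                   # bias correction with beta1 ** 1
    v_hat = v / (1 - beta2)                   # bias correction with beta2 ** 1
    return lr * m_hat / (np.sqrt(v_hat) + eps)   # delta subtracted from the weights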
class _StochasticQuansiNewtonOptimizer(_Optimizer):
def __init__(self, learning_rate, alpha, penalty, decay, decay_sqrt, mu):
super().__init__(learning_rate, alpha, penalty, decay, decay_sqrt)
self.__opt_hess = None
def apply_gradients(self, grad):
learning_rate = self.decay_learning_rate()
# LOGGER.debug("__opt_hess is: {}".format(self.__opt_hess))
if self.__opt_hess is None:
delta_grad = learning_rate * grad
else:
delta_grad = learning_rate * self.__opt_hess.dot(grad)
# LOGGER.debug("In sqn updater, grad: {}, delta_grad: {}".format(grad, delta_grad))
return delta_grad
def set_hess_matrix(self, hess_matrix):
self.__opt_hess = hess_matrix
def optimizer_factory(param):
try:
optimizer_type = param.optimizer
learning_rate = param.learning_rate
alpha = param.alpha
penalty = param.penalty
decay = param.decay
decay_sqrt = param.decay_sqrt
if hasattr(param, 'mu'):
mu = param.mu
else:
mu = 0.0
init_params = [learning_rate, alpha, penalty, decay, decay_sqrt, mu]
except AttributeError:
raise AttributeError("Optimizer parameters has not been totally set")
LOGGER.debug("in optimizer_factory, optimizer_type: {}, learning_rate: {}, alpha: {}, penalty: {},"
"decay: {}, decay_sqrt: {}".format(optimizer_type, *init_params))
if optimizer_type == 'sgd':
return _SgdOptimizer(*init_params)
elif optimizer_type == 'nesterov_momentum_sgd':
return _NesterovMomentumSGDOpimizer(*init_params)
elif optimizer_type == 'rmsprop':
return _RMSPropOptimizer(*init_params)
elif optimizer_type == 'adam':
return _AdamOptimizer(*init_params)
elif optimizer_type == 'adagrad':
return _AdaGradOptimizer(*init_params)
elif optimizer_type == 'sqn':
return _StochasticQuansiNewtonOptimizer(*init_params)
else:
raise NotImplementedError("Optimize method cannot be recognized: {}".format(optimizer_type))
| 13,991 | 38.863248 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/optim/test/convergence_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import unittest
from federatedml.optim.convergence import converge_func_factory
class TestConvergeFunction(unittest.TestCase):
def test_diff_converge(self):
loss = 50
eps = 0.00001
# converge_func = DiffConverge(eps=eps)
converge_func = converge_func_factory(early_stop='diff', tol=eps)
iter_num = 0
pre_loss = loss
while iter_num < 500:
loss *= 0.5
converge_flag = converge_func.is_converge(loss)
if converge_flag:
break
iter_num += 1
pre_loss = loss
self.assertTrue(math.fabs(pre_loss - loss) <= eps)
def test_abs_converge(self):
loss = 50
eps = 0.00001
# converge_func = AbsConverge(eps=eps)
converge_func = converge_func_factory(early_stop='abs', tol=eps)
iter_num = 0
while iter_num < 500:
loss *= 0.5
converge_flag = converge_func.is_converge(loss)
if converge_flag:
break
iter_num += 1
self.assertTrue(math.fabs(loss) <= eps)
if __name__ == '__main__':
unittest.main()
| 1,784 | 29.775862 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/optim/test/initialize_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from federatedml.optim.initialize import Initializer
from federatedml.param import InitParam
from federatedml.util import consts
import numpy as np
class TestInitialize(unittest.TestCase):
def test_initializer(self):
initializer = Initializer()
data_shape = 10
init_param_obj = InitParam(init_method=consts.RANDOM_NORMAL,
init_const=20,
fit_intercept=False
)
model = initializer.init_model(model_shape=data_shape, init_params=init_param_obj)
model_shape = np.array(model).shape
self.assertTrue(model_shape == (10,))
if __name__ == '__main__':
unittest.main()
| 1,358 | 32.975 | 90 |
py
|
FATE
|
FATE-master/python/federatedml/optim/test/activation_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
import numpy as np
from federatedml.optim import activation
class TestConvergeFunction(unittest.TestCase):
def test_numeric_stability(self):
x_list = np.linspace(-709, 709, 10000)
# Original function
# a = 1. / (1. + np.exp(-x))
for x in x_list:
a1 = 1. / (1. + np.exp(-x))
a2 = activation.sigmoid(x)
self.assertTrue(np.abs(a1 - a2) < 1e-5)
if __name__ == '__main__':
unittest.main()
| 1,155 | 28.641026 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/optim/test/optimizer_test.py
|
import math
import unittest
import numpy as np
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.optim.optimizer import _SgdOptimizer
class TestInitialize(unittest.TestCase):
def test_optimizer(self):
model_weights = LinearModelWeights(np.array([0.10145129, 0.39987222, -0.96630206, -0.41208423, -0.24609715,
-0.70518652, 0.71478064, 0.57973894, 0.5703622, -0.45482125,
0.32676194, -0.00648212, 0.35542874, -0.26412695, -0.07964603,
1.2158522, -0.41255564, -0.01686044, -0.99897542, 1.56407211,
0.52040711, 0.24568055, 0.4880494, 0.52269909, -0.14431923,
0.03282471, 0.09437969, 0.21407206, -0.270922]), True)
prev_model_weights = LinearModelWeights(np.array([0.10194331, 0.40062114, -0.96597859, -0.41202348, -0.24587005,
-0.7047801, 0.71515712, 0.58045583, 0.57079086, -0.45473676,
0.32775863, -0.00633238, 0.35567219, -0.26343469, -0.07964763,
1.2165642, -0.41244749, -0.01589344, -0.99862982, 1.56498698,
0.52058152, 0.24572171, 0.48809946, 0.52272993, -0.14330367,
0.03283002, 0.09439601, 0.21433497, -0.27011673]), True)
prev_model_weights_null = None
eps = 0.00001
# 1: alpha = 0, no regularization
learning_rate = 0.2
alpha = 0
penalty = "L2"
decay = "0.2"
decay_sqrt = "true"
mu = 0.01
init_params = [learning_rate, alpha, penalty, decay, decay_sqrt, mu]
optimizer = _SgdOptimizer(*init_params)
loss_norm = optimizer.loss_norm(model_weights, prev_model_weights_null)
self.assertTrue(math.fabs(loss_norm) <= eps) # == 0
# 2
alpha = 0.1
init_params = [learning_rate, alpha, penalty, decay, decay_sqrt, mu]
optimizer = _SgdOptimizer(*init_params)
loss_norm = optimizer.loss_norm(model_weights, prev_model_weights_null)
print("loss_norm = {}".format(loss_norm))
self.assertTrue(math.fabs(loss_norm - 0.47661579875266186) <= eps)
# 3
loss_norm = optimizer.loss_norm(model_weights, prev_model_weights)
print("loss_norm = {}".format(loss_norm))
self.assertTrue(math.fabs(loss_norm - 0.47661583737200075) <= eps)
if __name__ == '__main__':
unittest.main()
| 2,785 | 46.220339 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/optim/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/hetero_poisson_gradient_and_loss.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.framework.hetero.sync import loss_sync
from federatedml.optim.gradient import hetero_linear_model_gradient
from federatedml.util.fate_operator import reduce_add, vec_dot
class Guest(hetero_linear_model_gradient.Guest, loss_sync.Guest):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.host_forward,
transfer_variables.fore_gradient,
transfer_variables.guest_gradient,
transfer_variables.guest_optim_gradient)
self._register_loss_sync(transfer_variables.host_loss_regular,
transfer_variables.loss,
transfer_variables.loss_intermediate)
def compute_gradient_procedure(self, data_instances, cipher, model_weights, optimizer,
n_iter_, batch_index, offset=None):
current_suffix = (n_iter_, batch_index)
fore_gradient = self.compute_and_aggregate_forwards(data_instances, model_weights, cipher,
batch_index, current_suffix, offset)
self.remote_fore_gradient(fore_gradient, suffix=current_suffix)
unilateral_gradient = self.compute_gradient(data_instances,
fore_gradient,
model_weights.fit_intercept)
if optimizer is not None:
unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)
optimized_gradient = self.update_gradient(unilateral_gradient, suffix=current_suffix)
return optimized_gradient
def compute_and_aggregate_forwards(self, data_instances, model_weights, cipher,
batch_index, current_suffix, offset=None):
'''
Compute gradients:
gradient = (1/N) * \\sum(exp(wx) - y) * x
Define exp(wx) as mu, named it as guest_forward or host_forward
Define (mu-y) as fore_gradient
Then, gradient = fore_gradient * x
'''
if offset is None:
raise ValueError("Offset should be provided when compute poisson forwards")
mu = data_instances.join(offset, lambda d, m: np.exp(vec_dot(d.features, model_weights.coef_)
+ model_weights.intercept_ + m))
self.forwards = mu
self.host_forwards = self.get_host_forward(suffix=current_suffix)
self.aggregated_forwards = self.forwards.join(self.host_forwards[0], lambda g, h: g * h)
fore_gradient = self.aggregated_forwards.join(data_instances, lambda mu, d: mu - d.label)
return fore_gradient
def compute_loss(self, data_instances, model_weights, n_iter_, batch_index, offset, loss_norm=None):
'''
Compute hetero poisson loss:
        loss = (1/N) * sum(mu_g * mu_h - y * (wx_g + wx_h + log(exposure))), where mu = exp(wx)
        Parameters
        ----------
data_instances: Table, input data
model_weights: model weight object, stores intercept_ and coef_
n_iter_: int, current number of iter.
batch_index: int, use to obtain current encrypted_calculator index
offset: log(exposure)
loss_norm: penalty term, default to None
'''
current_suffix = (n_iter_, batch_index)
n = data_instances.count()
guest_wx_y = data_instances.join(offset,
lambda v, m: (
vec_dot(v.features, model_weights.coef_) + model_weights.intercept_ + m,
v.label))
loss_list = []
host_wxs = self.get_host_loss_intermediate(current_suffix)
if loss_norm is not None:
host_loss_regular = self.get_host_loss_regular(suffix=current_suffix)
else:
host_loss_regular = []
if len(self.host_forwards) > 1:
raise ValueError("More than one host exists. Poisson regression does not support multi-host.")
host_mu = self.host_forwards[0]
host_wx = host_wxs[0]
loss_wx = guest_wx_y.join(host_wx, lambda g, h: g[1] * (g[0] + h)).reduce(reduce_add)
loss_mu = self.forwards.join(host_mu, lambda g, h: g * h).reduce(reduce_add)
loss = (loss_mu - loss_wx) / n
if loss_norm is not None:
loss = loss + loss_norm + host_loss_regular[0]
loss_list.append(loss)
self.sync_loss_info(loss_list, suffix=current_suffix)
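# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The per-sample quantities described in Guest.compute_and_aggregate_forwards,
# evaluated on plain numpy arrays for a single (guest-only) party without
# encryption. All numbers are made up.
def _demo_poisson_fore_gradient():
    import numpy as np
    X = np.array([[1.0, 0.0], [0.5, 2.0]])
    y = np.array([1.0, 3.0])
    coef, intercept = np.array([0.2, -0.1]), 0.05
    mu = np.exp(X.dot(coef) + intercept)      # forward: mu = exp(wx)
    fore_gradient = mu - y                    # d = mu - y
    return fore_gradient.dot(X) / len(y)      # gradient = (1/N) * sum(d * x)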
class Host(hetero_linear_model_gradient.Host, loss_sync.Host):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.host_forward,
transfer_variables.fore_gradient,
transfer_variables.host_gradient,
transfer_variables.host_optim_gradient)
self._register_loss_sync(transfer_variables.host_loss_regular,
transfer_variables.loss,
transfer_variables.loss_intermediate)
def compute_gradient_procedure(self, data_instances, cipher, model_weights,
optimizer,
n_iter_, batch_index):
"""
Linear model gradient procedure
        Step 1: get host forwards, which differ across algorithms
"""
current_suffix = (n_iter_, batch_index)
self.forwards = self.compute_forwards(data_instances, model_weights)
encrypted_forward = cipher.distribute_encrypt(self.forwards)
self.remote_host_forward(encrypted_forward, suffix=current_suffix)
fore_gradient = self.get_fore_gradient(suffix=current_suffix)
unilateral_gradient = self.compute_gradient(data_instances,
fore_gradient,
model_weights.fit_intercept)
if optimizer is not None:
unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)
optimized_gradient = self.update_gradient(unilateral_gradient, suffix=current_suffix)
return optimized_gradient
def compute_forwards(self, data_instances, model_weights):
mu = data_instances.mapValues(
lambda v: np.exp(vec_dot(v.features, model_weights.coef_) + model_weights.intercept_))
return mu
def compute_loss(self, data_instances, model_weights,
optimizer, n_iter_, batch_index, cipher):
'''
        Compute and send the host-side intermediate terms of the hetero poisson loss:
        encrypted wx_h and, if regularized, the encrypted host regularization penalty.
        Parameters
        ----------
data_instances: Table, input data
model_weights: model weight object, stores intercept_ and coef_
optimizer: optimizer object
n_iter_: int, current number of iter.
cipher: cipher for encrypt intermediate loss and loss_regular
'''
current_suffix = (n_iter_, batch_index)
self_wx = data_instances.mapValues(
lambda v: vec_dot(v.features, model_weights.coef_) + model_weights.intercept_)
en_wx = cipher.distribute_encrypt(self_wx)
self.remote_loss_intermediate(en_wx, suffix=current_suffix)
loss_regular = optimizer.loss_norm(model_weights)
if loss_regular is not None:
en_loss_regular = cipher.encrypt(loss_regular)
self.remote_loss_regular(en_loss_regular, suffix=current_suffix)
class Arbiter(hetero_linear_model_gradient.Arbiter, loss_sync.Arbiter):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.guest_gradient,
transfer_variables.host_gradient,
transfer_variables.guest_optim_gradient,
transfer_variables.host_optim_gradient)
self._register_loss_sync(transfer_variables.loss)
def compute_loss(self, cipher, n_iter_, batch_index):
'''
Decrypt loss from guest
'''
current_suffix = (n_iter_, batch_index)
loss_list = self.sync_loss_info(suffix=current_suffix)
de_loss_list = cipher.decrypt_list(loss_list)
return de_loss_list
| 9,203 | 42.828571 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/sqn_sync.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.util import consts
class SqnSyncBase(object):
def __init__(self):
self.batch_data_index_transfer = None
self.host_forwards_transfer = None
self.forward_hess = None
self.forward_hess_transfer = None
class Guest(SqnSyncBase):
def __init__(self):
super().__init__()
self.guest_hess_vector = None
def register_transfer_variable(self, transfer_variable):
self.batch_data_index_transfer = transfer_variable.sqn_sample_index
self.guest_hess_vector = transfer_variable.guest_hess_vector
self.host_forwards_transfer = transfer_variable.host_sqn_forwards
self.forward_hess_transfer = transfer_variable.forward_hess
def sync_sample_data(self, data_instances, sample_size, random_seed, suffix=tuple()):
n = data_instances.count()
if sample_size >= n:
sample_rate = 1.0
else:
sample_rate = sample_size / n
sampled_data = data_instances.sample(fraction=sample_rate, seed=random_seed)
batch_index = sampled_data.mapValues(lambda x: None)
self.batch_data_index_transfer.remote(obj=batch_index,
role=consts.HOST,
suffix=suffix)
return sampled_data
def get_host_forwards(self, suffix=tuple()):
host_forwards = self.host_forwards_transfer.get(idx=-1,
suffix=suffix)
return host_forwards
def remote_forward_hess(self, forward_hess, suffix=tuple()):
self.forward_hess_transfer.remote(obj=forward_hess,
role=consts.HOST,
suffix=suffix)
def sync_hess_vector(self, hess_vector, suffix):
self.guest_hess_vector.remote(obj=hess_vector,
role=consts.ARBITER,
suffix=suffix)
class Host(SqnSyncBase):
def __init__(self):
super().__init__()
self.host_hess_vector = None
def register_transfer_variable(self, transfer_variable):
self.batch_data_index_transfer = transfer_variable.sqn_sample_index
self.host_forwards_transfer = transfer_variable.host_sqn_forwards
self.host_hess_vector = transfer_variable.host_hess_vector
self.forward_hess_transfer = transfer_variable.forward_hess
def sync_sample_data(self, data_instances, suffix=tuple()):
batch_index = self.batch_data_index_transfer.get(idx=0,
suffix=suffix)
sample_data = data_instances.join(batch_index, lambda x, y: x)
return sample_data
def remote_host_forwards(self, host_forwards, suffix=tuple()):
self.host_forwards_transfer.remote(obj=host_forwards,
role=consts.GUEST,
suffix=suffix)
def get_forward_hess(self, suffix=tuple()):
forward_hess = self.forward_hess_transfer.get(idx=0,
suffix=suffix)
return forward_hess
def sync_hess_vector(self, hess_vector, suffix):
self.host_hess_vector.remote(obj=hess_vector,
role=consts.ARBITER,
suffix=suffix)
class Arbiter(object):
def __init__(self):
super().__init__()
self.guest_hess_vector = None
self.host_hess_vector = None
def register_transfer_variable(self, transfer_variable):
self.guest_hess_vector = transfer_variable.guest_hess_vector
self.host_hess_vector = transfer_variable.host_hess_vector
def sync_hess_vector(self, suffix):
guest_hess_vector = self.guest_hess_vector.get(idx=0,
suffix=suffix)
host_hess_vectors = self.host_hess_vector.get(idx=-1,
suffix=suffix)
host_hess_vectors = [x.reshape(-1) for x in host_hess_vectors]
        hess_vectors = np.hstack(host_hess_vectors)
hess_vectors = np.hstack((hess_vectors, guest_hess_vector))
return hess_vectors
| 5,004 | 39.04 | 89 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/hetero_lr_gradient_and_loss.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.framework.hetero.sync import loss_sync
from federatedml.optim.gradient import hetero_linear_model_gradient
from federatedml.util import LOGGER
from federatedml.util.fate_operator import reduce_add, vec_dot
class Guest(hetero_linear_model_gradient.Guest, loss_sync.Guest):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.host_forward_dict,
transfer_variables.fore_gradient,
transfer_variables.guest_gradient,
transfer_variables.guest_optim_gradient)
self._register_loss_sync(transfer_variables.host_loss_regular,
transfer_variables.loss,
transfer_variables.loss_intermediate)
def compute_half_d(self, data_instances, w, cipher, batch_index, current_suffix):
if self.use_sample_weight:
self.half_d = data_instances.mapValues(
lambda v: 0.25 * (vec_dot(v.features, w.coef_) + w.intercept_) * v.weight - 0.5 * v.label * v.weight)
else:
self.half_d = data_instances.mapValues(
lambda v: 0.25 * (vec_dot(v.features, w.coef_) + w.intercept_) - 0.5 * v.label)
# encrypted_half_d = cipher[batch_index].encrypt(self.half_d)
# self.fore_gradient_transfer.remote(encrypted_half_d, suffix=current_suffix)
return self.half_d
def compute_and_aggregate_forwards(self, data_instances, half_g, encrypted_half_g, batch_index,
current_suffix, offset=None):
"""
gradient = (1/N)*∑(1/2*ywx-1)*1/2yx = (1/N)*∑(0.25 * wx - 0.5 * y) * x, where y = 1 or -1
Define wx as guest_forward or host_forward
Define (0.25 * wx - 0.5 * y) as fore_gradient
"""
self.host_forwards = self.get_host_forward(suffix=current_suffix)
# fore_gradient = half_g
# for host_forward in self.host_forwards:
# fore_gradient = fore_gradient.join(host_forward, lambda g, h: g + h)
# fore_gradient = self.aggregated_forwards.join(data_instances, lambda wx, d: 0.25 * wx - 0.5 * d.label)
return self.host_forwards
def compute_loss(self, data_instances, w, n_iter_, batch_index, loss_norm=None, batch_masked=False):
"""
Compute hetero-lr loss for:
loss = (1/N)*∑(log2 - 1/2*ywx + 1/8*(wx)^2), where y is label, w is model weight and x is features
where (wx)^2 = (Wg * Xg + Wh * Xh)^2 = (Wg*Xg)^2 + (Wh*Xh)^2 + 2 * Wg*Xg * Wh*Xh
Then loss = log2 - (1/N)*0.5*∑ywx + (1/N)*0.125*[∑(Wg*Xg)^2 + ∑(Wh*Xh)^2 + 2 * ∑(Wg*Xg * Wh*Xh)]
        where Wh*Xh is a table obtained from the host and ∑(Wh*Xh)^2 is a scalar sum obtained from the host.
"""
current_suffix = (n_iter_, batch_index)
n = data_instances.count()
# host_wx_y = self.host_forwards[0].join(data_instances, lambda x, y: (x, y.label))
host_wx_y = data_instances.join(self.host_forwards[0], lambda y, x: (x, y.label))
self_wx_y = self.half_d.join(data_instances, lambda x, y: (x, y.label))
def _sum_ywx(wx_y):
sum1, sum2 = 0, 0
for _, (x, y) in wx_y:
if y == 1:
sum1 += x
else:
sum2 -= x
return sum1 + sum2
ywx = host_wx_y.applyPartitions(_sum_ywx).reduce(reduce_add) + \
self_wx_y.applyPartitions(_sum_ywx).reduce(reduce_add)
ywx = ywx * 4 + 2 * n
# quarter_wx = self.host_forwards[0].join(self.half_d, lambda x, y: x + y)
# ywx = quarter_wx.join(data_instances, lambda wx, d: wx * (4 * d.label) + 2).reduce(reduce_add)
half_wx = data_instances.mapValues(
lambda v: vec_dot(v.features, w.coef_) + w.intercept_)
self_wx_square = half_wx.mapValues(
lambda v: np.square(v)).reduce(reduce_add)
# self_wx_square = data_instances.mapValues(
# lambda v: np.square(vec_dot(v.features, w.coef_) + w.intercept_)).reduce(reduce_add)
loss_list = []
wx_squares = self.get_host_loss_intermediate(suffix=current_suffix)
if batch_masked:
wx_squares_sum = []
for square_table in wx_squares:
square_sum = data_instances.join(
square_table,
lambda inst,
enc_h_squares: enc_h_squares).reduce(
lambda x,
y: x + y)
wx_squares_sum.append(square_sum)
wx_squares = wx_squares_sum
if loss_norm is not None:
host_loss_regular = self.get_host_loss_regular(suffix=current_suffix)
else:
host_loss_regular = []
# for host_idx, host_forward in enumerate(self.host_forwards):
if len(self.host_forwards) > 1:
LOGGER.info("More than one host exist, loss is not available")
else:
host_forward = self.host_forwards[0]
wx_square = wx_squares[0]
wxg_wxh = half_wx.join(host_forward, lambda wxg, wxh: wxg * wxh).reduce(reduce_add)
loss = np.log(2) - 0.5 * (1 / n) * ywx + 0.125 * (1 / n) * \
(self_wx_square + wx_square + 8 * wxg_wxh)
if loss_norm is not None:
loss += loss_norm
loss += host_loss_regular[0]
loss_list.append(loss)
LOGGER.debug("In compute_loss, loss list are: {}".format(loss_list))
self.sync_loss_info(loss_list, suffix=current_suffix)
def compute_forward_hess(self, data_instances, delta_s, host_forwards):
"""
To compute Hessian matrix, y, s are needed.
g = (1/N)*∑(0.25 * wx - 0.5 * y) * x
y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x
define forward_hess = (1/N)*∑(0.25 * x * s)
"""
forwards = data_instances.mapValues(
lambda v: (vec_dot(v.features, delta_s.coef_) + delta_s.intercept_) * 0.25)
for host_forward in host_forwards:
forwards = forwards.join(host_forward, lambda g, h: g + (h * 0.25))
if self.use_sample_weight:
forwards = forwards.join(data_instances, lambda h, d: h * d.weight)
# forward_hess = forwards.mapValues(lambda x: 0.25 * x / sample_size)
hess_vector = self.compute_gradient(data_instances,
forwards,
delta_s.fit_intercept)
return forwards, np.array(hess_vector)
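# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The second-order Taylor approximation the loss formulas above rely on:
#     log(1 + exp(-y*wx)) ≈ log(2) - 0.5*y*wx + 0.125*(wx)^2,  y in {-1, +1},
# checked numerically on a few made-up margins (it is accurate for small |wx|).
def _demo_taylor_logloss():
    import numpy as np
    for ywx in (-0.5, -0.1, 0.0, 0.1, 0.5):
        exact = np.log(1 + np.exp(-ywx))
        approx = np.log(2) - 0.5 * ywx + 0.125 * ywx ** 2   # (ywx)^2 == (wx)^2 since y^2 == 1
        assert abs(exact - approx) < 5e-3
    return True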
class Host(hetero_linear_model_gradient.Host, loss_sync.Host):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.host_forward_dict,
transfer_variables.fore_gradient,
transfer_variables.host_gradient,
transfer_variables.host_optim_gradient)
self._register_loss_sync(transfer_variables.host_loss_regular,
transfer_variables.loss,
transfer_variables.loss_intermediate)
def compute_forwards(self, data_instances, model_weights):
"""
forwards = 1/4 * wx
"""
# wx = data_instances.mapValues(lambda v: vec_dot(v.features, model_weights.coef_) + model_weights.intercept_)
self.forwards = data_instances.mapValues(lambda v: 0.25 * vec_dot(v.features, model_weights.coef_))
return self.forwards
def compute_half_g(self, data_instances, w, cipher, batch_index):
half_g = data_instances.mapValues(
lambda v: vec_dot(v.features, w.coef_) * 0.25 + w.intercept_)
encrypt_half_g = cipher[batch_index].encrypt(half_g)
return half_g, encrypt_half_g
def compute_loss(self, lr_weights, optimizer, n_iter_, batch_index, cipher_operator, batch_masked=False):
"""
Compute hetero-lr loss for:
loss = (1/N)*∑(log2 - 1/2*ywx + 1/8*(wx)^2), where y is label, w is model weight and x is features
where (wx)^2 = (Wg * Xg + Wh * Xh)^2 = (Wg*Xg)^2 + (Wh*Xh)^2 + 2 * Wg*Xg * Wh*Xh
Then loss = log2 - (1/N)*0.5*∑ywx + (1/N)*0.125*[∑(Wg*Xg)^2 + ∑(Wh*Xh)^2 + 2 * ∑(Wg*Xg * Wh*Xh)]
        where Wh*Xh is a table obtained from the host and ∑(Wh*Xh)^2 is a scalar sum obtained from the host.
"""
current_suffix = (n_iter_, batch_index)
# self_wx_square = self.forwards.mapValues(lambda x: np.square(4 * x)).reduce(reduce_add)
self_wx_square = self.forwards.mapValues(lambda x: np.square(4 * x))
if not batch_masked:
self_wx_square = self_wx_square.reduce(reduce_add)
en_wx_square = cipher_operator.encrypt(self_wx_square)
else:
en_wx_square = self_wx_square.mapValues(lambda x: cipher_operator.encrypt(x))
self.remote_loss_intermediate(en_wx_square, suffix=current_suffix)
loss_regular = optimizer.loss_norm(lr_weights)
if loss_regular is not None:
en_loss_regular = cipher_operator.encrypt(loss_regular)
self.remote_loss_regular(en_loss_regular, suffix=current_suffix)
class Arbiter(hetero_linear_model_gradient.Arbiter, loss_sync.Arbiter):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.guest_gradient,
transfer_variables.host_gradient,
transfer_variables.guest_optim_gradient,
transfer_variables.host_optim_gradient)
self._register_loss_sync(transfer_variables.loss)
def compute_loss(self, cipher, n_iter_, batch_index):
"""
Compute hetero-lr loss for:
loss = (1/N)*∑(log2 - 1/2*ywx + 1/8*(wx)^2), where y is label, w is model weight and x is features
where (wx)^2 = (Wg * Xg + Wh * Xh)^2 = (Wg*Xg)^2 + (Wh*Xh)^2 + 2 * Wg*Xg * Wh*Xh
Then loss = log2 - (1/N)*0.5*∑ywx + (1/N)*0.125*[∑(Wg*Xg)^2 + ∑(Wh*Xh)^2 + 2 * ∑(Wg*Xg * Wh*Xh)]
        where Wh*Xh is a table obtained from the host and ∑(Wh*Xh)^2 is a scalar sum obtained from the host.
"""
if self.has_multiple_hosts:
LOGGER.info("Has more than one host, loss is not available")
return []
current_suffix = (n_iter_, batch_index)
loss_list = self.sync_loss_info(suffix=current_suffix)
de_loss_list = cipher.decrypt_list(loss_list)
return de_loss_list
| 11,297 | 45.68595 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/hetero_linear_model_gradient.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
import scipy.sparse as sp
from federatedml.feature.sparse_vector import SparseVector
from federatedml.statistic import data_overview
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util import fate_operator
from federatedml.util.fixpoint_solver import FixedPointEncoder
class HeteroGradientBase(object):
def __init__(self):
self.use_async = False
self.use_sample_weight = False
self.fixed_point_encoder = None
def compute_gradient_procedure(self, *args):
raise NotImplementedError("Should not call here")
def set_total_batch_nums(self, total_batch_nums):
"""
Use for sqn gradient.
"""
pass
def set_use_async(self):
self.use_async = True
def set_use_sync(self):
self.use_async = False
def set_use_sample_weight(self):
self.use_sample_weight = True
def set_fixed_float_precision(self, floating_point_precision):
if floating_point_precision is not None:
self.fixed_point_encoder = FixedPointEncoder(2**floating_point_precision)
@staticmethod
def __apply_cal_gradient(data, fixed_point_encoder, is_sparse):
all_g = None
for key, (feature, d) in data:
if is_sparse:
x = np.zeros(feature.get_shape())
for idx, v in feature.get_all_data():
x[idx] = v
feature = x
if fixed_point_encoder:
# g = (feature * 2 ** floating_point_precision).astype("int") * d
g = fixed_point_encoder.encode(feature) * d
else:
g = feature * d
if all_g is None:
all_g = g
else:
all_g += g
if all_g is None:
return all_g
elif fixed_point_encoder:
all_g = fixed_point_encoder.decode(all_g)
return all_g
def compute_gradient(self, data_instances, fore_gradient, fit_intercept, need_average=True):
"""
Compute hetero-regression gradient
Parameters
----------
data_instances: Table, input data
fore_gradient: Table, fore_gradient
fit_intercept: bool, if model has intercept or not
need_average: bool, gradient needs to be averaged or not
Returns
----------
Table
the hetero regression model's gradient
"""
# feature_num = data_overview.get_features_shape(data_instances)
# data_count = data_instances.count()
is_sparse = data_overview.is_sparse_data(data_instances)
LOGGER.debug("Use apply partitions")
feat_join_grad = data_instances.join(fore_gradient,
lambda d, g: (d.features, g))
f = functools.partial(self.__apply_cal_gradient,
fixed_point_encoder=self.fixed_point_encoder,
is_sparse=is_sparse)
gradient_sum = feat_join_grad.applyPartitions(f)
gradient_sum = gradient_sum.reduce(lambda x, y: x + y)
if fit_intercept:
# bias_grad = np.sum(fore_gradient)
bias_grad = fore_gradient.reduce(lambda x, y: x + y)
gradient_sum = np.append(gradient_sum, bias_grad)
if need_average:
gradient = gradient_sum / data_instances.count()
else:
gradient = gradient_sum
"""
else:
LOGGER.debug(f"Original_method")
feat_join_grad = data_instances.join(fore_gradient,
lambda d, g: (d.features, g))
f = functools.partial(self.__compute_partition_gradient,
fit_intercept=fit_intercept,
is_sparse=is_sparse)
gradient_partition = feat_join_grad.applyPartitions(f)
gradient_partition = gradient_partition.reduce(lambda x, y: x + y)
gradient = gradient_partition / data_count
"""
return gradient
class Guest(HeteroGradientBase):
def __init__(self):
super().__init__()
self.half_d = None
self.host_forwards = None
self.forwards = None
self.aggregated_forwards = None
def _register_gradient_sync(self, host_forward_transfer, fore_gradient_transfer,
guest_gradient_transfer, guest_optim_gradient_transfer):
self.host_forward_transfer = host_forward_transfer
self.fore_gradient_transfer = fore_gradient_transfer
self.unilateral_gradient_transfer = guest_gradient_transfer
self.unilateral_optim_gradient_transfer = guest_optim_gradient_transfer
def compute_and_aggregate_forwards(self, data_instances, model_weights,
cipher, batch_index, current_suffix, offset=None):
raise NotImplementedError("Function should not be called here")
def compute_half_d(self, data_instances, w, cipher, batch_index, current_suffix):
raise NotImplementedError("Function should not be called here")
def _asynchronous_compute_gradient(self, data_instances, model_weights, cipher, current_suffix):
LOGGER.debug("Called asynchronous gradient")
encrypted_half_d = cipher.distribute_encrypt(self.half_d)
self.remote_fore_gradient(encrypted_half_d, suffix=current_suffix)
half_g = self.compute_gradient(data_instances, self.half_d, False)
self.host_forwards = self.get_host_forward(suffix=current_suffix)
host_forward = self.host_forwards[0]
host_half_g = self.compute_gradient(data_instances, host_forward, False)
unilateral_gradient = half_g + host_half_g
if model_weights.fit_intercept:
n = data_instances.count()
intercept = (host_forward.reduce(lambda x, y: x + y) + self.half_d.reduce(lambda x, y: x + y)) / n
unilateral_gradient = np.append(unilateral_gradient, intercept)
return unilateral_gradient
def _centralized_compute_gradient(self, data_instances, model_weights, cipher, current_suffix, masked_index=None):
self.host_forwards = self.get_host_forward(suffix=current_suffix)
fore_gradient = self.half_d
batch_size = data_instances.count()
partial_masked_index_enc = None
if masked_index:
masked_index = masked_index.mapValues(lambda value: 0)
masked_index_to_encrypt = masked_index.subtractByKey(self.half_d)
partial_masked_index_enc = cipher.distribute_encrypt(masked_index_to_encrypt)
for host_forward in self.host_forwards:
if self.use_sample_weight:
# host_forward = host_forward.join(data_instances, lambda h, v: h * v.weight)
host_forward = data_instances.join(host_forward, lambda v, h: h * v.weight)
fore_gradient = fore_gradient.join(host_forward, lambda x, y: x + y)
def _apply_obfuscate(val):
val.apply_obfuscator()
return val
fore_gradient = fore_gradient.mapValues(lambda val: _apply_obfuscate(val) / batch_size)
if partial_masked_index_enc:
masked_fore_gradient = partial_masked_index_enc.union(fore_gradient)
self.remote_fore_gradient(masked_fore_gradient, suffix=current_suffix)
else:
self.remote_fore_gradient(fore_gradient, suffix=current_suffix)
# self.remote_fore_gradient(fore_gradient, suffix=current_suffix)
unilateral_gradient = self.compute_gradient(data_instances, fore_gradient,
model_weights.fit_intercept, need_average=False)
return unilateral_gradient
def compute_gradient_procedure(self, data_instances, cipher, model_weights, optimizer,
n_iter_, batch_index, offset=None, masked_index=None):
"""
Linear model gradient procedure
        Step 1: get host forwards, which differ across algorithms
For Logistic Regression and Linear Regression: forwards = wx
For Poisson Regression, forwards = exp(wx)
Step 2: Compute self forwards and aggregate host forwards and get d = fore_gradient
Step 3: Compute unilateral gradient = ∑d*x,
Step 4: Send unilateral gradients to arbiter and received the optimized and decrypted gradient.
"""
current_suffix = (n_iter_, batch_index)
# self.host_forwards = self.get_host_forward(suffix=current_suffix)
# Compute Guest's partial d
self.compute_half_d(data_instances, model_weights, cipher,
batch_index, current_suffix)
if self.use_async:
unilateral_gradient = self._asynchronous_compute_gradient(data_instances, model_weights,
cipher=cipher,
current_suffix=current_suffix)
else:
unilateral_gradient = self._centralized_compute_gradient(data_instances, model_weights,
cipher=cipher,
current_suffix=current_suffix,
masked_index=masked_index)
if optimizer is not None:
unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)
optimized_gradient = self.update_gradient(unilateral_gradient, suffix=current_suffix)
# LOGGER.debug(f"Before return, optimized_gradient: {optimized_gradient}")
return optimized_gradient
def get_host_forward(self, suffix=tuple()):
host_forward = self.host_forward_transfer.get(idx=-1, suffix=suffix)
return host_forward
def remote_fore_gradient(self, fore_gradient, suffix=tuple()):
self.fore_gradient_transfer.remote(obj=fore_gradient, role=consts.HOST, idx=-1, suffix=suffix)
def update_gradient(self, unilateral_gradient, suffix=tuple()):
self.unilateral_gradient_transfer.remote(unilateral_gradient, role=consts.ARBITER, idx=0, suffix=suffix)
optimized_gradient = self.unilateral_optim_gradient_transfer.get(idx=0, suffix=suffix)
return optimized_gradient
class Host(HeteroGradientBase):
def __init__(self):
super().__init__()
self.forwards = None
self.fore_gradient = None
def _register_gradient_sync(self, host_forward_transfer, fore_gradient_transfer,
host_gradient_transfer, host_optim_gradient_transfer):
self.host_forward_transfer = host_forward_transfer
self.fore_gradient_transfer = fore_gradient_transfer
self.unilateral_gradient_transfer = host_gradient_transfer
self.unilateral_optim_gradient_transfer = host_optim_gradient_transfer
def compute_forwards(self, data_instances, model_weights):
raise NotImplementedError("Function should not be called here")
def compute_unilateral_gradient(self, data_instances, fore_gradient, model_weights, optimizer):
raise NotImplementedError("Function should not be called here")
def _asynchronous_compute_gradient(self, data_instances, cipher, current_suffix):
encrypted_forward = cipher.distribute_encrypt(self.forwards)
self.remote_host_forward(encrypted_forward, suffix=current_suffix)
half_g = self.compute_gradient(data_instances, self.forwards, False)
guest_half_d = self.get_fore_gradient(suffix=current_suffix)
guest_half_g = self.compute_gradient(data_instances, guest_half_d, False)
unilateral_gradient = half_g + guest_half_g
return unilateral_gradient
def _centralized_compute_gradient(self, data_instances, cipher, current_suffix):
encrypted_forward = cipher.distribute_encrypt(self.forwards)
self.remote_host_forward(encrypted_forward, suffix=current_suffix)
fore_gradient = self.fore_gradient_transfer.get(idx=0, suffix=current_suffix)
# Host case, never fit-intercept
unilateral_gradient = self.compute_gradient(data_instances, fore_gradient, False, need_average=False)
return unilateral_gradient
def compute_gradient_procedure(self, data_instances, cipher, model_weights,
optimizer,
n_iter_, batch_index):
"""
Linear model gradient procedure
        Step 1: get host forwards, which differ across algorithms
For Logistic Regression: forwards = wx
"""
current_suffix = (n_iter_, batch_index)
self.forwards = self.compute_forwards(data_instances, model_weights)
if self.use_async:
unilateral_gradient = self._asynchronous_compute_gradient(data_instances,
cipher,
current_suffix)
else:
unilateral_gradient = self._centralized_compute_gradient(data_instances,
cipher,
current_suffix)
if optimizer is not None:
unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)
optimized_gradient = self.update_gradient(unilateral_gradient, suffix=current_suffix)
LOGGER.debug(f"Before return compute_gradient_procedure")
return optimized_gradient
def compute_sqn_forwards(self, data_instances, delta_s, cipher):
"""
To compute Hessian matrix, y, s are needed.
g = (1/N)*∑(0.25 * wx - 0.5 * y) * x
y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x
define forward_hess = ∑(0.25 * x * s)
"""
sqn_forwards = data_instances.mapValues(
lambda v: cipher.encrypt(fate_operator.vec_dot(v.features, delta_s.coef_) + delta_s.intercept_))
# forward_sum = sqn_forwards.reduce(reduce_add)
return sqn_forwards
def compute_forward_hess(self, data_instances, delta_s, forward_hess):
"""
To compute Hessian matrix, y, s are needed.
g = (1/N)*∑(0.25 * wx - 0.5 * y) * x
y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x
define forward_hess = (0.25 * x * s)
"""
hess_vector = self.compute_gradient(data_instances,
forward_hess,
delta_s.fit_intercept)
return np.array(hess_vector)
def remote_host_forward(self, host_forward, suffix=tuple()):
self.host_forward_transfer.remote(obj=host_forward, role=consts.GUEST, idx=0, suffix=suffix)
def get_fore_gradient(self, suffix=tuple()):
host_forward = self.fore_gradient_transfer.get(idx=0, suffix=suffix)
return host_forward
def update_gradient(self, unilateral_gradient, suffix=tuple()):
self.unilateral_gradient_transfer.remote(unilateral_gradient, role=consts.ARBITER, idx=0, suffix=suffix)
optimized_gradient = self.unilateral_optim_gradient_transfer.get(idx=0, suffix=suffix)
return optimized_gradient
class Arbiter(HeteroGradientBase):
def __init__(self):
super().__init__()
self.has_multiple_hosts = False
def _register_gradient_sync(self, guest_gradient_transfer, host_gradient_transfer,
guest_optim_gradient_transfer, host_optim_gradient_transfer):
self.guest_gradient_transfer = guest_gradient_transfer
self.host_gradient_transfer = host_gradient_transfer
self.guest_optim_gradient_transfer = guest_optim_gradient_transfer
self.host_optim_gradient_transfer = host_optim_gradient_transfer
def compute_gradient_procedure(self, cipher, optimizer, n_iter_, batch_index):
"""
Compute gradients.
        Receive local gradients from guest and hosts, merge and optimize them, then separate and send back.
Parameters
----------
cipher: Use for encryption
optimizer: optimizer that get delta gradient of this iter
n_iter_: int, current iter nums
batch_index: int
"""
current_suffix = (n_iter_, batch_index)
host_gradients, guest_gradient = self.get_local_gradient(current_suffix)
if len(host_gradients) > 1:
self.has_multiple_hosts = True
host_gradients = [np.array(h) for h in host_gradients]
guest_gradient = np.array(guest_gradient)
size_list = [h_g.shape[0] for h_g in host_gradients]
size_list.append(guest_gradient.shape[0])
        gradient = np.hstack(host_gradients)
gradient = np.hstack((gradient, guest_gradient))
grad = np.array(cipher.decrypt_list(gradient))
# LOGGER.debug("In arbiter compute_gradient_procedure, before apply grad: {}, size_list: {}".format(
# grad, size_list
# ))
delta_grad = optimizer.apply_gradients(grad)
# LOGGER.debug("In arbiter compute_gradient_procedure, delta_grad: {}".format(
# delta_grad
# ))
separate_optim_gradient = self.separate(delta_grad, size_list)
# LOGGER.debug("In arbiter compute_gradient_procedure, separated gradient: {}".format(
# separate_optim_gradient
# ))
host_optim_gradients = separate_optim_gradient[: -1]
guest_optim_gradient = separate_optim_gradient[-1]
self.remote_local_gradient(host_optim_gradients, guest_optim_gradient, current_suffix)
return delta_grad
@staticmethod
def separate(value, size_list):
"""
        Split value into several sets according to size_list
Parameters
----------
value: list or ndarray, input data
size_list: list, each set size
Returns
----------
list
            the separated sets
"""
separate_res = []
cur = 0
for size in size_list:
separate_res.append(value[cur:cur + size])
cur += size
return separate_res
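        # Illustrative example of separate() (added, not part of the original source):
        #   separate(np.arange(5), [3, 2]) -> [array([0, 1, 2]), array([3, 4])]
        # i.e. the concatenated gradient is cut back into per-party pieces by size.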
def get_local_gradient(self, suffix=tuple()):
host_gradients = self.host_gradient_transfer.get(idx=-1, suffix=suffix)
LOGGER.info("Get host_gradient from Host")
guest_gradient = self.guest_gradient_transfer.get(idx=0, suffix=suffix)
LOGGER.info("Get guest_gradient from Guest")
return host_gradients, guest_gradient
def remote_local_gradient(self, host_optim_gradients, guest_optim_gradient, suffix=tuple()):
for idx, host_optim_gradient in enumerate(host_optim_gradients):
self.host_optim_gradient_transfer.remote(host_optim_gradient,
role=consts.HOST,
idx=idx,
suffix=suffix)
self.guest_optim_gradient_transfer.remote(guest_optim_gradient,
role=consts.GUEST,
idx=0,
suffix=suffix)
| 20,230 | 42.136461 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/hetero_sqn_gradient.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.optim.gradient import hetero_linear_model_gradient
from federatedml.optim.gradient import sqn_sync
from federatedml.param.sqn_param import StochasticQuasiNewtonParam
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroStochasticQuansiNewton(hetero_linear_model_gradient.HeteroGradientBase):
def __init__(self, sqn_param: StochasticQuasiNewtonParam):
self.gradient_computer = None
self.transfer_variable = None
self.sqn_sync = None
self.n_iter = 0
self.count_t = -1
self.__total_batch_nums = 0
self.batch_index = 0
self.last_w_tilde: LinearModelWeights = None
self.this_w_tilde: LinearModelWeights = None
# self.sqn_param = sqn_param
self.update_interval_L = sqn_param.update_interval_L
self.memory_M = sqn_param.memory_M
self.sample_size = sqn_param.sample_size
self.random_seed = sqn_param.random_seed
self.raise_weight_overflow_error = True
def unset_raise_weight_overflow_error(self):
self.raise_weight_overflow_error = False
@property
def iter_k(self):
return self.n_iter * self.__total_batch_nums + self.batch_index + 1
def set_total_batch_nums(self, total_batch_nums):
self.__total_batch_nums = total_batch_nums
def register_gradient_computer(self, gradient_computer):
self.gradient_computer = copy.deepcopy(gradient_computer)
def register_transfer_variable(self, transfer_variable):
self.transfer_variable = transfer_variable
self.sqn_sync.register_transfer_variable(self.transfer_variable)
def _renew_w_tilde(self):
self.last_w_tilde = self.this_w_tilde
self.this_w_tilde = LinearModelWeights(np.zeros_like(self.last_w_tilde.unboxed),
self.last_w_tilde.fit_intercept,
raise_overflow_error=self.raise_weight_overflow_error)
def _update_hessian(self, *args):
raise NotImplementedError("Should not call here")
def _update_w_tilde(self, model_weights):
if self.this_w_tilde is None:
self.this_w_tilde = copy.deepcopy(model_weights)
else:
self.this_w_tilde += model_weights
def compute_gradient_procedure(self, *args, **kwargs):
data_instances = args[0]
cipher = args[1]
model_weights = args[2]
optimizer = args[3]
self.batch_index = args[5]
self.n_iter = args[4]
gradient_results = self.gradient_computer.compute_gradient_procedure(*args)
self._update_w_tilde(model_weights)
if self.iter_k % self.update_interval_L == 0:
self.count_t += 1
# LOGGER.debug("Before division, this_w_tilde: {}".format(self.this_w_tilde.unboxed))
self.this_w_tilde /= self.update_interval_L
# LOGGER.debug("After division, this_w_tilde: {}".format(self.this_w_tilde.unboxed))
if self.count_t > 0:
LOGGER.info("iter_k: {}, count_t: {}, start to update hessian".format(self.iter_k, self.count_t))
self._update_hessian(data_instances, optimizer, cipher)
self.last_w_tilde = self.this_w_tilde
self.this_w_tilde = LinearModelWeights(np.zeros_like(self.last_w_tilde.unboxed),
self.last_w_tilde.fit_intercept,
raise_overflow_error=self.raise_weight_overflow_error)
# LOGGER.debug("After replace, last_w_tilde: {}, this_w_tilde: {}".format(self.last_w_tilde.unboxed,
# self.this_w_tilde.unboxed))
return gradient_results
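        # Added summary of the schedule above: this_w_tilde accumulates the model weights of
        # every mini-batch; every update_interval_L batches it is averaged, and from the second
        # such checkpoint on (count_t > 0) the pair (delta_s = this_w_tilde - last_w_tilde,
        # hess_vector) is used to refresh the Hessian approximation before the buffers rotate.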
def compute_loss(self, *args, **kwargs):
loss = self.gradient_computer.compute_loss(*args)
return loss
class HeteroStochasticQuansiNewtonGuest(HeteroStochasticQuansiNewton):
def __init__(self, sqn_param):
super().__init__(sqn_param)
self.sqn_sync = sqn_sync.Guest()
def _update_hessian(self, data_instances, optimizer, cipher_operator):
suffix = (self.n_iter, self.batch_index)
sampled_data = self.sqn_sync.sync_sample_data(data_instances, self.sample_size, self.random_seed, suffix=suffix)
delta_s = self.this_w_tilde - self.last_w_tilde
host_forwards = self.sqn_sync.get_host_forwards(suffix=suffix)
forward_hess, hess_vector = self.gradient_computer.compute_forward_hess(sampled_data, delta_s, host_forwards)
self.sqn_sync.remote_forward_hess(forward_hess, suffix)
hess_norm = optimizer.hess_vector_norm(delta_s)
# LOGGER.debug("In _update_hessian, hess_norm: {}".format(hess_norm.unboxed))
hess_vector = hess_vector + hess_norm.unboxed
self.sqn_sync.sync_hess_vector(hess_vector, suffix)
class HeteroStochasticQuansiNewtonHost(HeteroStochasticQuansiNewton):
def __init__(self, sqn_param):
super().__init__(sqn_param)
self.sqn_sync = sqn_sync.Host()
def _update_hessian(self, data_instances, optimizer, cipher_operator):
suffix = (self.n_iter, self.batch_index)
sampled_data = self.sqn_sync.sync_sample_data(data_instances, suffix=suffix)
delta_s = self.this_w_tilde - self.last_w_tilde
# LOGGER.debug("In _update_hessian, delta_s: {}".format(delta_s.unboxed))
host_forwards = self.gradient_computer.compute_sqn_forwards(sampled_data, delta_s, cipher_operator)
# host_forwards = cipher_operator.encrypt_list(host_forwards)
self.sqn_sync.remote_host_forwards(host_forwards, suffix=suffix)
forward_hess = self.sqn_sync.get_forward_hess(suffix=suffix)
hess_vector = self.gradient_computer.compute_forward_hess(sampled_data, delta_s, forward_hess)
hess_vector += optimizer.hess_vector_norm(delta_s).unboxed
self.sqn_sync.sync_hess_vector(hess_vector, suffix)
class HeteroStochasticQuansiNewtonArbiter(HeteroStochasticQuansiNewton):
def __init__(self, sqn_param):
super().__init__(sqn_param)
self.opt_Hess = None
self.opt_v = None
self.opt_s = None
self.sqn_sync = sqn_sync.Arbiter()
self.model_weight: LinearModelWeights = None
def _update_w_tilde(self, gradient: LinearModelWeights):
if self.model_weight is None:
self.model_weight = copy.deepcopy(gradient)
else:
self.model_weight -= gradient
if self.this_w_tilde is None:
self.this_w_tilde = copy.deepcopy(self.model_weight)
else:
self.this_w_tilde += self.model_weight
def compute_gradient_procedure(self, cipher_operator, optimizer, n_iter_, batch_index):
self.batch_index = batch_index
self.n_iter = n_iter_
# LOGGER.debug("In compute_gradient_procedure, n_iter: {}, batch_index: {}, iter_k: {}".format(
# self.n_iter, self.batch_index, self.iter_k
# ))
optimizer.set_hess_matrix(self.opt_Hess)
delta_grad = self.gradient_computer.compute_gradient_procedure(
cipher_operator, optimizer, n_iter_, batch_index)
self._update_w_tilde(LinearModelWeights(delta_grad,
fit_intercept=False,
raise_overflow_error=self.raise_weight_overflow_error))
if self.iter_k % self.update_interval_L == 0:
self.count_t += 1
# LOGGER.debug("Before division, this_w_tilde: {}".format(self.this_w_tilde.unboxed))
self.this_w_tilde /= self.update_interval_L
# LOGGER.debug("After division, this_w_tilde: {}".format(self.this_w_tilde.unboxed))
if self.count_t > 0:
LOGGER.info("iter_k: {}, count_t: {}, start to update hessian".format(self.iter_k, self.count_t))
self._update_hessian(cipher_operator)
self.last_w_tilde = self.this_w_tilde
self.this_w_tilde = LinearModelWeights(np.zeros_like(self.last_w_tilde.unboxed),
self.last_w_tilde.fit_intercept,
raise_overflow_error=self.raise_weight_overflow_error)
return delta_grad
# self._update_w_tilde(cipher_operator)
def _update_hessian(self, cipher_operator):
suffix = (self.n_iter, self.batch_index)
hess_vectors = self.sqn_sync.sync_hess_vector(suffix)
hess_vectors = np.array(cipher_operator.decrypt_list(hess_vectors))
delta_s = self.this_w_tilde - self.last_w_tilde
# LOGGER.debug("In update hessian, hess_vectors: {}, delta_s: {}".format(
# hess_vectors, delta_s.unboxed
# ))
self.opt_v = self._update_memory_vars(hess_vectors, self.opt_v)
self.opt_s = self._update_memory_vars(delta_s.unboxed, self.opt_s)
self._compute_hess_matrix()
def _update_memory_vars(self, new_vars, memory_vars):
if isinstance(new_vars, list):
new_vars = np.array(new_vars)
if memory_vars is None:
memory_vars = [0, ]
memory_vars[0] = new_vars.reshape(-1, 1)
elif len(memory_vars) < self.memory_M:
memory_vars.append(new_vars.reshape(-1, 1))
else:
memory_vars.pop(0)
memory_vars.append(new_vars.reshape(-1, 1))
return memory_vars
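        # Illustrative behaviour (added commentary), assuming memory_M = 2:
        #   _update_memory_vars(v1, None)      -> [v1]
        #   _update_memory_vars(v2, [v1])      -> [v1, v2]
        #   _update_memory_vars(v3, [v1, v2])  -> [v2, v3]   # oldest entry dropped
        # every entry is reshaped to a column vector before being stored.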
def _compute_hess_matrix(self):
# LOGGER.debug("opt_v: {}, opt_s: {}".format(self.opt_v, self.opt_s))
rho = sum(self.opt_v[-1] * self.opt_s[-1]) / sum(self.opt_v[-1] * self.opt_v[-1])
# LOGGER.debug("in _compute_hess_matrix, rho0 = {}".format(rho))
n = self.opt_s[0].shape[0]
Hess = rho * np.identity(n)
iter_num = 0
for y, s in zip(self.opt_v, self.opt_s):
rho = 1.0 / (y.T.dot(s))
Hess = (np.identity(n) - rho * s.dot(y.T)).dot(Hess).dot(np.identity(n) - rho * y.dot(s.T)) + rho * s.dot(
s.T)
iter_num += 1
# LOGGER.info(
# "hessian updating algorithm iter_num = {}, rho = {} \n ||s|| is {} \n ||y|| is {}".format(iter_num, rho,
# np.linalg.norm(
# s),
# np.linalg.norm(
# y)))
self.opt_Hess = Hess
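        # Added note: the loop above is the standard inverse-BFGS recursion applied to each
        # stored pair (s, y) = (opt_s[i], opt_v[i]):
        #   H <- (I - rho * s y^T) H (I - rho * y s^T) + rho * s s^T,  with rho = 1 / (y^T s),
        # starting from H0 = gamma * I, where gamma = (y^T s) / (y^T y) is taken from the
        # most recent pair.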
def sqn_factory(role, sqn_param):
if role == consts.GUEST:
return HeteroStochasticQuansiNewtonGuest(sqn_param)
if role == consts.HOST:
return HeteroStochasticQuansiNewtonHost(sqn_param)
return HeteroStochasticQuansiNewtonArbiter(sqn_param)
| 11,855 | 44.953488 | 123 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/hetero_linr_gradient_and_loss.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from federatedml.framework.hetero.sync import loss_sync
from federatedml.optim.gradient import hetero_linear_model_gradient
from federatedml.util import LOGGER
from federatedml.util.fate_operator import reduce_add, vec_dot
class Guest(hetero_linear_model_gradient.Guest, loss_sync.Guest):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.host_forward,
transfer_variables.fore_gradient,
transfer_variables.guest_gradient,
transfer_variables.guest_optim_gradient)
self._register_loss_sync(transfer_variables.host_loss_regular,
transfer_variables.loss,
transfer_variables.loss_intermediate)
def compute_half_d(self, data_instances, w, cipher, batch_index, current_suffix):
if self.use_sample_weight:
self.half_d = data_instances.mapValues(
lambda v: (vec_dot(v.features, w.coef_) + w.intercept_ - v.label) * v.weight)
else:
self.half_d = data_instances.mapValues(
lambda v: vec_dot(v.features, w.coef_) + w.intercept_ - v.label)
return self.half_d
def compute_and_aggregate_forwards(self, data_instances, half_g, encrypted_half_g, batch_index,
current_suffix, offset=None):
"""
gradient = (1/N)*sum(wx - y) * x
Define wx -y as guest_forward and wx as host_forward
"""
self.host_forwards = self.get_host_forward(suffix=current_suffix)
return self.host_forwards
def compute_loss(self, data_instances, n_iter_, batch_index, loss_norm=None):
'''
Compute hetero linr loss:
        loss = (1/2N)*\\sum(wx-y)^2 where y is label, w is model weight and x is features
        Note: (wx - y)^2 = (wx_h)^2 + (wx_g - y)^2 + 2 * wx_h * (wx_g - y)
'''
current_suffix = (n_iter_, batch_index)
n = data_instances.count()
loss_list = []
host_wx_squares = self.get_host_loss_intermediate(current_suffix)
if loss_norm is not None:
host_loss_regular = self.get_host_loss_regular(suffix=current_suffix)
else:
host_loss_regular = []
if len(self.host_forwards) > 1:
LOGGER.info("More than one host exist, loss is not available")
else:
host_forward = self.host_forwards[0]
host_wx_square = host_wx_squares[0]
wxy_square = self.half_d.mapValues(lambda x: np.square(x)).reduce(reduce_add)
loss_gh = self.half_d.join(host_forward, lambda g, h: g * h).reduce(reduce_add)
loss = (wxy_square + host_wx_square + 2 * loss_gh) / (2 * n)
if loss_norm is not None:
loss = loss + loss_norm + host_loss_regular[0]
loss_list.append(loss)
# LOGGER.debug("In compute_loss, loss list are: {}".format(loss_list))
self.sync_loss_info(loss_list, suffix=current_suffix)
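        # Added note on the arithmetic above: writing the prediction as wx_g + b + wx_h and
        # d = wx_g + b - y (i.e. self.half_d), we have
        #   (wx_g + b + wx_h - y)^2 = d^2 + (wx_h)^2 + 2 * d * wx_h,
        # which is exactly wxy_square + host_wx_square + 2 * loss_gh summed over the batch,
        # hence the division by (2 * n) plus the guest and host regularization terms.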
def compute_forward_hess(self, data_instances, delta_s, host_forwards):
"""
To compute Hessian matrix, y, s are needed.
g = (1/N)*∑(wx - y) * x
        y = ∇²F(w_t) * s_t = g' * s = (1/N)*∑(x * s) * x
define forward_hess = (1/N)*∑(x * s)
"""
forwards = data_instances.mapValues(
lambda v: (vec_dot(v.features, delta_s.coef_) + delta_s.intercept_))
for host_forward in host_forwards:
forwards = forwards.join(host_forward, lambda g, h: g + h)
if self.use_sample_weight:
forwards = forwards.join(data_instances, lambda h, d: h * d.weight)
hess_vector = self.compute_gradient(data_instances,
forwards,
delta_s.fit_intercept)
return forwards, np.array(hess_vector)
class Host(hetero_linear_model_gradient.Host, loss_sync.Host):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.host_forward,
transfer_variables.fore_gradient,
transfer_variables.host_gradient,
transfer_variables.host_optim_gradient)
self._register_loss_sync(transfer_variables.host_loss_regular,
transfer_variables.loss,
transfer_variables.loss_intermediate)
def compute_forwards(self, data_instances, model_weights):
wx = data_instances.mapValues(
lambda v: vec_dot(v.features, model_weights.coef_) + model_weights.intercept_)
return wx
def compute_half_g(self, data_instances, w, cipher, batch_index):
half_g = data_instances.mapValues(
lambda v: vec_dot(v.features, w.coef_) + w.intercept_)
encrypt_half_g = cipher[batch_index].encrypt(half_g)
return half_g, encrypt_half_g
def compute_loss(self, model_weights, optimizer, n_iter_, batch_index, cipher_operator):
'''
        Compute hetero linr loss for:
        loss = (1/2N)*\\sum(wx-y)^2 where y is label, w is model weight and x is features
        Note: (wx - y)^2 = (wx_h)^2 + (wx_g - y)^2 + 2 * wx_h * (wx_g - y)
'''
current_suffix = (n_iter_, batch_index)
self_wx_square = self.forwards.mapValues(lambda x: np.square(x)).reduce(reduce_add)
en_wx_square = cipher_operator.encrypt(self_wx_square)
self.remote_loss_intermediate(en_wx_square, suffix=current_suffix)
loss_regular = optimizer.loss_norm(model_weights)
if loss_regular is not None:
en_loss_regular = cipher_operator.encrypt(loss_regular)
self.remote_loss_regular(en_loss_regular, suffix=current_suffix)
class Arbiter(hetero_linear_model_gradient.Arbiter, loss_sync.Arbiter):
def register_gradient_procedure(self, transfer_variables):
self._register_gradient_sync(transfer_variables.guest_gradient,
transfer_variables.host_gradient,
transfer_variables.guest_optim_gradient,
transfer_variables.host_optim_gradient)
self._register_loss_sync(transfer_variables.loss)
def compute_loss(self, cipher, n_iter_, batch_index):
"""
Decrypt loss from guest
"""
current_suffix = (n_iter_, batch_index)
loss_list = self.sync_loss_info(suffix=current_suffix)
de_loss_list = cipher.decrypt_list(loss_list)
return de_loss_list
| 7,414 | 44.213415 | 99 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/test/homo_lr_gradient_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.optim.gradient.homo_lr_gradient import LogisticGradient, TaylorLogisticGradient
from federatedml.secureprotol import PaillierEncrypt
class TestHomoLRGradient(unittest.TestCase):
def setUp(self):
self.paillier_encrypt = PaillierEncrypt()
self.paillier_encrypt.generate_key()
self.gradient_operator = LogisticGradient()
self.taylor_operator = TaylorLogisticGradient()
self.X = np.array([[1, 2, 3, 4, 5], [3, 2, 4, 5, 1], [2, 2, 3, 1, 1, ]]) / 10
self.X1 = np.c_[self.X, np.ones(3)]
self.Y = np.array([[1], [1], [-1]])
self.values = []
for idx, x in enumerate(self.X):
inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
self.values.append((idx, inst))
self.values1 = []
for idx, x in enumerate(self.X1):
inst = Instance(inst_id=idx, features=x, label=self.Y[idx])
self.values1.append((idx, inst))
self.coef = np.array([2, 2.3, 3, 4, 2.1]) / 10
self.coef1 = np.append(self.coef, [1])
def test_gradient_length(self):
fit_intercept = False
grad = self.gradient_operator.compute_gradient(self.values, self.coef, 0, fit_intercept)
self.assertEqual(grad.shape[0], self.X.shape[1])
taylor_grad = self.taylor_operator.compute_gradient(self.values, self.coef, 0, fit_intercept)
self.assertEqual(taylor_grad.shape[0], self.X.shape[1])
self.assertTrue(np.sum(grad - taylor_grad) < 0.0001)
fit_intercept = True
grad = self.gradient_operator.compute_gradient(self.values, self.coef, 0, fit_intercept)
self.assertEqual(grad.shape[0], self.X.shape[1] + 1)
taylor_grad = self.taylor_operator.compute_gradient(self.values, self.coef, 0, fit_intercept)
self.assertEqual(taylor_grad.shape[0], self.X.shape[1] + 1)
self.assertTrue(np.sum(grad - taylor_grad) < 0.0001)
if __name__ == '__main__':
unittest.main()
| 2,692 | 36.402778 | 101 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.optim.gradient.logistic_gradient import LogisticGradient, HeteroLogisticGradient, TaylorLogisticGradient
__all__ = ["LogisticGradient", "HeteroLogisticGradient", "TaylorLogisticGradient"]
| 823 | 40.2 | 121 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/test/hetero_lr_gradient_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.optim.gradient import hetero_linear_model_gradient
from federatedml.optim.gradient import hetero_lr_gradient_and_loss
from federatedml.secureprotol import PaillierEncrypt
class TestHeteroLogisticGradient(unittest.TestCase):
def setUp(self):
self.paillier_encrypt = PaillierEncrypt()
self.paillier_encrypt.generate_key()
# self.hetero_lr_gradient = HeteroLogisticGradient(self.paillier_encrypt)
self.hetero_lr_gradient = hetero_lr_gradient_and_loss.Guest()
size = 10
self.en_wx = session.parallelize([self.paillier_encrypt.encrypt(i) for i in range(size)],
partition=48,
include_key=False)
# self.en_wx = session.parallelize([self.paillier_encrypt.encrypt(i) for i in range(size)])
self.en_sum_wx_square = session.parallelize([self.paillier_encrypt.encrypt(np.square(i)) for i in range(size)],
partition=48,
include_key=False)
self.wx = np.array([i for i in range(size)])
self.w = self.wx / np.array([1 for _ in range(size)])
self.data_inst = session.parallelize(
[Instance(features=np.array([1 for _ in range(size)]), label=pow(-1, i % 2)) for i in range(size)],
partition=48, include_key=False)
# test fore_gradient
self.fore_gradient_local = [-0.5, 0.75, 0, 1.25, 0.5, 1.75, 1, 2.25, 1.5, 2.75]
# test gradient
self.gradient = [1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125]
self.gradient_fit_intercept = [1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125, 1.125]
self.loss = 4.505647
def test_compute_partition_gradient(self):
fore_gradient = self.en_wx.join(self.data_inst, lambda wx, d: 0.25 * wx - 0.5 * d.label)
sparse_data = self._make_sparse_data()
gradient_computer = hetero_linear_model_gradient.HeteroGradientBase()
for fit_intercept in [True, False]:
dense_result = gradient_computer.compute_gradient(self.data_inst, fore_gradient, fit_intercept)
dense_result = [self.paillier_encrypt.decrypt(iterator) for iterator in dense_result]
if fit_intercept:
self.assertListEqual(dense_result, self.gradient_fit_intercept)
else:
self.assertListEqual(dense_result, self.gradient)
sparse_result = gradient_computer.compute_gradient(sparse_data, fore_gradient, fit_intercept)
sparse_result = [self.paillier_encrypt.decrypt(iterator) for iterator in sparse_result]
self.assertListEqual(dense_result, sparse_result)
def _make_sparse_data(self):
def trans_sparse(instance):
dense_features = instance.features
indices = [i for i in range(len(dense_features))]
sparse_features = SparseVector(indices=indices, data=dense_features, shape=len(dense_features))
return Instance(inst_id=None,
features=sparse_features,
label=instance.label)
return self.data_inst.mapValues(trans_sparse)
if __name__ == "__main__":
session.init("1111")
unittest.main()
session.stop()
| 4,187 | 45.533333 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/optim/gradient/test/gradient_method_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import unittest
import numpy as np
import pandas as pd
from federatedml.util import fate_operator
def go_fast(a):  # Originally intended to be numba-jit compiled to machine code; runs as plain Python here since no @jit decorator is applied
sum = 0
for j in range(100000):
trace = 0
for i in range(a.shape[0]):
trace += np.tanh(a[i, i])
sum += trace
print(sum)
return sum
class TestHomoLRGradient(unittest.TestCase):
def setUp(self):
# home_dir = os.path.split(os.path.realpath(__file__))[0]
# data_dir = home_dir + '/../../../../../examples/data/breast_hetero_guest.csv'
# data_df = pd.read_csv(data_dir)
# self.X = np.array(data_df.iloc[:, 2:])
# self.Y = np.array(data_df.iloc[:, 1])
# self.Y = self.Y.reshape([-1, 1])
self.X = np.random.random((569, 30))
self.Y = np.random.randint(low=0, high=2, size=(569, 1))
self.coef = np.zeros(self.X.shape[1])
self.intercept = 0
self.fit_intercept = True
def test_compute_time(self):
x = np.arange(10000).reshape(100, 100)
start_time = time.time()
grad = self._test_compute(self.X, self.Y, self.coef, self.intercept, self.fit_intercept)
# go_fast(x)
end_time = time.time()
print("compute time: {}".format(end_time - start_time)) # without jit: 6.935, with jit: 6.684
# add jit in dot 7.271
# add jit in dot only: 7.616
pass
def _test_compute(self, X, Y, coef, intercept, fit_intercept):
batch_size = len(X)
if batch_size == 0:
return None, None
one_d_y = Y.reshape([-1, ])
d = (0.25 * np.array(fate_operator.dot(X, coef) + intercept).transpose() + 0.5 * one_d_y * -1)
grad_batch = X.transpose() * d
tot_loss = np.log(1 + np.exp(np.multiply(-Y.transpose(), X.dot(coef) + intercept))).sum()
avg_loss = tot_loss / Y.shape[0]
# grad_batch = grad_batch.transpose()
# if fit_intercept:
# grad_batch = np.c_[grad_batch, d]
# grad = sum(grad_batch) / batch_size
return 0
if __name__ == '__main__':
unittest.main()
| 3,430 | 31.67619 | 102 |
py
|
FATE
|
FATE-master/python/federatedml/unsupervised_learning/kmeans/kmeans_model_base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fate_arch.common import log
from federatedml.model_base import ModelBase
from federatedml.param.hetero_kmeans_param import KmeansParam
from federatedml.protobuf.generated import hetero_kmeans_meta_pb2, hetero_kmeans_param_pb2
from federatedml.transfer_variable.transfer_class.hetero_kmeans_transfer_variable import HeteroKmeansTransferVariable
from federatedml.util import abnormal_detection
from federatedml.feature.instance import Instance
from federatedml.util import consts
import functools
LOGGER = log.getLogger()
class BaseKmeansModel(ModelBase):
def __init__(self):
super(BaseKmeansModel, self).__init__()
self.model_param = KmeansParam()
self.n_iter_ = 0
self.k = 0
self.max_iter = 0
self.tol = 0
self.random_stat = None
self.iter = iter
self.centroid_list = None
self.cluster_result = None
self.transfer_variable = HeteroKmeansTransferVariable()
self.model_name = 'toSet'
self.model_param_name = 'HeteroKmeansParam'
self.model_meta_name = 'HeteroKmeansMeta'
self.header = None
self.reset_union()
self.is_converged = False
self.cluster_detail = None
self.cluster_count = None
self.aggregator = None
def _init_model(self, params):
self.model_param = params
self.k = params.k
self.max_iter = params.max_iter
self.tol = params.tol
self.random_stat = params.random_stat
# self.aggregator.register_aggregator(self.transfer_variable)
def get_header(self, data_instances):
if self.header is not None:
return self.header
return data_instances.schema.get("header")
def _get_meta(self):
meta_protobuf_obj = hetero_kmeans_meta_pb2.KmeansModelMeta(k=self.model_param.k,
tol=self.model_param.tol,
max_iter=self.max_iter)
return meta_protobuf_obj
def _get_param(self):
header = self.header
LOGGER.debug("In get_param, header: {}".format(header))
if header is None:
param_protobuf_obj = hetero_kmeans_param_pb2.KmeansModelParam()
return param_protobuf_obj
cluster_detail = [hetero_kmeans_param_pb2.Clusterdetail(cluster=cluster) for cluster in self.cluster_count]
centroid_detail = [hetero_kmeans_param_pb2.Centroiddetail(centroid=centroid) for centroid in self.centroid_list]
param_protobuf_obj = hetero_kmeans_param_pb2.KmeansModelParam(count_of_clusters=self.k,
max_interation=self.n_iter_,
converged=self.is_converged,
cluster_detail=cluster_detail,
centroid_detail=centroid_detail,
header=self.header)
return param_protobuf_obj
def export_model(self):
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
return result
def count(self, iterator):
count_result = dict()
for k, v in iterator:
if v not in count_result:
count_result[v] = 1
else:
count_result[v] += 1
return count_result
@staticmethod
def sum_dict(d1, d2):
temp = dict()
for key in d1.keys() | d2.keys():
temp[key] = sum([d.get(key, 0) for d in (d1, d2)])
return temp
def _abnormal_detection(self, data_instances):
"""
Make sure input data_instances is valid.
"""
abnormal_detection.empty_table_detection(data_instances)
abnormal_detection.empty_feature_detection(data_instances)
def load_model(self, model_dict):
param_obj = list(model_dict.get('model').values())[0].get(self.model_param_name)
meta_obj = list(model_dict.get('model').values())[0].get(self.model_meta_name)
self.k = meta_obj.k
self.centroid_list = list(param_obj.centroid_detail)
for idx, c in enumerate(self.centroid_list):
self.centroid_list[idx] = list(c.centroid)
self.cluster_count = list(param_obj.cluster_detail)
for idx, c in enumerate(self.cluster_count):
self.cluster_count[idx] = list(c.cluster)
# self.header = list(result_obj.header)
# if self.header is None:
# return
def reset_union(self):
def _add_name(inst, name):
return Instance(features=inst.features + [name], inst_id=inst.inst_id)
def kmeans_union(previews_data, name_list):
if len(previews_data) == 0:
return None
if any([x is None for x in previews_data]):
return None
# assert len(previews_data) == len(name_list)
if self.role == consts.ARBITER:
data_outputs = []
for data_output, name in zip(previews_data, name_list):
f = functools.partial(_add_name, name=name)
data_output1 = data_output[0].mapValues(f)
data_output2 = data_output[1].mapValues(f)
data_outputs.append([data_output1, data_output2])
else:
data_output1 = sub_union(previews_data, name_list)
data_outputs = [data_output1, None]
return data_outputs
def sub_union(data_output, name_list):
result_data = None
for data, name in zip(data_output, name_list):
# LOGGER.debug("before mapValues, one data: {}".format(data.first()))
f = functools.partial(_add_name, name=name)
data = data.mapValues(f)
# LOGGER.debug("after mapValues, one data: {}".format(data.first()))
if result_data is None:
result_data = data
else:
LOGGER.debug(f"Before union, t1 count: {result_data.count()}, t2 count: {data.count()}")
result_data = result_data.union(data)
LOGGER.debug(f"After union, result count: {result_data.count()}")
# LOGGER.debug("before out loop, one data: {}".format(result_data.first()))
return result_data
self.component_properties.set_union_func(kmeans_union)
def set_predict_data_schema(self, predict_datas, schemas):
if predict_datas is None:
return None, None
predict_data = predict_datas[0][0]
schema = schemas[0]
if self.role == consts.ARBITER:
data_output1 = predict_data[0]
data_output2 = predict_data[1]
if data_output1 is not None:
data_output1.schema = {
"header": ["cluster_sample_count", "cluster_inner_dist", "inter_cluster_dist", "type"],
"sid": "cluster_index",
"content_type": "cluster_result"
}
if data_output2 is not None:
data_output2.schema = {"header": ["predicted_cluster_index", "distance", "type"],
"sid": "id",
"content_type": "cluster_result"}
predict_datas = [data_output1, data_output2]
else:
data_output = predict_data
if data_output is not None:
data_output.schema = {"header": ["label", "predicted_label", "type"],
"sid": schema.get('sid'),
"content_type": "cluster_result"}
if "match_id_name" in schema:
data_output.schema["match_id_name"] = schema["match_id_name"]
predict_datas = [data_output, None]
return predict_datas
| 8,879 | 40.111111 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/unsupervised_learning/kmeans/hetero_kmeans/hetero_kmeans_arbiter.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.evaluation.metrics import clustering_metric
from federatedml.feature.instance import Instance
from federatedml.param.hetero_kmeans_param import KmeansParam
from federatedml.unsupervised_learning.kmeans.kmeans_model_base import BaseKmeansModel
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorServer
from federatedml.util import LOGGER
from federatedml.util import consts
class HeteroKmeansArbiter(BaseKmeansModel):
def __init__(self):
super(HeteroKmeansArbiter, self).__init__()
self.model_param = KmeansParam()
self.DBI = 0
self.aggregator = SecureAggregatorServer(True, 'kmeans')
def callback_dbi(self, iter_num, dbi):
metric_meta = MetricMeta(name='train',
metric_type="DBI",
extra_metas={
"unit_name": "iters",
})
self.callback_meta(metric_name='DBI', metric_namespace='train', metric_meta=metric_meta)
self.callback_metric(metric_name='DBI',
metric_namespace='train',
metric_data=[Metric(iter_num, dbi)])
def sum_in_cluster(self, iterator):
sum_result = dict()
for k, v in iterator:
if v[1] not in sum_result:
sum_result[v[1]] = np.sqrt(v[0][v[1]])
else:
sum_result[v[1]] += np.sqrt(v[0][v[1]])
return sum_result
def cal_ave_dist(self, dist_cluster_table, cluster_result):
dist_centroid_dist_table = dist_cluster_table.applyPartitions(self.sum_in_cluster).reduce(self.sum_dict)
cluster_count = cluster_result.applyPartitions(self.count).reduce(self.sum_dict)
cal_ave_dist_list = []
for key in cluster_count.keys():
count = cluster_count[key]
cal_ave_dist_list.append([key, count, dist_centroid_dist_table[key] / count])
return cal_ave_dist_list
@staticmethod
def max_radius(iterator):
radius_result = dict()
for k, v in iterator:
if v[0] not in radius_result:
radius_result[v[0]] = v[1]
elif v[1] >= radius_result[v[0]]:
radius_result[v[0]] = v[1]
return radius_result
@staticmethod
def get_max_radius(v1, v2):
rs = {}
for k1 in v1.keys() | v2.keys():
rs[k1] = max(v1.get(k1, 0), v2.get(k1, 0))
return rs
def cal_dbi(self, dist_sum, cluster_result, suffix):
dist_cluster_table = dist_sum.join(cluster_result, lambda v1, v2: [v1, v2])
dist_table = self.cal_ave_dist(dist_cluster_table, cluster_result) # ave dist in each cluster
if len(dist_table) == 1:
raise ValueError('Only one class detected. DBI calculation error')
cluster_dist = self.aggregator.aggregate_model(suffix=('cluster_dist', suffix,))
cluster_avg_intra_dist = []
for i in range(len(dist_table)):
cluster_avg_intra_dist.append(dist_table[i][2])
self.DBI = clustering_metric.DaviesBouldinIndex.compute(self, cluster_avg_intra_dist,
list(cluster_dist._weights))
self.callback_dbi(suffix - 1, self.DBI)
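        # Added note: the DBI reported above follows the usual Davies-Bouldin definition,
        #   DBI = (1/k) * sum_i max_{j != i} (s_i + s_j) / d_ij,
        # where s_i is the average intra-cluster distance of cluster i (dist_table) and d_ij
        # the distance between centroids i and j (cluster_dist, aggregated from the clients).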
def fit(self, data_instances=None, validate_data=None):
LOGGER.info("Enter hetero Kmeans arbiter fit")
last_cluster_result = None
while self.n_iter_ < self.max_iter:
dist_sum = self.aggregator.aggregate_model(suffix=(self.n_iter_,))
if last_cluster_result is not None:
self.cal_dbi(dist_sum, last_cluster_result, self.n_iter_)
cluster_result = dist_sum.mapValues(lambda v: np.argmin(v))
self.aggregator.broadcast_model(cluster_result, suffix=(self.n_iter_,))
tol1 = self.transfer_variable.guest_tol.get(idx=0, suffix=(self.n_iter_,))
tol2 = self.transfer_variable.host_tol.get(idx=0, suffix=(self.n_iter_,))
tol_final = tol1 + tol2
self.is_converged = True if tol_final < self.tol else False
LOGGER.debug(f"iter: {self.n_iter_}, tol_final: {tol_final}, tol: {self.tol},"
f" is_converge: {self.is_converged}")
self.transfer_variable.arbiter_tol.remote(self.is_converged, role=consts.HOST, idx=-1,
suffix=(self.n_iter_,))
self.transfer_variable.arbiter_tol.remote(self.is_converged, role=consts.GUEST, idx=0,
suffix=(self.n_iter_,))
last_cluster_result = cluster_result
self.n_iter_ += 1
if self.is_converged:
break
        # calculate final round dbi
dist_sum = self.aggregator.aggregate_model(suffix=(self.n_iter_,))
cluster_result = dist_sum.mapValues(lambda v: np.argmin(v))
self.aggregator.broadcast_model(cluster_result, suffix=(self.n_iter_,))
self.cal_dbi(dist_sum, last_cluster_result, self.n_iter_)
dist_sum_dbi = self.aggregator.aggregate_model(suffix=(self.n_iter_ + 1, ))
self.aggregator.broadcast_model(cluster_result, suffix=(self.n_iter_ + 1,))
self.cal_dbi(dist_sum_dbi, cluster_result, self.n_iter_ + 1)
def predict(self, data_instances=None):
LOGGER.info("Start predict ...")
res_dict = self.aggregator.aggregate_model(suffix=('predict', ))
cluster_result = res_dict.mapValues(lambda v: np.argmin(v))
cluster_dist_result = res_dict.mapValues(lambda v: min(v))
self.aggregator.broadcast_model(cluster_result, suffix=('predict', ))
res_dict_dbi = self.aggregator.aggregate_model(suffix=('predict_dbi', ))
self.aggregator.broadcast_model(cluster_result, suffix=('predict_dbi', ))
dist_cluster_table = res_dict.join(cluster_result, lambda v1, v2: [v1, v2])
dist_cluster_table_dbi = res_dict_dbi.join(cluster_result, lambda v1, v2: [v1, v2])
dist_table = self.cal_ave_dist(dist_cluster_table, cluster_result) # ave dist in each cluster
dist_table_dbi = self.cal_ave_dist(dist_cluster_table_dbi, cluster_result)
cluster_dist = self.aggregator.aggregate_model(suffix=('predict_cluster_dist', ))
dist_cluster_table_out = cluster_result.join(cluster_dist_result, lambda v1, v2: [int(v1), float(v2)])
cluster_max_radius = dist_cluster_table_out.applyPartitions(self.max_radius).reduce(self.get_max_radius)
result = []
for i in range(len(dist_table)):
c_key = dist_table[i][0]
result.append(tuple(
[int(c_key),
Instance(features=[dist_table[i][1], dist_table_dbi[i][2],
cluster_max_radius[c_key], list(cluster_dist._weights)])]))
predict_result1 = session.parallelize(result, partition=res_dict.partitions, include_key=True)
predict_result2 = dist_cluster_table_out.mapValues(lambda x: Instance(features=x))
return predict_result1, predict_result2
| 7,951 | 46.333333 | 112 |
py
|
FATE
|
FATE-master/python/federatedml/unsupervised_learning/kmeans/hetero_kmeans/hetero_kmeans_client.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.framework.weights import NumpyWeights
from federatedml.unsupervised_learning.kmeans.kmeans_model_base import BaseKmeansModel
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient
class HeteroKmeansClient(BaseKmeansModel):
def __init__(self):
super(HeteroKmeansClient, self).__init__()
self.client_dist = None
self.client_tol = None
self.aggregator = SecureAggregatorClient(
secure_aggregate=True, aggregate_type='sum', communicate_match_suffix='kmeans')
@staticmethod
def educl_dist(u, centroid_list):
result = []
for c in centroid_list:
result.append(np.sum(np.square(np.array(c) - u.features)))
return np.array(result)
def get_centroid(self, data_instances):
random_key = []
key = list(data_instances.mapValues(
lambda data_instance: None).collect())
random_list = list(np.random.choice(
data_instances.count(), self.k, replace=False))
for k in random_list:
random_key.append(key[k][0])
return random_key
def cluster_sum(self, iterator):
cluster_result = dict()
for k, v in iterator:
if v[1] not in cluster_result:
cluster_result[v[1]] = v[0]
else:
cluster_result[v[1]] += v[0]
return cluster_result
def centroid_cal(self, cluster_result, data_instances):
cluster_result_table = data_instances.join(
cluster_result, lambda v1, v2: [v1.features, v2])
centroid_feature_sum = cluster_result_table.applyPartitions(
self.cluster_sum).reduce(self.sum_dict)
cluster_count = cluster_result.applyPartitions(
self.count).reduce(self.sum_dict)
centroid_list = []
cluster_count_list = []
count_all = data_instances.count()
for k in range(self.k):
if k not in centroid_feature_sum:
centroid_list.append(self.centroid_list[int(k)])
cluster_count_list.append([k, 0, 0])
else:
count = cluster_count[k]
centroid_list.append(centroid_feature_sum[k] / count)
cluster_count_list.append([k, count, count / count_all])
return centroid_list, cluster_count_list
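        # Added note: the new centroid of a cluster is the mean of the features of the samples
        # assigned to it; an empty cluster keeps its previous centroid and is reported with a
        # zero count and zero ratio in cluster_count_list.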
def centroid_dist(self, centroid_list):
cluster_dist_list = []
for i in range(0, len(centroid_list)):
for j in range(0, len(centroid_list)):
if j != i:
cluster_dist_list.append(
np.sum((np.array(centroid_list[i]) - np.array(centroid_list[j])) ** 2))
return cluster_dist_list
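        # Added note: this yields the squared Euclidean distance for every ordered pair of
        # centroids (k * (k - 1) values), which is later aggregated across parties and used
        # for the between-cluster part of the DBI computation.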
def fit(self, data_instances, validate_data=None):
LOGGER.info("Enter hetero_kmeans_client fit")
self.header = self.get_header(data_instances)
self._abnormal_detection(data_instances)
if self.k > data_instances.count() or self.k < 2:
            raise ValueError('K is too large or too small for current data')
# Get initialized centroid
np.random.seed(self.random_stat)
if self.role == consts.GUEST:
first_centroid_key = self.get_centroid(data_instances)
self.transfer_variable.centroid_list.remote(
first_centroid_key, role=consts.HOST, idx=-1)
else:
first_centroid_key = self.transfer_variable.centroid_list.get(
idx=0)
key_table = session.parallelize(tuple(zip(first_centroid_key, first_centroid_key)),
partition=data_instances.partitions, include_key=True)
centroid_list = list(key_table.join(
data_instances, lambda v1, v2: v2.features).collect())
self.centroid_list = [v[1] for v in centroid_list]
while self.n_iter_ < self.max_iter:
self.send_cluster_dist(self.n_iter_, self.centroid_list)
d = functools.partial(
self.educl_dist, centroid_list=self.centroid_list)
dist_all_table = data_instances.mapValues(d)
LOGGER.debug('sending model, suffix is {}'.format((self.n_iter_)))
self.aggregator.send_model(dist_all_table, suffix=(self.n_iter_, ))
cluster_result = self.aggregator.get_aggregated_model(
suffix=(self.n_iter_, ))
centroid_new, self.cluster_count = self.centroid_cal(
cluster_result, data_instances)
client_tol = np.sum(
np.sum((np.array(self.centroid_list) - np.array(centroid_new)) ** 2, axis=1))
self.client_tol.remote(
client_tol, role=consts.ARBITER, idx=0, suffix=(self.n_iter_,))
self.is_converged = self.transfer_variable.arbiter_tol.get(
idx=0, suffix=(self.n_iter_,))
self.centroid_list = centroid_new
self.cluster_result = cluster_result
self.n_iter_ += 1
LOGGER.info(
f"iter: {self.n_iter_}, is_converged: {self.is_converged}")
if self.is_converged:
break
# calculate final round dbi
self.extra_dbi(data_instances, self.n_iter_, self.centroid_list)
centroid_new, self.cluster_count = self.centroid_cal(
self.cluster_result, data_instances)
self.extra_dbi(data_instances, self.n_iter_ + 1, centroid_new)
# LOGGER.debug(f"Final centroid list: {self.centroid_list}")
def extra_dbi(self, data_instances, suffix, centroids):
d = functools.partial(self.educl_dist, centroid_list=centroids)
dist_all_table = data_instances.mapValues(d)
self.aggregator.send_model(dist_all_table, suffix=(suffix, ))
self.cluster_result = self.aggregator.get_aggregated_model(
suffix=(suffix, ))
self.send_cluster_dist(suffix, centroids)
def send_cluster_dist(self, suffix, centroids):
cluster_dist = self.centroid_dist(centroids)
self.aggregator.send_model(NumpyWeights(
np.array(cluster_dist)), suffix=('cluster_dist', suffix,))
def predict(self, data_instances):
LOGGER.info("Start predict ...")
self.header = self.get_header(data_instances)
self._abnormal_detection(data_instances)
d = functools.partial(
self.educl_dist, centroid_list=self.centroid_list)
dist_all_table = data_instances.mapValues(d)
self.aggregator.send_model(dist_all_table, suffix=('predict', ))
cluster_result = self.aggregator.get_aggregated_model(
suffix=('predict', ))
centroid_new, self.cluster_count = self.centroid_cal(
cluster_result, data_instances)
d = functools.partial(self.educl_dist, centroid_list=centroid_new)
dist_all_table = data_instances.mapValues(d)
self.aggregator.send_model(dist_all_table, suffix=('predict_dbi', ))
cluster_result_dbi = self.aggregator.get_aggregated_model(
suffix=('predict_dbi', ))
cluster_dist = self.centroid_dist(centroid_new)
self.aggregator.send_model(NumpyWeights(
np.array(cluster_dist)), suffix=('predict_cluster_dist', ))
LOGGER.debug(f"first_data: {data_instances.first()[1].__dict__}")
predict_result = data_instances.join(cluster_result, lambda v1, v2: Instance(
features=[v1.label, int(v2)], inst_id=v1.inst_id))
LOGGER.debug(f"predict_data: {predict_result.first()[1].__dict__}")
return predict_result
class HeteroKmeansGuest(HeteroKmeansClient):
def __init__(self):
super(HeteroKmeansGuest, self).__init__()
self.client_dist = self.transfer_variable.guest_dist
self.client_tol = self.transfer_variable.guest_tol
class HeteroKmeansHost(HeteroKmeansClient):
def __init__(self):
super(HeteroKmeansHost, self).__init__()
self.client_dist = self.transfer_variable.host_dist
self.client_tol = self.transfer_variable.host_tol
| 8,900 | 40.985849 | 95 |
py
|
FATE
|
FATE-master/python/federatedml/toy_example/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/toy_example/secure_add_guest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.model_base import ModelBase, ComponentOutput
from federatedml.param.secure_add_example_param import SecureAddExampleParam
from federatedml.transfer_variable.transfer_class.secure_add_example_transfer_variable import \
SecureAddExampleTransferVariable
from federatedml.util import LOGGER
class SecureAddGuest(ModelBase):
def __init__(self):
super(SecureAddGuest, self).__init__()
self.x = None
self.x1 = None
self.x2 = None
self.y1 = None
self.x1_plus_y1 = None
self.data_num = None
self.partition = None
self.seed = None
self.transfer_inst = SecureAddExampleTransferVariable()
self.model_param = SecureAddExampleParam()
self.data_output = None
self.model_output = None
def _init_runtime_parameters(self, cpn_input):
self.model_param.update(cpn_input.parameters)
self._init_model()
def _init_model(self):
self.data_num = self.model_param.data_num
self.partition = self.model_param.partition
self.seed = self.model_param.seed
def _init_data(self):
kvs = [(i, 1) for i in range(self.data_num)]
self.x = session.parallelize(kvs, include_key=True, partition=self.partition)
def share(self, x):
first = np.random.uniform(x, -x)
return first, x - first
def secure(self):
x_shares = self.x.mapValues(self.share)
self.x1 = x_shares.mapValues(lambda shares: shares[0])
self.x2 = x_shares.mapValues(lambda shares: shares[1])
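        # Added note on the additive secret sharing above: each value x is split as
        # x = x1 + x2 with x1 drawn at random and x2 = x - x1; the guest keeps x1 and
        # ships x2 to the host, so neither party sees the other's raw values while
        # sum(x1) + sum(x2) still reconstructs the true total.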
def add(self):
self.x1_plus_y1 = self.x1.join(self.y1, lambda x, y: x + y)
guest_sum = self.x1_plus_y1.reduce(lambda x, y: x + y)
return guest_sum
def reconstruct(self, guest_sum, host_sum):
print("host sum is %.4f" % host_sum)
print("guest sum is %.4f" % guest_sum)
secure_sum = host_sum + guest_sum
print("Secure Add Result is %.4f" % secure_sum)
return secure_sum
def sync_share_to_host(self):
self.transfer_inst.guest_share.remote(self.x2,
role="host",
idx=0)
def recv_share_from_host(self):
self.y1 = self.transfer_inst.host_share.get(idx=0)
def recv_host_sum_from_host(self):
host_sum = self.transfer_inst.host_sum.get(idx=0)
return host_sum
def run(self, cpn_input):
LOGGER.info("begin to init parameters of secure add example guest")
self._init_runtime_parameters(cpn_input)
LOGGER.info("begin to make guest data")
self._init_data()
LOGGER.info("split data into two random parts")
self.secure()
LOGGER.info("share one random part data to host")
self.sync_share_to_host()
LOGGER.info("get share of one random part data from host")
self.recv_share_from_host()
LOGGER.info("begin to get sum of guest and host")
guest_sum = self.add()
LOGGER.info("receive host sum from guest")
host_sum = self.recv_host_sum_from_host()
secure_sum = self.reconstruct(guest_sum, host_sum)
assert (np.abs(secure_sum - self.data_num * 2) < 1e-6)
LOGGER.info("success to calculate secure_sum, it is {}".format(secure_sum))
return ComponentOutput(self.save_data(), self.export_model(), self.save_cache())
| 4,172 | 32.384 | 95 |
py
|
FATE
|
FATE-master/python/federatedml/toy_example/secure_add_host.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.model_base import ModelBase, ComponentOutput
from federatedml.param.secure_add_example_param import SecureAddExampleParam
from federatedml.transfer_variable.transfer_class.secure_add_example_transfer_variable import \
SecureAddExampleTransferVariable
from federatedml.util import LOGGER
class SecureAddHost(ModelBase):
def __init__(self):
super(SecureAddHost, self).__init__()
self.y = None
self.y1 = None
self.y2 = None
self.x2 = None
self.x2_plus_y2 = None
self.transfer_inst = SecureAddExampleTransferVariable()
self.model_param = SecureAddExampleParam()
self.data_output = None
self.model_output = None
def _init_runtime_parameters(self, cpn_input):
self.model_param.update(cpn_input.parameters)
self._init_model()
def _init_model(self):
self.data_num = self.model_param.data_num
self.partition = self.model_param.partition
self.seed = self.model_param.seed
def _init_data(self):
kvs = [(i, 1) for i in range(self.data_num)]
self.y = session.parallelize(kvs, include_key=True, partition=self.partition)
def share(self, y):
first = np.random.uniform(y, -y)
return first, y - first
def secure(self):
y_shares = self.y.mapValues(self.share)
self.y1 = y_shares.mapValues(lambda shares: shares[0])
self.y2 = y_shares.mapValues(lambda shares: shares[1])
def add(self):
self.x2_plus_y2 = self.y2.join(self.x2, lambda y, x: y + x)
host_sum = self.x2_plus_y2.reduce(lambda x, y: x + y)
return host_sum
def sync_share_to_guest(self):
self.transfer_inst.host_share.remote(self.y1,
role="guest",
idx=0)
def recv_share_from_guest(self):
self.x2 = self.transfer_inst.guest_share.get(idx=0)
def sync_host_sum_to_guest(self, host_sum):
self.transfer_inst.host_sum.remote(host_sum,
role="guest",
idx=0)
def run(self, cpn_input):
LOGGER.info("begin to init parameters of secure add example host")
self._init_runtime_parameters(cpn_input)
LOGGER.info("begin to make host data")
self._init_data()
LOGGER.info("split data into two random parts")
self.secure()
LOGGER.info("get share of one random part data from guest")
self.recv_share_from_guest()
LOGGER.info("share one random part data to guest")
self.sync_share_to_guest()
LOGGER.info("begin to get sum of host and guest")
host_sum = self.add()
LOGGER.info("send host sum to guest")
self.sync_host_sum_to_guest(host_sum)
return ComponentOutput(self.save_data(), self.export_model(), self.save_cache())
| 3,686 | 33.783019 | 95 |
py
|
FATE
|
FATE-master/python/federatedml/one_vs_rest/one_vs_rest.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import functools
from federatedml.model_base import ModelBase
from federatedml.transfer_variable.transfer_class.one_vs_rest_transfer_variable import OneVsRestTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.classify_label_checker import ClassifyLabelChecker
from federatedml.util.io_check import assert_io_num_rows_equal
class OneVsRest(object):
def __init__(self, classifier, role, mode, has_arbiter):
self.classifier = classifier
self.transfer_variable = OneVsRestTransferVariable()
self.classes = None
self.role = role
self.mode = mode
self.flow_id = 0
self.has_arbiter = has_arbiter
self.models = []
self.class_name = self.__class__.__name__
@staticmethod
def __get_multi_class_res(instance, classes):
"""
        return the class with the maximum probability, that maximum probability, and a dict mapping each class to its probability
"""
max_prob = -1
max_prob_index = -1
instance_with_class = {}
for (i, prob) in enumerate(instance):
instance_with_class[classes[i]] = prob
if prob > max_prob:
max_prob = prob
max_prob_index = i
return classes[max_prob_index], max_prob, instance_with_class
def get_data_classes(self, data_instances):
"""
get all classes in data_instances
"""
class_set = None
if self.has_label:
num_class, class_list = ClassifyLabelChecker.validate_label(data_instances)
class_set = set(class_list)
self._synchronize_classes_list(class_set)
return self.classes
@staticmethod
def _mask_data_label(data_instances, label):
"""
mask the instance.label to 1 if equals to label and 0 if not
"""
def do_mask_label(instance):
instance.label = (1 if (instance.label == label) else 0)
return instance
        data_instances = data_instances.mapValues(do_mask_label)
return data_instances
def _sync_class_guest(self, class_set):
raise NotImplementedError("Function should not be called here")
def _sync_class_host(self, class_set):
raise NotImplementedError("Function should not be called here")
def _sync_class_arbiter(self):
raise NotImplementedError("Function should not be called here")
def _synchronize_classes_list(self, class_set):
"""
        Guest gathers the classes found in host data, aggregates them with its own classes, and then sends the
        aggregated classes (whose count equals the number of binary classifiers to train) to host and arbiter.
"""
if self.role == consts.GUEST:
self._sync_class_guest(class_set)
elif self.role == consts.HOST:
self._sync_class_host(class_set)
else:
self._sync_class_arbiter()
@property
def has_label(self):
raise NotImplementedError("Function should not be called here")
def fit(self, data_instances=None, validate_data=None):
"""
Fit OneVsRest model
Parameters:
----------
data_instances: Table of instances
"""
LOGGER.info("mode is {}, role is {}, start to one_vs_rest fit".format(self.mode, self.role))
LOGGER.info("Total classes:{}".format(self.classes))
self.classifier.callback_one_vs_rest = True
current_flow_id = self.classifier.flowid
summary_dict = {}
for label_index, label in enumerate(self.classes):
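            # one binary sub-model per class: the current class is treated as the positive label (1) and all others as 0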
LOGGER.info("Start to train OneVsRest with label_index:{}, label:{}".format(label_index, label))
classifier = copy.deepcopy(self.classifier)
classifier.need_one_vs_rest = False
classifier.set_flowid(".".join([current_flow_id, "model_" + str(label_index)]))
if self.has_label:
header = data_instances.schema.get("header")
data_instances_mask_label = self._mask_data_label(data_instances, label=label)
data_instances_mask_label.schema['header'] = header
if validate_data is not None:
validate_mask_label_data = self._mask_data_label(validate_data, label=label)
validate_mask_label_data.schema['header'] = header
else:
validate_mask_label_data = validate_data
LOGGER.info("finish mask label:{}".format(label))
LOGGER.info("start classifier fit")
classifier.fit_binary(data_instances_mask_label, validate_data=validate_mask_label_data)
else:
LOGGER.info("start classifier fit")
classifier.fit_binary(data_instances, validate_data=validate_data)
_summary = classifier.summary()
_summary['one_vs_rest'] = True
summary_dict[label] = _summary
self.models.append(classifier)
if hasattr(self, "header"):
header = getattr(self, "header")
if header is None:
setattr(self, "header", getattr(classifier, "header"))
LOGGER.info("Finish model_{} training!".format(label_index))
self.classifier.set_summary(summary_dict)
def _comprehensive_result(self, predict_res_list):
"""
prob result is available for guest party only.
"""
if self.role == consts.GUEST:
# assert 1 == 2, f"predict_res_list: {predict_res_list[0].first()[1].features}"
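            # collect each sub-model's positive-class probability (features[2] of its predict result) into one list per instance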
prob = predict_res_list[0].mapValues(lambda r: [r.features[2]])
for predict_res in predict_res_list[1:]:
prob = prob.join(predict_res, lambda p, r: p + [r.features[2]])
else:
prob = None
return prob
@assert_io_num_rows_equal
def predict(self, data_instances):
"""
Predict OneVsRest model
Parameters:
----------
data_instances: Table of instances
        Returns:
        ----------
        predict_res: Table; if produced, it contains the ground-truth label, the predicted probability and the predicted label
"""
LOGGER.info("Start one_vs_all predict procedure.")
predict_res_list = []
for i, model in enumerate(self.models):
current_flow_id = model.flowid
model.set_flowid(".".join([current_flow_id, "model_" + str(i)]))
LOGGER.info("Start to predict with model:{}".format(i))
# model.set_flowid("predict_" + str(i))
single_predict_res = model.predict(data_instances)
predict_res_list.append(single_predict_res)
prob = self._comprehensive_result(predict_res_list)
if prob:
# f = functools.partial(self.__get_multi_class_res, classes=list(self.classes))
# multi_classes_res = prob.mapValues(f)
# predict_res = data_instances.join(multi_classes_res, lambda d, m: [d.label, m[0], m[1], m[2]])
# def _transfer(instance, pred_res):
# return Instance(features=pred_res, inst_id=instance.inst_id)
# predict_res = data_instances.join(predict_res, _transfer)
predict_res = ModelBase.predict_score_to_output(data_instances, prob, list(self.classes))
else:
predict_res = None
#
# LOGGER.info("finish OneVsRest Predict, return predict results.")
return predict_res
def save(self, single_model_pb):
"""
        Save each classifier model of OneVsRest. For now it only includes model_param, not model_meta.
"""
classifier_pb_objs = []
for classifier in self.models:
single_param_dict = classifier.get_single_model_param()
classifier_pb_objs.append(single_model_pb(**single_param_dict))
one_vs_rest_class = [str(x) for x in self.classes]
one_vs_rest_result = {
'completed_models': classifier_pb_objs,
'one_vs_rest_classes': one_vs_rest_class
}
return one_vs_rest_result
def load_model(self, one_vs_rest_result):
"""
Load OneVsRest model
"""
completed_models = one_vs_rest_result.completed_models
one_vs_rest_classes = one_vs_rest_result.one_vs_rest_classes
self.classes = [int(x) for x in one_vs_rest_classes] # Support other label type in the future
self.models = []
for classifier_obj in list(completed_models):
classifier = copy.deepcopy(self.classifier)
classifier.load_single_model(classifier_obj)
classifier.need_one_vs_rest = False
self.models.append(classifier)
class HomoOneVsRest(OneVsRest):
def __init__(self, classifier, role, mode, has_arbiter):
super().__init__(classifier, role, mode, has_arbiter)
self.header = None
def set_header(self, header):
self.header = header
@property
def has_label(self):
if self.role == consts.ARBITER:
return False
return True
def _sync_class_guest(self, class_set):
host_classes_list = self.transfer_variable.host_classes.get(idx=-1)
for host_class in host_classes_list:
class_set = class_set | host_class
self.classes = list(class_set)
self.transfer_variable.aggregate_classes.remote(self.classes,
role=consts.HOST,
idx=-1)
if self.has_arbiter:
class_num = len(self.classes)
self.transfer_variable.aggregate_classes.remote(class_num,
role=consts.ARBITER,
idx=0)
def _sync_class_host(self, class_set):
self.transfer_variable.host_classes.remote(class_set,
role=consts.GUEST,
idx=0)
self.classes = self.transfer_variable.aggregate_classes.get(idx=0)
def _sync_class_arbiter(self):
class_nums = self.transfer_variable.aggregate_classes.get(idx=0)
self.classes = [x for x in range(class_nums)]
class HeteroOneVsRest(OneVsRest):
@property
def has_label(self):
if self.role == consts.GUEST:
return True
return False
def _sync_class_guest(self, class_set):
self.classes = list(class_set)
class_num = len(self.classes)
self.transfer_variable.aggregate_classes.remote(class_num,
role=consts.HOST,
idx=-1)
if self.has_arbiter:
self.transfer_variable.aggregate_classes.remote(class_num,
role=consts.ARBITER,
idx=0)
def _sync_class_host(self, class_set):
LOGGER.debug("Start to get aggregate classes")
class_nums = self.transfer_variable.aggregate_classes.get(idx=0)
self.classes = [x for x in range(class_nums)]
def _sync_class_arbiter(self):
class_nums = self.transfer_variable.aggregate_classes.get(idx=0)
self.classes = [x for x in range(class_nums)]
def one_vs_rest_factory(classifier, role, mode, has_arbiter):
LOGGER.info("Create one_vs_rest object, role: {}, mode: {}".format(role, mode))
if mode == consts.HOMO:
return HomoOneVsRest(classifier, role, mode, has_arbiter)
elif mode == consts.HETERO:
return HeteroOneVsRest(classifier, role, mode, has_arbiter)
else:
raise ValueError(f"Cannot recognize mode: {mode} in one vs rest")
| 12,563 | 38.509434 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/one_vs_rest/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/cipher_compressor/compressor.py
|
import math
from abc import ABC
from abc import abstractmethod
from federatedml.util import LOGGER
from federatedml.secureprotol import PaillierEncrypt, IpclPaillierEncrypt
from federatedml.transfer_variable.transfer_class.cipher_compressor_transfer_variable \
import CipherCompressorTransferVariable
def get_homo_encryption_max_int(encrypter):
if isinstance(encrypter, (PaillierEncrypt, IpclPaillierEncrypt)):
max_pos_int = encrypter.public_key.max_int
min_neg_int = -max_pos_int
else:
raise ValueError('unknown encryption type')
return max_pos_int, min_neg_int
def cipher_compress_advisor(encrypter, plaintext_bit_len):
max_pos_int, min_neg_int = get_homo_encryption_max_int(encrypter)
max_bit_len = max_pos_int.bit_length()
capacity = max_bit_len // plaintext_bit_len
return capacity
class CipherPackage(ABC):
@abstractmethod
def add(self, obj):
pass
@abstractmethod
def unpack(self, decrypter):
pass
@abstractmethod
def has_space(self):
pass
class PackingCipherTensor(object):
"""
A naive realization of cipher tensor
"""
def __init__(self, ciphers):
if isinstance(ciphers, list):
if len(ciphers) == 1:
self.ciphers = ciphers[0]
else:
self.ciphers = ciphers
self.dim = len(ciphers)
else:
self.ciphers = ciphers
self.dim = 1
def __add__(self, other):
new_cipher_list = []
if isinstance(other, PackingCipherTensor):
assert self.dim == other.dim
if self.dim == 1:
return PackingCipherTensor(self.ciphers + other.ciphers)
for c1, c2 in zip(self.ciphers, other.ciphers):
new_cipher_list.append(c1 + c2)
return PackingCipherTensor(ciphers=new_cipher_list)
else:
# scalar / single en num
if self.dim == 1:
return PackingCipherTensor(self.ciphers + other)
for c in self.ciphers:
new_cipher_list.append(c + other)
return PackingCipherTensor(ciphers=new_cipher_list)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return other + (self * -1)
def __mul__(self, other):
if self.dim == 1:
return PackingCipherTensor(self.ciphers * other)
new_cipher_list = []
for c in self.ciphers:
new_cipher_list.append(c * other)
return PackingCipherTensor(new_cipher_list)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.__mul__(1 / other)
def __repr__(self):
return "[" + self.ciphers.__repr__() + "], dim {}".format(self.dim)
class NormalCipherPackage(CipherPackage):
def __init__(self, padding_length, max_capacity):
self._padding_num = 2 ** padding_length
self.max_capacity = max_capacity
self._cipher_text = None
self._capacity_left = max_capacity
self._has_space = True
def add(self, cipher_text):
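        # pack by shift-and-add on the ciphertext: multiplying by 2**padding_length shifts the packed
        # plaintext left, then homomorphic addition appends the new value in the freed low bits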
if self._capacity_left == 0:
raise ValueError('cipher number exceeds package max capacity')
if self._cipher_text is None:
self._cipher_text = cipher_text
else:
self._cipher_text = self._cipher_text * self._padding_num
self._cipher_text = self._cipher_text + cipher_text
self._capacity_left -= 1
if self._capacity_left == 0:
self._has_space = False
def unpack(self, decrypter):
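        # decrypt the packed ciphertext once, then peel the values off the low end with mask-and-shift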
if isinstance(decrypter, (PaillierEncrypt, IpclPaillierEncrypt)):
compressed_plain_text = decrypter.raw_decrypt(self._cipher_text)
else:
raise ValueError('unknown decrypter: {}'.format(type(decrypter)))
if self.cur_cipher_contained() == 1:
return [compressed_plain_text]
unpack_result = []
bit_len = (self._padding_num - 1).bit_length()
for i in range(self.cur_cipher_contained()):
num = (compressed_plain_text & (self._padding_num - 1))
compressed_plain_text = compressed_plain_text >> bit_len
unpack_result.insert(0, num)
return unpack_result
def has_space(self):
return self._has_space
def cur_cipher_contained(self):
return self.max_capacity - self._capacity_left
def retrieve(self):
return self._cipher_text
class PackingCipherTensorPackage(CipherPackage):
"""
    A naive realization of a compressible cipher tensor (only the last dimension is compressed, because the
    preceding ciphers leave no spare space for compressing)
"""
    def __init__(self, padding_length, max_capacity):
self.cached_list = []
self.compressed_cipher = []
self.compressed_dim = -1
self.not_compress_len = None
        self.normal_package = NormalCipherPackage(padding_length, max_capacity)
def add(self, obj: PackingCipherTensor):
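        # only the last cipher of the tensor is packed; the leading ciphers are cached as-is because they carry no spare bits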
if self.normal_package.has_space():
if obj.dim == 1:
self.normal_package.add(obj.ciphers)
else:
self.cached_list.extend(obj.ciphers[:-1])
self.not_compress_len = len(obj.ciphers[:-1])
self.normal_package.add(obj.ciphers[-1])
else:
raise ValueError('have no space for compressing')
def unpack(self, decrypter):
compressed_part = self.normal_package.unpack(decrypter)
de_rs = []
if len(self.cached_list) != 0:
de_rs = decrypter.recursive_raw_decrypt(self.cached_list)
if len(de_rs) == 0:
return [[i] for i in compressed_part]
else:
rs = []
idx_0, idx_1 = 0, 0
while idx_0 < len(self.cached_list):
rs.append(de_rs[idx_0: idx_0 + self.not_compress_len] + [compressed_part[idx_1]])
idx_0 += self.not_compress_len
idx_1 += 1
return rs
def has_space(self):
return self.normal_package.has_space()
class CipherCompressorHost(object):
def __init__(self, package_class=PackingCipherTensorPackage, sync_para=True):
"""
Parameters
----------
        package_class: class used to construct compressed cipher packages
"""
self._package_class = package_class
self._padding_length, self._capacity = None, None
if sync_para:
self.transfer_var = CipherCompressorTransferVariable()
            # received from guest
self._padding_length, self._capacity = self.transfer_var.compress_para.get(idx=0)
LOGGER.debug("received parameter from guest is {} {}".format(self._padding_length, self._capacity))
def compress(self, encrypted_obj_list):
rs = []
encrypted_obj_list = list(encrypted_obj_list)
cur_package = self._package_class(self._padding_length, self._capacity)
for c in encrypted_obj_list:
if not cur_package.has_space():
rs.append(cur_package)
cur_package = self._package_class(self._padding_length, self._capacity)
cur_package.add(c)
rs.append(cur_package)
return rs
def compress_dtable(self, table):
rs = table.mapValues(self.compress)
return rs
if __name__ == '__main__':
a = PackingCipherTensor([1, 2, 3, 4])
b = PackingCipherTensor([2, 3, 4, 5])
c = PackingCipherTensor(124)
d = PackingCipherTensor([114514])
| 7,651 | 29.365079 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/cipher_compressor/packer.py
|
import functools
from federatedml.util import LOGGER
from federatedml.secureprotol import PaillierEncrypt, IpclPaillierEncrypt
from federatedml.cipher_compressor.compressor import get_homo_encryption_max_int
from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
from federatedml.cipher_compressor.compressor import PackingCipherTensor
from federatedml.cipher_compressor.compressor import CipherPackage
from federatedml.transfer_variable.transfer_class.cipher_compressor_transfer_variable \
import CipherCompressorTransferVariable
from federatedml.util import consts
from typing import Union
def cipher_list_to_cipher_tensor(cipher_list: list):
cipher_tensor = PackingCipherTensor(ciphers=cipher_list)
return cipher_tensor
class GuestIntegerPacker(object):
def __init__(self, pack_num: int, pack_num_range: list, encrypter: Union[PaillierEncrypt, IpclPaillierEncrypt],
sync_para=True):
"""
        pack_num: number of integers to pack; they must be POSITIVE integers
        pack_num_range: list of integers giving the value range (maximum) of every integer to pack
        encrypter: Paillier encrypter whose plaintext space determines the packing capacity
        sync_para: whether to send the suggested cipher-compression parameters to the hosts
"""
self._pack_num = pack_num
assert len(pack_num_range) == self._pack_num, 'list len must equal to pack_num'
self._pack_num_range = pack_num_range
self._pack_num_bit = [i.bit_length() for i in pack_num_range]
self.encrypter = encrypter
max_pos_int, _ = get_homo_encryption_max_int(self.encrypter)
self._max_int = max_pos_int
self._max_bit = self._max_int.bit_length() - 1 # reserve 1 bit, in case overflow
# sometimes max_int is not able to hold all num need to be packed, so we
# use more than one large integer to pack them all
self.bit_assignment = []
tmp_list = []
bit_count = 0
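        # greedily group the per-number bit widths into as few large plaintext integers as the key capacity allows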
for bit_len in self._pack_num_bit:
if bit_count + bit_len >= self._max_bit:
if bit_count == 0:
                    raise ValueError('unable to pack this number within the current integer capacity')
self.bit_assignment.append(tmp_list)
tmp_list = []
bit_count = 0
bit_count += bit_len
tmp_list.append(bit_len)
if len(tmp_list) != 0:
self.bit_assignment.append(tmp_list)
self._pack_int_needed = len(self.bit_assignment)
# transfer variable
compress_parameter = self.cipher_compress_suggest()
if sync_para:
self.trans_var = CipherCompressorTransferVariable()
self.trans_var.compress_para.remote(compress_parameter, role=consts.HOST, idx=-1)
LOGGER.debug('int packer init done, bit assign is {}, compress para is {}'.format(self.bit_assignment,
compress_parameter))
def cipher_compress_suggest(self):
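        # only the last packed integer group leaves headroom, so one ciphertext can hold max_bit // total_bit_count of them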
compressible = self.bit_assignment[-1]
total_bit_count = sum(compressible)
compress_num = self._max_bit // total_bit_count
padding_bit = total_bit_count
return padding_bit, compress_num
def pack_int_list(self, int_list: list):
assert len(int_list) == self._pack_num, 'list length is not equal to pack_num'
start_idx = 0
rs = []
for bit_assign_of_one_int in self.bit_assignment:
to_pack = int_list[start_idx: start_idx + len(bit_assign_of_one_int)]
packing_rs = self._pack_fix_len_int_list(to_pack, bit_assign_of_one_int)
rs.append(packing_rs)
start_idx += len(bit_assign_of_one_int)
return rs
def _pack_fix_len_int_list(self, int_list: list, bit_assign: list):
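        # pack left-to-right: shift the accumulated value by the next number's bit width, then add that number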
result = int_list[0]
for i, offset in zip(int_list[1:], bit_assign[1:]):
result = result << offset
result += i
return result
def unpack_an_int(self, integer: int, bit_assign_list: list):
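        # reverse of packing: mask out each number's low bits, record it, then shift right to expose the next one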
rs_list = []
for bit_assign in reversed(bit_assign_list[1:]):
mask_int = (2**bit_assign) - 1
unpack_int = integer & mask_int
rs_list.append(unpack_int)
integer = integer >> bit_assign
rs_list.append(integer)
return list(reversed(rs_list))
def pack(self, data_table):
packing_data_table = data_table.mapValues(self.pack_int_list)
return packing_data_table
def pack_and_encrypt(self, data_table, post_process_func=cipher_list_to_cipher_tensor):
packing_data_table = self.pack(data_table)
en_packing_data_table = self.encrypter.distribute_raw_encrypt(packing_data_table)
if post_process_func:
en_packing_data_table = en_packing_data_table.mapValues(post_process_func)
return en_packing_data_table
def unpack_result(self, decrypted_result_list: list, post_func=None):
final_rs = []
for l_ in decrypted_result_list:
rs_list = self.unpack_an_int_list(l_, post_func)
final_rs.append(rs_list)
return final_rs
def unpack_an_int_list(self, int_list, post_func=None):
assert len(int_list) == len(self.bit_assignment), 'length of integer list is not equal to bit_assignment'
rs_list = []
for idx, integer in enumerate(int_list):
unpack_list = self.unpack_an_int(integer, self.bit_assignment[idx])
if post_func:
unpack_list = post_func(unpack_list)
rs_list.extend(unpack_list)
return rs_list
def decrypt_cipher_packages(self, content):
if isinstance(content, list):
assert issubclass(type(content[0]), CipherPackage), 'content is not CipherPackages'
decrypt_rs = []
for i in content:
unpack_ = i.unpack(self.encrypter)
decrypt_rs += unpack_
return decrypt_rs
else:
raise ValueError('illegal input type')
def decrypt_cipher_package_and_unpack(self, data_table):
de_func = functools.partial(self.decrypt_cipher_packages)
de_table = data_table.mapValues(de_func)
unpack_table = de_table.mapValues(self.unpack_result)
return unpack_table
| 6,352 | 37.041916 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/cipher_compressor/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/cipher_compressor/test/packing_tensor_test.py
|
import unittest
import numpy as np
from federatedml.cipher_compressor.compressor import PackingCipherTensor
from federatedml.secureprotol import PaillierEncrypt
class TestPackingCipherTensor(unittest.TestCase):
def setUp(self):
print('init testing')
def test_plain_add_sub_mul(self):
a = PackingCipherTensor([1, 2, 3, 4])
b = PackingCipherTensor([2, 3, 4, 5])
c = PackingCipherTensor(124)
d = PackingCipherTensor([114514])
print(a + b)
print(b + a)
print(c * 123)
print(d * 314)
print(12 * a)
print(a * 2)
print(a / 12)
print(b - a)
print(d + 3)
print('plain test done')
print('*' * 30)
def test_cipher_add_sub_mul(self):
encrypter = PaillierEncrypt()
encrypter.generate_key(1024)
en_1, en_2, en_3, en_4 = encrypter.encrypt(1), encrypter.encrypt(2), encrypter.encrypt(3), encrypter.encrypt(4)
en_5, en_6, en_7, en_8 = encrypter.encrypt(5), encrypter.encrypt(6), encrypter.encrypt(7), encrypter.encrypt(8)
a = PackingCipherTensor([en_1, en_2, en_3, en_4])
b = PackingCipherTensor([en_5, en_6, en_7, en_8])
c = PackingCipherTensor(encrypter.encrypt(1))
d = PackingCipherTensor([encrypter.encrypt(5)])
rs_1 = a + b
rs_2 = b - a
rs_3 = c + d
rs_4 = 123 * c
rs_5 = d * 456
rs_6 = a * 114
print(encrypter.recursive_decrypt(rs_1.ciphers))
print(encrypter.recursive_decrypt(rs_2.ciphers))
print(encrypter.recursive_decrypt(rs_3.ciphers))
print(encrypter.decrypt(rs_4.ciphers))
print(encrypter.decrypt(rs_5.ciphers))
print(encrypter.recursive_decrypt(rs_6.ciphers))
print('cipher test done')
print('*' * 30)
if __name__ == '__main__':
unittest.main()
| 1,871 | 29.688525 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/components/intersection.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta
intersection_cpn_meta = ComponentMeta("Intersection")
@intersection_cpn_meta.bind_param
def intersection_param():
from federatedml.param.intersect_param import IntersectParam
return IntersectParam
@intersection_cpn_meta.bind_runner.on_guest
def intersection_guest_runner():
from federatedml.statistic.intersect.intersect_model import IntersectGuest
return IntersectGuest
@intersection_cpn_meta.bind_runner.on_host
def intersection_host_runner():
from federatedml.statistic.intersect.intersect_model import IntersectHost
return IntersectHost
| 1,232 | 27.674419 | 78 |
py
|