repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/ftl_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class FTLTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.guest_components = self._create_variable(name='guest_components', src=['guest'], dst=['host'])
self.y_overlap_2_phi_2 = self._create_variable(name='y_overlap_2_phi_2', src=['guest'], dst=['host'])
self.y_overlap_phi = self._create_variable(name='y_overlap_phi', src=['guest'], dst=['host'])
self.mapping_comp_a = self._create_variable(name='mapping_comp_a', src=['guest'], dst=['host'])
self.stop_flag = self._create_variable(name='stop_flag', src=['guest'], dst=['host'])
self.host_components = self._create_variable(name='host_components', src=['host'], dst=['guest'])
self.overlap_ub = self._create_variable(name='overlap_ub', src=['host'], dst=['guest'])
self.overlap_ub_2 = self._create_variable(name='overlap_ub_2', src=['host'], dst=['guest'])
self.mapping_comp_b = self._create_variable(name='mapping_comp_b', src=['host'], dst=['guest'])
self.host_side_gradients = self._create_variable(name='host_side_gradients', src=['host'], dst=['guest'])
self.guest_side_gradients = self._create_variable(name='guest_side_gradients', src=['guest'], dst=['host'])
self.guest_side_const = self._create_variable(name='guest_side_const', src=['guest'], dst=['host'])
self.encrypted_loss = self._create_variable(name='encrypted_loss', src=['guest'], dst=['host'])
self.decrypted_loss = self._create_variable(name='decrypted_loss', src=['host'], dst=['guest'])
self.decrypted_guest_gradients = self._create_variable(
name='decrypted_guest_gradients', src=['host'], dst=['guest'])
self.decrypted_guest_const = self._create_variable(name='decrypted_guest_const', src=['host'], dst=['guest'])
self.decrypted_host_gradients = self._create_variable(
name='decrypted_host_gradients', src=['guest'], dst=['host'])
self.predict_stop_flag = self._create_variable(name='predict_stop_flag', src=['host'], dst=['guest'])
self.predict_host_u = self._create_variable(name='predict_host_u', src=['host'], dst=['guest'])
self.encrypted_predict_score = self._create_variable(
name='encrypted_predict_score', src=['guest'], dst=['host'])
self.masked_predict_score = self._create_variable(name='masked_predict_score', src=['host'], dst=['guest'])
self.final_predict_score = self._create_variable(name='final_predict_score', src=['guest'], dst=['host'])
self.predict_batch_num = self._create_variable(name='predict_batch_num', src=['host'], dst=['guest'])
| 3,684 | 61.457627 | 117 |
py
|
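For orientation, here is a hedged sketch of how a generated class like `FTLTransferVariable` is typically driven at runtime. It assumes FATE's federation API, in which each variable returned by `_create_variable()` exposes `remote()` on its `src` role and `get()` on its `dst` role; the `role`/`idx`/`suffix` keyword names follow FATE 1.x conventions and should be read as assumptions, not as facts from this dump.

```python
from federatedml.transfer_variable.transfer_class.ftl_transfer_variable import (
    FTLTransferVariable,
)

tv = FTLTransferVariable(flowid=0)  # flowid namespaces every variable for this job step
payload = {"phi": [0.1, 0.2]}       # stand-in for the real y_overlap_phi matrix

# --- guest process: push the payload for iteration 3 to every host ---
tv.y_overlap_phi.remote(payload, role="host", idx=-1, suffix=(3,))

# --- host process: pull it back; the suffix must match the sender's ---
received = tv.y_overlap_phi.get(idx=0, suffix=(3,))
```

The per-iteration `suffix` is what keeps successive rounds of the same variable from colliding in the federation namespace.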
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/secure_information_retrieval_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class SecureInformationRetrievalTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.data_count = self._create_variable(name='data_count', src=['host'], dst=['guest'])
self.natural_indexation = self._create_variable(name='natural_indexation', src=['guest'], dst=['host'])
self.block_num = self._create_variable(name='block_num', src=['guest'], dst=['host'])
self.id_blocks_ciphertext = self._create_variable(name='id_blocks_ciphertext', src=['host'], dst=['guest'])
self.raw_id_list = self._create_variable(name='raw_id_list', src=['guest'], dst=['host'])
self.raw_value_list = self._create_variable(name='raw_value_list', src=['host'], dst=['guest'])
self.coverage = self._create_variable(name='coverage', src=['guest'], dst=['host'])
self.nonce_list = self._create_variable(name='nonce_list', src=['host'], dst=['guest'])
self.block_confirm = self._create_variable(name='block_confirm', src=['guest'], dst=['host'])
| 2,064 | 48.166667 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/homo_label_encoder_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HomoLabelEncoderTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.local_labels = self._create_variable(name='local_labels', src=['guest', 'host'], dst=['arbiter'])
self.label_mapping = self._create_variable(name='label_mapping', src=['arbiter'], dst=['host', 'guest'])
| 1,368 | 38.114286 | 112 |
py
|
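The `src`/`dst` lists above describe a collect-then-broadcast shape: every guest and host sends `local_labels` to the arbiter, which replies with one shared `label_mapping`. A hedged sketch of that exchange follows; `get(idx=-1)` returning one item per sending party and `remote(..., role=None)` broadcasting to all `dst` roles are FATE 1.x conventions assumed here, and the mapping construction is illustrative.

```python
from federatedml.transfer_variable.transfer_class.homo_label_encoder_transfer_variable import (
    HomoLabelEncoderTransferVariable,
)

tv = HomoLabelEncoderTransferVariable()

# --- guest / host process: report the labels seen locally ---
tv.local_labels.remote(["cat", "dog"], role="arbiter", idx=0)

# --- arbiter process: merge label sets and publish one mapping ---
all_labels = tv.local_labels.get(idx=-1)                 # one label list per party
classes = sorted(set().union(*map(set, all_labels)))     # union of every party's labels
mapping = {label: i for i, label in enumerate(classes)}  # shared label -> index table
tv.label_mapping.remote(mapping, role=None, idx=-1)      # broadcast to guest and host

# --- guest / host process: adopt the shared mapping ---
mapping = tv.label_mapping.get(idx=0)
```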
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_decision_tree_transfer_variable_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroDecisionTreeTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.dispatch_node_host = self._create_variable(name='dispatch_node_host', src=['guest'], dst=['host'])
self.dispatch_node_host_result = self._create_variable(
name='dispatch_node_host_result', src=['host'], dst=['guest'])
self.encrypted_grad_and_hess = self._create_variable(
name='encrypted_grad_and_hess', src=['guest'], dst=['host'])
self.encrypted_splitinfo_host = self._create_variable(
name='encrypted_splitinfo_host', src=['host'], dst=['guest'])
self.federated_best_splitinfo_host = self._create_variable(
name='federated_best_splitinfo_host', src=['guest'], dst=['host'])
self.final_splitinfo_host = self._create_variable(name='final_splitinfo_host', src=['host'], dst=['guest'])
self.node_positions = self._create_variable(name='node_positions', src=['guest'], dst=['host'])
self.predict_data = self._create_variable(name='predict_data', src=['guest'], dst=['host'])
self.predict_data_by_host = self._create_variable(name='predict_data_by_host', src=['host'], dst=['guest'])
self.predict_finish_tag = self._create_variable(name='predict_finish_tag', src=['guest'], dst=['host'])
self.tree = self._create_variable(name='tree', src=['guest'], dst=['host'])
self.tree_node_queue = self._create_variable(name='tree_node_queue', src=['guest'], dst=['host'])
self.host_cur_to_split_node_num = self._create_variable(
name='host_cur_to_split_node_num', src=['host'], dst=['guest'])
self.host_leafs = self._create_variable(name='host_leafs', src=['host'], dst=['guest'])
self.sync_flag = self._create_variable(name='sync_flag', src=['guest'], dst=['host'])
| 2,885 | 53.45283 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/homo_boosting_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HomoBoostingTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.tree_dim = self._create_variable(name='tree_dim', src=['guest', 'host'], dst=['arbiter'])
self.feature_number = self._create_variable(name='feature_number', src=['guest', 'host'], dst=['arbiter'])
        self.start_and_end_round = self._create_variable(
            name='start_and_end_round', src=['guest', 'host'], dst=['arbiter'])
self.stop_flag = self._create_variable(name='stop_flag', src=['arbiter'], dst=['guest', 'host'])
self.valid_features = self._create_variable(name='valid_features', src=['arbiter'], dst=['guest', 'host'])
| 1,733 | 42.35 | 114 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_secure_boost_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroSecureBoostingTreeTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.stop_flag = self._create_variable(name='stop_flag', src=['guest'], dst=['host'])
self.tree_dim = self._create_variable(name='tree_dim', src=['guest'], dst=['host'])
self.predict_start_round = self._create_variable(name='predict_start_round', src=['guest'], dst=['host'])
| 1,452 | 39.361111 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/homo_lr_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HomoLRTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.aggregated_model = self._create_variable(name='aggregated_model', src=['arbiter'], dst=['guest', 'host'])
self.dh_ciphertext_bc = self._create_variable(name='dh_ciphertext_bc', src=['arbiter'], dst=['guest', 'host'])
self.dh_ciphertext_guest = self._create_variable(name='dh_ciphertext_guest', src=['guest'], dst=['arbiter'])
self.dh_ciphertext_host = self._create_variable(name='dh_ciphertext_host', src=['host'], dst=['arbiter'])
self.dh_pubkey = self._create_variable(name='dh_pubkey', src=['arbiter'], dst=['guest', 'host'])
self.guest_loss = self._create_variable(name='guest_loss', src=['guest'], dst=['arbiter'])
self.guest_model = self._create_variable(name='guest_model', src=['guest'], dst=['arbiter'])
self.guest_party_weight = self._create_variable(name='guest_party_weight', src=['guest'], dst=['arbiter'])
self.guest_uuid = self._create_variable(name='guest_uuid', src=['guest'], dst=['arbiter'])
self.host_loss = self._create_variable(name='host_loss', src=['host'], dst=['arbiter'])
self.host_model = self._create_variable(name='host_model', src=['host'], dst=['arbiter'])
self.host_party_weight = self._create_variable(name='host_party_weight', src=['host'], dst=['arbiter'])
self.host_uuid = self._create_variable(name='host_uuid', src=['host'], dst=['arbiter'])
self.is_converge = self._create_variable(name='is_converge', src=['arbiter'], dst=['guest', 'host'])
self.paillier_pubkey = self._create_variable(name='paillier_pubkey', src=['arbiter'], dst=['host'])
self.predict_result = self._create_variable(name='predict_result', src=['arbiter'], dst=['host'])
self.predict_wx = self._create_variable(name='predict_wx', src=['host'], dst=['arbiter'])
self.re_encrypt_times = self._create_variable(name='re_encrypt_times', src=['host'], dst=['arbiter'])
self.re_encrypted_model = self._create_variable(name='re_encrypted_model', src=['arbiter'], dst=['host'])
self.to_encrypt_model = self._create_variable(name='to_encrypt_model', src=['host'], dst=['arbiter'])
self.use_encrypt = self._create_variable(name='use_encrypt', src=['host'], dst=['arbiter'])
self.uuid_conflict_flag = self._create_variable(
name='uuid_conflict_flag', src=['arbiter'], dst=['guest', 'host'])
| 3,515 | 61.785714 | 118 |
py
|
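Many of the variables above (`guest_model`, `host_model`, `*_party_weight`, `aggregated_model`) exist to move model parameters through an arbiter-side aggregation round. The exact formula FATE applies is not visible in this dump, so the sketch below assumes plain weighted averaging; the array shapes and weights are illustrative.

```python
import numpy as np

def aggregate(models, weights):
    """Weighted average of same-shaped parameter vectors."""
    w = np.asarray(weights, dtype=float)
    w /= w.sum()  # normalize the party weights
    return sum(wi * np.asarray(m, dtype=float) for wi, m in zip(w, models))

guest_model = [0.5, -1.0, 2.0]    # as received via tv.guest_model.get(...)
host_models = [[0.4, -0.8, 1.6]]  # one per host, via tv.host_model.get(idx=-1)
weights = [2.0, 1.0]              # guest_party_weight followed by host_party_weight

aggregated = aggregate([guest_model] + host_models, weights)
# the arbiter would then send this back via tv.aggregated_model.remote(...)
```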
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_secure_boosting_tree_transfer_variable_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroSecureBoostingTreeTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.stop_flag = self._create_variable(name='stop_flag', src=['guest'], dst=['host'])
self.tree_dim = self._create_variable(name='tree_dim', src=['guest'], dst=['host'])
self.predict_start_round = self._create_variable(name='predict_start_round', src=['guest'], dst=['host'])
| 1,452 | 39.361111 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_secure_boosting_predict_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroSecureBoostTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.predict_stop_flag = self._create_variable(name='predict_stop_flag', src=['guest'], dst=['host'])
self.guest_predict_data = self._create_variable(name='guest_predict_data', src=['guest'], dst=['host'])
self.host_predict_data = self._create_variable(name='host_predict_data', src=['host'], dst=['guest'])
self.inter_host_data = self._create_variable(name='inter_host_data', src=['host'], dst=['host'])
self.host_feature_importance = self._create_variable(
name='host_feature_importance', src=['host'], dst=['guest'])
| 1,717 | 43.051282 | 111 |
py
|
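`predict_stop_flag`, `guest_predict_data`, and `host_predict_data` suggest a round-based prediction loop in which the guest decides when tree traversal is finished. The control flow below is inferred from the variable names rather than taken from this dump, and `run_one_predict_round` / `step_local_trees` are hypothetical helpers.

```python
from federatedml.transfer_variable.transfer_class.hetero_secure_boosting_predict_transfer_variable import (
    HeteroSecureBoostTransferVariable,
)

tv = HeteroSecureBoostTransferVariable()

# --- guest process: drive rounds until traversal finishes ---
round_idx = 0
while True:
    finished, pending = run_one_predict_round(round_idx)  # hypothetical helper
    tv.predict_stop_flag.remote(finished, role="host", idx=-1, suffix=(round_idx,))
    if finished:
        break
    tv.guest_predict_data.remote(pending, role="host", idx=-1, suffix=(round_idx,))
    round_idx += 1

# --- host process: answer until the guest signals stop ---
round_idx = 0
while not tv.predict_stop_flag.get(idx=0, suffix=(round_idx,)):
    pending = tv.guest_predict_data.get(idx=0, suffix=(round_idx,))
    answered = step_local_trees(pending)                  # hypothetical helper
    tv.host_predict_data.remote(answered, role="guest", idx=0, suffix=(round_idx,))
    round_idx += 1
```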
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_nn_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroNNTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.batch_data_index = self._create_variable(name='batch_data_index', src=['guest'], dst=['host'])
self.batch_info = self._create_variable(name='batch_info', src=['guest'], dst=['host'])
self.dataset_info = self._create_variable(name='dataset_info', src=['guest'], dst=['host'])
self.is_converge = self._create_variable(name='is_converge', src=['guest'], dst=['host'])
| 1,538 | 40.594595 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_poisson_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroPoissonTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.batch_data_index = self._create_variable(name='batch_data_index', src=['guest'], dst=['host'])
self.batch_info = self._create_variable(name='batch_info', src=['guest'], dst=['host', 'arbiter'])
self.converge_flag = self._create_variable(name='converge_flag', src=['arbiter'], dst=['host', 'guest'])
self.fore_gradient = self._create_variable(name='fore_gradient', src=['guest'], dst=['host'])
self.guest_gradient = self._create_variable(name='guest_gradient', src=['guest'], dst=['arbiter'])
self.guest_optim_gradient = self._create_variable(name='guest_optim_gradient', src=['arbiter'], dst=['guest'])
self.host_forward = self._create_variable(name='host_forward', src=['host'], dst=['guest'])
self.host_gradient = self._create_variable(name='host_gradient', src=['host'], dst=['arbiter'])
self.host_loss_regular = self._create_variable(name='host_loss_regular', src=['host'], dst=['guest'])
self.host_optim_gradient = self._create_variable(name='host_optim_gradient', src=['arbiter'], dst=['host'])
self.host_partial_prediction = self._create_variable(
name='host_partial_prediction', src=['host'], dst=['guest'])
self.loss = self._create_variable(name='loss', src=['guest'], dst=['arbiter'])
self.loss_intermediate = self._create_variable(name='loss_intermediate', src=['host'], dst=['guest'])
self.paillier_pubkey = self._create_variable(name='paillier_pubkey', src=['arbiter'], dst=['host', 'guest'])
| 2,676 | 54.770833 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/cipher_compressor_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class CipherCompressorTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.compress_para = self._create_variable(name='compress_para', src=['guest'], dst=['host'])
| 1,246 | 35.676471 | 101 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/feldman_verifiable_sum_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class FeldmanVerifiableSumTransferVariables(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.guest_share_subkey = self._create_variable(name='guest_share_subkey', src=['guest'], dst=['host'])
self.host_share_to_guest = self._create_variable(name='host_share_to_guest', src=['host'], dst=['guest'])
self.host_share_to_host = self._create_variable(name='host_share_to_host', src=['host'], dst=['host'])
self.host_sum = self._create_variable(name='host_sum', src=['host'], dst=['guest'])
self.guest_commitments = self._create_variable(name='guest_commitments', src=['guest'], dst=['host'])
self.host_commitments = self._create_variable(name='host_commitments', src=['host'], dst=['host', 'guest'])
| 1,804 | 45.282051 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/secure_add_example_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class SecureAddExampleTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.guest_share = self._create_variable(name='guest_share', src=['guest'], dst=['host'])
self.host_share = self._create_variable(name='host_share', src=['host'], dst=['guest'])
self.host_sum = self._create_variable(name='host_sum', src=['host'], dst=['guest'])
| 1,430 | 38.75 | 97 |
py
|
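`guest_share`, `host_share`, and `host_sum` trace the classic additive-secret-sharing exchange, which can be checked with plain arithmetic and no federation at all. The modulus and splitting scheme below are illustrative assumptions; only the share algebra is the point.

```python
import random

Q = 2 ** 61 - 1  # illustrative modulus

def split(x):
    """Split x into two additive shares that sum to x mod Q."""
    r = random.randrange(Q)
    return r, (x - r) % Q

gx, hx = 30, 12                    # guest's secret, host's secret
g_keep, guest_share = split(gx)    # guest keeps one share, sends the other
h_keep, host_share = split(hx)     # host does the same in reverse

host_sum = (h_keep + guest_share) % Q      # host's combined partial
guest_partial = (g_keep + host_share) % Q  # guest's combined partial

assert (host_sum + guest_partial) % Q == (gx + hx) % Q  # reconstructs 42
```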
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/raw_intersect_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class RawIntersectTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.intersect_ids_guest = self._create_variable(name='intersect_ids_guest', src=['guest'], dst=['host'])
self.intersect_ids_host = self._create_variable(name='intersect_ids_host', src=['host'], dst=['guest'])
self.send_ids_guest = self._create_variable(name='send_ids_guest', src=['guest'], dst=['host'])
self.send_ids_host = self._create_variable(name='send_ids_host', src=['host'], dst=['guest'])
self.sync_intersect_ids_multi_hosts = self._create_variable(
name='sync_intersect_ids_multi_hosts', src=['guest'], dst=['host'])
| 1,721 | 43.153846 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/sshe_model_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class SSHEModelTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.q_field = self._create_variable(name='q_field', src=['guest', "host"], dst=['host', "guest"])
self.host_prob = self._create_variable(name='host_prob', src=['host'], dst=['guest'])
self.loss = self._create_variable(name='loss', src=['host'], dst=['guest'])
self.is_converged = self._create_variable(name='is_converged', src=['guest'], dst=['host'])
| 1,522 | 40.162162 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/sample_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class SampleTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.sample_ids = self._create_variable(name='sample_ids', src=['guest'], dst=['host'])
| 1,230 | 35.205882 | 95 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_feature_selection_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroFeatureSelectionTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.host_select_cols = self._create_variable(name='host_select_cols', src=['host'], dst=['guest'])
self.result_left_cols = self._create_variable(name='result_left_cols', src=['guest'], dst=['host'])
self.host_empty_cols = self._create_variable(name='host_empty_cols', src=['host'], dst=['guest'])
self.host_anonymous_header_dict = self._create_variable(name='host_anonymous_header_dict',
src=['host'],
dst=['guest'])
| 1,728 | 43.333333 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/batch_generator_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class BatchGeneratorTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.batch_data_index = self._create_variable(name='batch_data_index', src=['guest'], dst=['host'])
self.batch_info = self._create_variable(name='batch_info', src=['guest'], dst=['host', 'arbiter'])
| 1,357 | 37.8 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_feature_binning_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroFeatureBinningTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.bucket_idx = self._create_variable(name='bucket_idx', src=['guest'], dst=['host'])
self.encrypted_bin_sum = self._create_variable(name='encrypted_bin_sum', src=['host'], dst=['guest'])
self.optimal_info = self._create_variable(name='optimal_info', src=['host'], dst=['guest'])
self.encrypted_label = self._create_variable(name='encrypted_label', src=['guest'], dst=['host'])
self.paillier_pubkey = self._create_variable(name='paillier_pubkey', src=['guest'], dst=['host'])
self.transform_stage_has_label = self._create_variable(
name="transform_stage_has_label", src=['guest'], dst=['host'])
self.host_anonymous_header_dict = self._create_variable(name='host_anonymous_header_dict',
src=['host'],
dst=['guest'])
| 2,061 | 46.953488 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/cross_validation_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class CrossValidationTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.test_sid = self._create_variable(name='test_sid', src=['guest'], dst=['host'])
self.train_sid = self._create_variable(name='train_sid', src=['guest'], dst=['host'])
| 1,329 | 37 | 93 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_linr_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroLinRTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.batch_data_index = self._create_variable(name='batch_data_index', src=['guest'], dst=['host'])
self.batch_info = self._create_variable(name='batch_info', src=['guest'], dst=['host', 'arbiter'])
self.converge_flag = self._create_variable(name='converge_flag', src=['arbiter'], dst=['host', 'guest'])
self.fore_gradient = self._create_variable(name='fore_gradient', src=['guest'], dst=['host'])
self.forward_hess = self._create_variable(name='forward_hess', src=['guest'], dst=['host'])
self.guest_gradient = self._create_variable(name='guest_gradient', src=['guest'], dst=['arbiter'])
self.guest_hess_vector = self._create_variable(name='guest_hess_vector', src=['guest'], dst=['arbiter'])
self.guest_optim_gradient = self._create_variable(name='guest_optim_gradient', src=['arbiter'], dst=['guest'])
self.host_forward = self._create_variable(name='host_forward', src=['host'], dst=['guest'])
self.host_gradient = self._create_variable(name='host_gradient', src=['host'], dst=['arbiter'])
self.host_hess_vector = self._create_variable(name='host_hess_vector', src=['host'], dst=['arbiter'])
self.host_loss_regular = self._create_variable(name='host_loss_regular', src=['host'], dst=['guest'])
self.host_optim_gradient = self._create_variable(name='host_optim_gradient', src=['arbiter'], dst=['host'])
self.host_partial_prediction = self._create_variable(
name='host_partial_prediction', src=['host'], dst=['guest'])
self.host_sqn_forwards = self._create_variable(name='host_sqn_forwards', src=['host'], dst=['guest'])
self.loss = self._create_variable(name='loss', src=['guest'], dst=['arbiter'])
self.loss_intermediate = self._create_variable(name='loss_intermediate', src=['host'], dst=['guest'])
self.paillier_pubkey = self._create_variable(name='paillier_pubkey', src=['arbiter'], dst=['host', 'guest'])
self.sqn_sample_index = self._create_variable(name='sqn_sample_index', src=['guest'], dst=['host'])
self.use_async = self._create_variable(name='use_async', src=['guest'], dst=['host'])
| 3,308 | 60.277778 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/converge_checker_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class ConvergeCheckerTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.square_sum = self._create_variable(name='square_sum', src=['host'], dst=['guest'])
self.converge_info = self._create_variable(name='converge_info', src=['guest'], dst=['host'])
| 1,341 | 37.342857 | 101 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_boosting_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroBoostingTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.booster_dim = self._create_variable(name='booster_dim', src=['guest'], dst=['host'])
self.stop_flag = self._create_variable(name='stop_flag', src=['guest'], dst=['host'])
self.predict_start_round = self._create_variable(name='predict_start_round', src=['guest'], dst=['host'])
| 1,448 | 39.25 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/intersection_func_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class IntersectionFuncTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.id_map_from_guest = self._create_variable(name='id_map_from_guest', src=['guest'], dst=['host'])
self.id_map_from_host = self._create_variable(name='id_map_from_host', src=['host'], dst=['guest'])
self.info_share_from_host = self._create_variable(name='info_share_from_host', src=['host'], dst=['guest'])
self.info_share_from_guest = self._create_variable(name='info_share_from_guest', src=['guest'], dst=['host'])
self.join_id_from_guest = self._create_variable(name='join_id_from_guest', src=['guest'], dst=['host'])
self.join_id_from_host = self._create_variable(name='join_id_from_host', src=['host'], dst=['guest'])
self.intersect_filter_from_host = self._create_variable(name='intersect_filter_from_host', src=['host'],
dst=['guest'])
self.intersect_filter_from_guest = self._create_variable(name='intersect_filter_from_guest', src=['guest'],
dst=['host'])
self.cache_id = self._create_variable(name='cache_id', src=['guest', 'host'], dst=['host', 'guest'])
self.cache_id_from_host = self._create_variable(name='cache_id_from_host', src=['host'], dst=['guest'])
self.use_match_id = self._create_variable(name='use_match_id', src=['host'], dst=['guest'])
| 2,526 | 53.934783 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/homo_binning_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HomoBinningTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.local_static_values = self._create_variable(name='local_static_values', src=['guest', 'host'],
dst=['arbiter'])
self.global_static_values = self._create_variable(name='global_static_values', src=["arbiter"],
dst=['guest', 'host'])
self.query_array = self._create_variable(name='query_array', src=["arbiter"],
dst=['guest', 'host'])
self.is_converge = self._create_variable(name='is_converge', src=['guest'], dst=['arbiter'])
| 1,765 | 43.15 | 107 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/match_id_intersect_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class MatchIDIntersectTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.id_map_from_guest = self._create_variable(name='id_map_from_guest', src=['guest'], dst=['host'])
self.id_map_from_host = self._create_variable(name='id_map_from_host', src=['host'], dst=['guest'])
| 1,362 | 37.942857 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/data_split_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class DataSplitTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.id_train = self._create_variable(name='id_train', src=['guest'], dst=['host'])
self.id_test = self._create_variable(name='id_test', src=['guest'], dst=['host'])
self.id_validate = self._create_variable(name='id_validate', src=['guest'], dst=['host'])
| 1,417 | 38.388889 | 97 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_dnn_lr_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroDNNLRTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.guest_dec_gradient = self._create_variable(name='guest_dec_gradient', src=['arbiter'], dst=['guest'])
self.guest_enc_gradient = self._create_variable(name='guest_enc_gradient', src=['guest'], dst=['arbiter'])
self.host_dec_gradient = self._create_variable(name='host_dec_gradient', src=['arbiter'], dst=['host'])
self.host_enc_gradient = self._create_variable(name='host_enc_gradient', src=['host'], dst=['arbiter'])
| 1,593 | 42.081081 | 114 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/stepwise_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class StepwiseTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.guest_data_info = self._create_variable(name='guest_data_info', src=['guest'], dst=['arbiter', 'host'])
self.host_data_info = self._create_variable(name='host_data_info', src=['host'], dst=['arbiter', 'guest'])
self.step_best = self._create_variable(name='step_best', src=['arbiter'], dst=['host', 'guest'])
| 1,473 | 39.944444 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/homo_onehot_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HomoOneHotTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.aligned_columns = self._create_variable(name='aligned_columns', src=['arbiter'], dst=['guest', 'host'])
self.guest_columns = self._create_variable(name='guest_columns', src=['guest'], dst=['arbiter'])
self.host_columns = self._create_variable(name='host_columns', src=['host'], dst=['arbiter'])
| 1,462 | 39.638889 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/homo_decision_tree_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HomoDecisionTreeTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.best_split_points = self._create_variable(name='best_split_points', src=['arbiter'], dst=['guest', 'host'])
self.node_sample_num = self._create_variable(name='node_sample_num', src=['guest', 'host'], dst=['arbiter'])
self.cur_layer_node_num = self._create_variable(
name='cur_layer_node_num', src=[
'guest', 'host'], dst=['arbiter'])
| 1,535 | 39.421053 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/dh_intersect_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class DhIntersectTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.id_ciphertext_list_exchange_g2h = self._create_variable(
name='id_ciphertext_list_exchange_g2h', src=['guest'], dst=['host'])
self.id_ciphertext_list_exchange_h2g = self._create_variable(
name='id_ciphertext_list_exchange_h2g', src=['host'], dst=['guest'])
self.doubly_encrypted_id_list = self._create_variable(
name='doubly_encrypted_id_list', src=['host'], dst=['guest'])
self.intersect_ids = self._create_variable(name='intersect_ids', src=['guest'], dst=['host'])
self.cardinality = self._create_variable(name='cardinality', src=['guest'], dst=['host'])
self.host_filter = self._create_variable(name='host_filter', src=['host'], dst=['guest'])
| 1,876 | 43.690476 | 101 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/homo_secure_boost_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HomoSecureBoostingTreeTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.tree_dim = self._create_variable(name='tree_dim', src=['guest', 'host'], dst=['arbiter'])
self.feature_number = self._create_variable(name='feature_number', src=['guest', 'host'], dst=['arbiter'])
self.loss_status = self._create_variable(name='loss_status', src=['guest', 'host'], dst=['arbiter'])
self.stop_flag = self._create_variable(name='stop_flag', src=['arbiter'], dst=['guest', 'host'])
self.local_labels = self._create_variable(name='local_labels', src=['guest', 'host'], dst=['arbiter'])
self.label_mapping = self._create_variable(name='label_mapping', src=['arbiter'], dst=['guest', 'host'])
self.valid_features = self._create_variable(name='valid_features', src=['arbiter'], dst=['guest', 'host'])
| 1,921 | 47.05 | 114 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/hetero_decision_tree_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class HeteroDecisionTreeTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.dispatch_node_host = self._create_variable(name='dispatch_node_host', src=['guest'], dst=['host'])
self.dispatch_node_host_result = self._create_variable(
name='dispatch_node_host_result', src=['host'], dst=['guest'])
self.encrypted_grad_and_hess = self._create_variable(
name='encrypted_grad_and_hess', src=['guest'], dst=['host'])
self.encrypted_splitinfo_host = self._create_variable(
name='encrypted_splitinfo_host', src=['host'], dst=['guest'])
self.federated_best_splitinfo_host = self._create_variable(
name='federated_best_splitinfo_host', src=['guest'], dst=['host'])
self.final_splitinfo_host = self._create_variable(name='final_splitinfo_host', src=['host'], dst=['guest'])
self.node_positions = self._create_variable(name='node_positions', src=['guest'], dst=['host'])
self.predict_data = self._create_variable(name='predict_data', src=['guest'], dst=['host'])
self.predict_data_by_host = self._create_variable(name='predict_data_by_host', src=['host'], dst=['guest'])
self.predict_finish_tag = self._create_variable(name='predict_finish_tag', src=['guest'], dst=['host'])
self.tree = self._create_variable(name='tree', src=['guest'], dst=['host'])
self.tree_node_queue = self._create_variable(name='tree_node_queue', src=['guest'], dst=['host'])
self.host_cur_to_split_node_num = self._create_variable(
name='host_cur_to_split_node_num', src=['host'], dst=['guest'])
self.host_leafs = self._create_variable(name='host_leafs', src=['host'], dst=['guest'])
self.sync_flag = self._create_variable(name='sync_flag', src=['guest'], dst=['host'])
self.cipher_compressor_para = self._create_variable(name='cipher_compressor_para', src=['guest'], dst=['host'])
| 3,005 | 54.666667 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/oblivious_transfer_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class ObliviousTransferTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.s = self._create_variable(name='s', src=['host'], dst=['guest'])
self.s_legal = self._create_variable(name='s_legal', src=['guest'], dst=['host'])
self.r = self._create_variable(name='r', src=['guest'], dst=['host'])
| 1,391 | 37.666667 | 89 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/ecdh_intersect_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class EcdhIntersectTransferVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.id_ciphertext_exchange_g2h = self._create_variable(
name='id_ciphertext_exchange_g2h', src=['guest'], dst=['host'])
self.id_ciphertext_exchange_h2g = self._create_variable(
name='id_ciphertext_exchange_h2g', src=['host'], dst=['guest'])
self.doubly_encrypted_id = self._create_variable(
name='doubly_encrypted_id', src=['host'], dst=['guest'])
self.intersect_ids = self._create_variable(name='intersect_ids', src=['guest'], dst=['host'])
self.cardinality = self._create_variable(name='cardinality', src=['guest'], dst=['host'])
self.host_filter = self._create_variable(name='host_filter', src=['host'], dst=['guest'])
| 1,848 | 43.02381 | 101 |
py
|
FATE
|
FATE-master/python/federatedml/transfer_variable/transfer_class/validation_strategy_transfer_variable.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# AUTO GENERATED TRANSFER VARIABLE CLASS. DO NOT MODIFY
#
################################################################################
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
# noinspection PyAttributeOutsideInit
class ValidationStrategyVariable(BaseTransferVariables):
def __init__(self, flowid=0):
super().__init__(flowid)
self.validation_status = self._create_variable(name='validation_status', src=['guest'], dst=['host', 'arbiter'])
| 1,259 | 36.058824 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.criterion import XgboostCriterion
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.decision_tree import DecisionTree
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import Splitter
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_histogram import FeatureHistogram
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_histogram import HistogramBag, \
FeatureHistogramWeights
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_decision_tree_host import HeteroDecisionTreeHost
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_decision_tree_guest import \
HeteroDecisionTreeGuest
from federatedml.ensemble.secureboost.hetero_secoreboost.hetero_secureboost_guest import HeteroSecureBoostingTreeGuest
from federatedml.ensemble.secureboost.hetero_secoreboost.hetero_secureboost_host import HeteroSecureBoostingTreeHost
from federatedml.ensemble.basic_algorithms.decision_tree.homo.homo_decision_tree_aggregator import \
DecisionTreeClientAggregator, DecisionTreeArbiterAggregator
from federatedml.ensemble.basic_algorithms.decision_tree.homo.homo_decision_tree_client import HomoDecisionTreeClient
from federatedml.ensemble.basic_algorithms.decision_tree.homo.homo_decision_tree_arbiter import HomoDecisionTreeArbiter
from federatedml.ensemble.secureboost.homo_secureboost.homo_secureboost_client import HomoSecureBoostingTreeClient
from federatedml.ensemble.secureboost.homo_secureboost.homo_secureboost_arbiter import HomoSecureBoostingTreeArbiter
__all__ = [
"Node",
"HeteroDecisionTreeHost",
"HeteroDecisionTreeGuest",
"Splitter",
"FeatureHistogram",
"XgboostCriterion",
"DecisionTree",
'SplitInfo',
"HomoDecisionTreeClient",
"HomoDecisionTreeArbiter",
"DecisionTreeArbiterAggregator",
'DecisionTreeClientAggregator',
"HeteroSecureBoostingTreeGuest",
"HeteroSecureBoostingTreeHost",
"HomoSecureBoostingTreeArbiter",
"HomoSecureBoostingTreeClient",
"HistogramBag",
"FeatureHistogramWeights"]
| 2,974 | 48.583333 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/algorithm_prototype.py
|
import abc
from abc import ABC
class BasicAlgorithms(ABC):
@abc.abstractmethod
def get_sample_weights(self, *args):
pass
@abc.abstractmethod
def fit(self, *args):
pass
@abc.abstractmethod
def predict(self, *args):
pass
@abc.abstractmethod
def get_model(self, *args):
pass
@abc.abstractmethod
def load_model(self, *args):
pass
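# Minimal concrete subclass sketch (illustrative, not part of FATE): the ABC
# above only fixes the five hooks every ensemble algorithm must provide, so a
# trivial implementation is enough to satisfy the interface.
class _ToyAlgorithm(BasicAlgorithms):
    def get_sample_weights(self, *args):
        return None
    def fit(self, *args):
        return self
    def predict(self, *args):
        return 0
    def get_model(self, *args):
        return {}
    def load_model(self, *args):
        return None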
| 413 | 14.923077 | 40 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/__init__.py
|
from federatedml.ensemble.basic_algorithms.algorithm_prototype import BasicAlgorithms
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_decision_tree_guest import \
HeteroDecisionTreeGuest
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_decision_tree_host import HeteroDecisionTreeHost
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_fast_decision_tree_guest import \
HeteroFastDecisionTreeGuest
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_fast_decision_tree_host import \
HeteroFastDecisionTreeHost
__all__ = ["BasicAlgorithms", "HeteroDecisionTreeGuest", "HeteroDecisionTreeHost", "HeteroFastDecisionTreeGuest",
"HeteroFastDecisionTreeHost"]
| 763 | 57.769231 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/feature_importance.py
|
class FeatureImportance(object):
def __init__(self, importance=0, importance_2=0, main_type='split'):
self.legal_type = ['split', 'gain']
assert main_type in self.legal_type, 'illegal importance type {}'.format(main_type)
self.importance = importance
self.importance_2 = importance_2
self.main_type = main_type
def add_gain(self, val):
if self.main_type == 'gain':
self.importance += val
else:
self.importance_2 += val
def add_split(self, val):
if self.main_type == 'split':
self.importance += val
else:
self.importance_2 += val
def from_protobuf(self, feature_importance):
self.main_type = feature_importance.main
self.importance = feature_importance.importance
self.importance_2 = feature_importance.importance2
if self.main_type == 'split':
self.importance = int(self.importance)
def __cmp__(self, other):
if self.importance > other.importance:
return 1
elif self.importance < other.importance:
return -1
else:
return 0
def __eq__(self, other):
return self.importance == other.importance
def __lt__(self, other):
return self.importance < other.importance
def __repr__(self):
return 'importance type: {}, importance: {}, importance2 {}'.format(self.main_type, self.importance,
self.importance_2)
def __add__(self, other):
new_importance = FeatureImportance(main_type=self.main_type, importance=self.importance + other.importance,
importance_2=self.importance_2 + other.importance_2)
return new_importance
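# Usage sketch (illustrative, not part of the module): with main_type='split',
# split counts accumulate in `importance` and gains in `importance_2`, and
# `+` merges two importances of the same main type element-wise:
#   fi = FeatureImportance(main_type='split')
#   fi.add_split(1); fi.add_split(1)                    # importance == 2
#   fi.add_gain(0.5)                                    # importance_2 == 0.5
#   (fi + FeatureImportance(importance=3)).importance   # == 5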
| 1,837 | 33.679245 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/feature_histogram.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# FeatureHistogram
# =============================================================================
import copy
import functools
import numpy as np
from operator import add, sub
import scipy.sparse as sp
import uuid
from typing import List
from fate_arch.session import computing_session as session
from fate_arch.common import log
from federatedml.feature.fate_element_type import NoneType
from federatedml.framework.weights import Weights
LOGGER = log.getLogger()
# ret type
TENSOR = 'tensor'
TABLE = 'tb'
class HistogramBag(object):
"""
holds histograms
"""
def __init__(self, tensor: list, hid: int = -1, p_hid: int = -1):
"""
:param tensor: list returned by calculate_histogram
:param hid: histogram id
:param p_hid: parent node histogram id
        (tensor_type is inferred from the tensor argument, not passed in)
"""
self.hid = hid
self.p_hid = p_hid
self.bag = tensor
self.tensor_type = type(self.bag)
def binary_op(self, other, func, inplace=False):
assert isinstance(other, HistogramBag)
assert len(self.bag) == len(other)
bag = self.bag
newbag = None
if not inplace:
newbag = copy.deepcopy(other)
bag = newbag.bag
for bag_idx in range(len(self.bag)):
for hist_idx in range(len(self.bag[bag_idx])):
bag[bag_idx][hist_idx][0] = func(self.bag[bag_idx][hist_idx][0], other[bag_idx][hist_idx][0])
bag[bag_idx][hist_idx][1] = func(self.bag[bag_idx][hist_idx][1], other[bag_idx][hist_idx][1])
bag[bag_idx][hist_idx][2] = func(self.bag[bag_idx][hist_idx][2], other[bag_idx][hist_idx][2])
return self if inplace else newbag
def __add__(self, other):
        if self.tensor_type == list:  # tensor_type holds a type object, not a string
            return self.binary_op(other, add, inplace=False)
        elif self.tensor_type == np.ndarray:
self.bag += other.bag
return self
else:
raise ValueError('unknown tensor type')
def __sub__(self, other):
if self.tensor_type == list:
return self.binary_op(other, sub, inplace=False)
elif self.tensor_type == np.ndarray:
self.bag -= other.bag
return self
else:
raise ValueError('unknown tensor type')
def __len__(self):
return len(self.bag)
def __getitem__(self, item):
return self.bag[item]
def __str__(self):
return str(self.bag)
def __repr__(self):
return str(self.bag)
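# Usage sketch (illustrative): a HistogramBag wraps the nested
# [feature][bin][g, h, count] lists produced by calculate_histogram;
# binary_op applies an element-wise function across two bags of equal shape,
# which is how sibling histograms are derived from parent histograms:
#   parent = HistogramBag([[[2, 4, 6], [8, 10, 12]]], hid=0)
#   child = HistogramBag([[[1, 2, 3], [4, 5, 6]]], hid=1, p_hid=0)
#   sibling = parent.binary_op(child, lambda a, b: a - b, inplace=False)
#   sibling.bag  # [[[1, 2, 3], [4, 5, 6]]]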
class FeatureHistogramWeights(Weights):
def __init__(self, list_of_histogram_bags: List[HistogramBag]):
self.hists = list_of_histogram_bags
super(FeatureHistogramWeights, self).__init__(l=list_of_histogram_bags)
def map_values(self, func, inplace):
if inplace:
hists = self.hists
else:
hists = copy.deepcopy(self.hists)
for histbag in hists:
bag = histbag.bag
for component_idx in range(len(bag)):
for hist_idx in range(len(bag[component_idx])):
bag[component_idx][hist_idx][0] = func(bag[component_idx][hist_idx][0])
bag[component_idx][hist_idx][1] = func(bag[component_idx][hist_idx][1])
bag[component_idx][hist_idx][2] = func(bag[component_idx][hist_idx][2])
if inplace:
return self
else:
return FeatureHistogramWeights(list_of_histogram_bags=hists)
def binary_op(self, other: 'FeatureHistogramWeights', func, inplace: bool):
new_weights = []
hists, other_hists = self.hists, other.hists
for h1, h2 in zip(hists, other_hists):
rnt = h1.binary_op(h2, func, inplace=inplace)
if not inplace:
new_weights.append(rnt)
if inplace:
return self
else:
return FeatureHistogramWeights(new_weights)
def axpy(self, a, y: 'FeatureHistogramWeights'):
def func(x1, x2): return x1 + a * x2
self.binary_op(y, func, inplace=True)
return self
def __iter__(self):
pass
def __str__(self):
return str([str(hist) for hist in self.hists])
def __repr__(self):
return str(self.hists)
class FeatureHistogram(object):
def __init__(self):
self._cur_to_split_node_info = {}
self._prev_layer_cached_histograms = {}
self._cur_layer_cached_histograms = {}
self._cur_dep = -1
self._prev_layer_dtable = None
self._cur_layer_dtables = [None]
self.stable_reduce = False
"""
Public Interface for Histogram Computation
"""
def compute_histogram(self, dep, data_bin, grad_and_hess, bin_split_points, bin_sparse_points,
valid_features, node_map,
node_sample_count,
use_missing=False,
zero_as_missing=False,
ret="tensor",
hist_sub=True,
cur_to_split_nodes=None
):
"""
        This is the new interface for histogram computation
"""
if hist_sub:
            # if running histogram subtraction, trim the node map and get parent/sibling node info for computation
LOGGER.info('get histogram using histogram subtraction')
self._update_node_info(cur_to_split_nodes)
to_compute_node_map, sibling_node_id_map = self._trim_node_map(node_map, node_sample_count)
parent_node_id_map = self._get_parent_nid_map()
LOGGER.debug('histogram subtraction at dep {}, new node map is {}, sibling node map is {}, '
'cur to split node info is {}, parent node id map is {}'.
format(dep, to_compute_node_map, sibling_node_id_map, self._cur_to_split_node_info,
parent_node_id_map))
else:
# else use original node map
to_compute_node_map = node_map
sibling_node_id_map = None
parent_node_id_map = None
if ret == TENSOR:
histograms = self.calculate_histogram(data_bin, grad_and_hess,
bin_split_points, bin_sparse_points,
valid_features, to_compute_node_map,
use_missing, zero_as_missing, ret=ret)
if not hist_sub:
return histograms
# running hist sub
self._update_cached_histograms(dep, ret=ret)
            if self._is_root_node(node_map):  # the root node needs no hist sub
self._cur_layer_cached_histograms[0] = histograms[0]
result = histograms
else:
node_id_list, result = self._tensor_subtraction(histograms, to_compute_node_map)
self._cached_histograms((node_id_list, result), ret=ret)
return result
elif ret == 'tb':
LOGGER.debug('maps are {} {}'.format(parent_node_id_map, sibling_node_id_map))
LOGGER.info('computing histogram table using normal mode')
histogram_table = self.calculate_histogram(data_bin, grad_and_hess,
bin_split_points, bin_sparse_points,
valid_features, to_compute_node_map,
use_missing, zero_as_missing,
ret=ret,
parent_node_id_map=parent_node_id_map,
sibling_node_id_map=sibling_node_id_map)
if not hist_sub:
return histogram_table
# running hist sub
self._update_cached_histograms(dep, ret=ret)
            if self._is_root_node(node_map):  # the root node needs no hist sub
self._cur_layer_dtables.append(histogram_table)
result = histogram_table
else:
result = self._table_subtraction(histogram_table)
self._cached_histograms(result, ret=ret)
return result
def calculate_histogram(self, data_bin, grad_and_hess,
bin_split_points, bin_sparse_points,
valid_features=None,
node_map=None,
use_missing=False,
zero_as_missing=False,
parent_node_id_map=None,
sibling_node_id_map=None,
ret=TENSOR):
"""
This is the old interface for histogram computation
data_bin: data after binning with node positions
grad_and_hess: g/h for each sample
bin_split_points: split points
bin_sparse_points: sparse points
node_map: node id to node index
use_missing: enable use missing
zero_as_missing: enable zero as missing
parent_node_id_map: map current node_id to its parent id, this para is for hist sub
sibling_node_id_map: map current node_id to its sibling id, this para is for hist sub
ret: return type, if 'tb', return histograms stored in Table
"""
LOGGER.debug("bin_shape is {}, node num is {}".format(bin_split_points.shape, len(node_map)))
if grad_and_hess.count() == 0:
raise ValueError('input grad and hess is empty')
# histogram template will be adjusted when running mo tree
mo_dim = None
g_h_example = grad_and_hess.take(1)
if isinstance(g_h_example[0][1][0], np.ndarray) and len(g_h_example[0][1][0]) > 1:
mo_dim = len(g_h_example[0][1][0])
# reformat, now format is: key, ((data_instance, node position), (g, h))
batch_histogram_intermediate_rs = data_bin.join(grad_and_hess, lambda data_inst, g_h: (data_inst, g_h))
if batch_histogram_intermediate_rs.count() == 0: # if input sample number is 0, return empty histograms
node_histograms = FeatureHistogram._generate_histogram_template(node_map, bin_split_points, valid_features,
1 if use_missing else 0, mo_dim=mo_dim)
hist_list = FeatureHistogram._generate_histogram_key_value_list(node_histograms, node_map, bin_split_points,
parent_node_id_map=parent_node_id_map,
sibling_node_id_map=sibling_node_id_map)
if ret == TENSOR:
feature_num = bin_split_points.shape[0]
return FeatureHistogram._recombine_histograms(hist_list, node_map, feature_num)
else:
histograms_table = session.parallelize(hist_list, partition=data_bin.partitions, include_key=True)
return FeatureHistogram._construct_table(histograms_table)
else: # compute histograms
batch_histogram_cal = functools.partial(
FeatureHistogram._batch_calculate_histogram,
bin_split_points=bin_split_points, bin_sparse_points=bin_sparse_points,
valid_features=valid_features, node_map=node_map,
use_missing=use_missing, zero_as_missing=zero_as_missing,
parent_nid_map=parent_node_id_map,
sibling_node_id_map=sibling_node_id_map,
stable_reduce=self.stable_reduce,
mo_dim=mo_dim
)
agg_func = self._stable_hist_aggregate if self.stable_reduce else self._hist_aggregate
histograms_table = batch_histogram_intermediate_rs.mapReducePartitions(batch_histogram_cal, agg_func)
if self.stable_reduce:
histograms_table = histograms_table.mapValues(self._stable_hist_reduce)
if ret == "tensor":
feature_num = bin_split_points.shape[0]
histogram_list = list(histograms_table.collect())
rs = FeatureHistogram._recombine_histograms(histogram_list, node_map, feature_num)
return rs
else:
return FeatureHistogram._construct_table(histograms_table)
"""
Histogram computation functions
"""
@staticmethod
def _tensor_histogram_cumsum(histograms):
# histogram cumsum, from left to right
for i in range(1, len(histograms)):
for j in range(len(histograms[i])):
histograms[i][j] += histograms[i - 1][j]
return histograms
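    # Worked example (illustrative): with per-bin [g, h, count] entries
    #   [[1, 1, 1], [2, 1, 1], [3, 1, 1]]
    # the left-to-right cumsum above yields
    #   [[1, 1, 1], [3, 2, 2], [6, 3, 3]],
    # so the last bin always carries the node-level totals that split finding
    # later reads via histogram[fid][bin_num - 1].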
@staticmethod
def _dtable_histogram_cumsum(histograms):
# histogram cumsum, from left to right
if len(histograms) == 0:
return histograms
new_hist = [[0, 0, 0] for i in range(len(histograms))]
new_hist[0][0] = copy.deepcopy(histograms[0][0])
new_hist[0][1] = copy.deepcopy(histograms[0][1])
new_hist[0][2] = copy.deepcopy(histograms[0][2])
for i in range(1, len(histograms)):
# ciphertext cumsum skipping
if histograms[i][2] == 0:
new_hist[i] = new_hist[i - 1]
continue
for j in range(len(histograms[i])):
new_hist[i][j] = new_hist[i - 1][j] + histograms[i][j]
return new_hist
@staticmethod
def _host_histogram_cumsum_map_func(v):
fid, histograms = v
new_value = (fid, FeatureHistogram._dtable_histogram_cumsum(histograms))
return new_value
@staticmethod
def _hist_aggregate(fid_histogram1, fid_histogram2):
# add histograms with same key((node id, feature id)) together
fid_1, histogram1 = fid_histogram1
fid_2, histogram2 = fid_histogram2
aggregated_res = [[] for i in range(len(histogram1))]
for i in range(len(histogram1)):
for j in range(len(histogram1[i])):
aggregated_res[i].append(histogram1[i][j] + histogram2[i][j])
return fid_1, aggregated_res
@staticmethod
def _stable_hist_aggregate(fid_histogram1, fid_histogram2):
partition_id_list_1, hist_val_list_1 = fid_histogram1
partition_id_list_2, hist_val_list_2 = fid_histogram2
value = [partition_id_list_1 + partition_id_list_2, hist_val_list_1 + hist_val_list_2]
return value
@staticmethod
def _stable_hist_reduce(value):
# [partition1, partition2, ...], [(fid, hist), (fid, hist) .... ]
partition_id_list, hist_list = value
order = np.argsort(partition_id_list)
aggregated_hist = None
for idx in order: # make sure reduce in order to avoid float error
hist = hist_list[idx]
if aggregated_hist is None:
aggregated_hist = hist
continue
aggregated_hist = FeatureHistogram._hist_aggregate(aggregated_hist, hist)
return aggregated_hist
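    # Note (illustrative): partitions can be reduced in any order, and float
    # addition is not associative, so the same histograms could otherwise
    # reduce to slightly different sums from run to run. Sorting by partition
    # key before aggregating makes the reduction order, and thus the result,
    # deterministic.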
@staticmethod
def _generate_histogram_template(node_map: dict, bin_split_points: np.ndarray, valid_features: dict,
missing_bin, mo_dim=None):
# for every feature, generate histograms containers (initialized val are 0s)
node_num = len(node_map)
node_histograms = []
for k in range(node_num):
feature_histogram_template = []
for fid in range(bin_split_points.shape[0]):
# if is not valid features, skip generating
if valid_features is not None and valid_features[fid] is False:
feature_histogram_template.append([])
continue
else:
# 0, 0, 0 -> grad, hess, sample count
if mo_dim:
feature_histogram_template.append([[np.zeros(mo_dim), np.zeros(mo_dim), 0]
for j in
range(bin_split_points[fid].shape[0] + missing_bin)])
else:
feature_histogram_template.append([[0, 0, 0]
for j in
range(bin_split_points[fid].shape[0] + missing_bin)])
node_histograms.append(feature_histogram_template)
# check feature num
assert len(feature_histogram_template) == bin_split_points.shape[0]
return node_histograms
@staticmethod
def _generate_histogram_key_value_list(node_histograms, node_map, bin_split_points, parent_node_id_map,
sibling_node_id_map, partition_key=None):
# generate key_value hist list for Table parallelization
ret = []
inverse_map = FeatureHistogram._inverse_node_map(node_map)
for node_idx in range(len(node_map)):
for fid in range(bin_split_points.shape[0]):
# key: (nid, fid), value: (fid, hist)
# if parent_nid is offered, map nid to its parent nid for histogram subtraction
node_id = inverse_map[node_idx]
key = (parent_node_id_map[node_id], fid) if parent_node_id_map is not None else (node_id, fid)
# if sibling_node_id_map is offered, recorded its sibling ids for histogram subtraction
value = (fid, node_histograms[node_idx][fid]) if sibling_node_id_map is None else \
((fid, node_id, sibling_node_id_map[node_id]), node_histograms[node_idx][fid])
if partition_key is not None:
value = [[partition_key], [value]]
ret.append((key, value))
return ret
@staticmethod
def _batch_calculate_histogram(kv_iterator, bin_split_points=None,
bin_sparse_points=None, valid_features=None,
node_map=None, use_missing=False, zero_as_missing=False,
parent_nid_map=None, sibling_node_id_map=None, stable_reduce=False,
mo_dim=None):
data_bins = []
node_ids = []
grad = []
hess = []
data_record = 0 # total instance number of this partition
partition_key = None # this var is for stable reduce
# go through iterator to collect g/h feature instances/ node positions
for data_id, value in kv_iterator:
if partition_key is None and stable_reduce: # first key of data is used as partition key
partition_key = data_id
data_bin, nodeid_state = value[0]
unleaf_state, nodeid = nodeid_state
if unleaf_state == 0 or nodeid not in node_map:
continue
g, h = value[1] # encrypted text in host, plaintext in guest
data_bins.append(data_bin) # features
node_ids.append(nodeid) # current node position
grad.append(g)
hess.append(h)
data_record += 1
LOGGER.debug("begin batch calculate histogram, data count is {}".format(data_record))
node_num = len(node_map)
missing_bin = 1 if use_missing else 0
# if the value of a feature is 0, the corresponding bin index will not appear in the sample sparse vector
        # need to compute the correct sparse point g_sum and h_sum by:
# (node total sum value) - (node feature total sum value) + (non 0 sparse point sum)
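        # Worked example (illustrative): if a node's total g_sum is 10 and a
        # feature's non-zero bins account for g = 6, the samples sitting at
        # the sparse (all-zero) value must carry g = 10 - 6 = 4, which is
        # exactly the correction applied further below.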
# [0, 0, 0] -> g, h, sample count
zero_optim = [[[0 for i in range(3)]
for j in range(bin_split_points.shape[0])]
for k in range(node_num)]
zero_opt_node_sum = [[0 for i in range(3)]
for j in range(node_num)]
node_histograms = FeatureHistogram._generate_histogram_template(node_map, bin_split_points, valid_features,
missing_bin, mo_dim=mo_dim)
for rid in range(data_record):
# node index is the position in the histogram list of a certain node
node_idx = node_map.get(node_ids[rid])
# node total sum value
zero_opt_node_sum[node_idx][0] += grad[rid]
zero_opt_node_sum[node_idx][1] += hess[rid]
zero_opt_node_sum[node_idx][2] += 1
for fid, value in data_bins[rid].features.get_all_data():
if valid_features is not None and valid_features[fid] is False:
continue
if use_missing and value == NoneType():
# missing value is set as -1
value = -1
node_histograms[node_idx][fid][value][0] += grad[rid]
node_histograms[node_idx][fid][value][1] += hess[rid]
node_histograms[node_idx][fid][value][2] += 1
for nid in range(node_num):
# cal feature level g_h incrementally
for fid in range(bin_split_points.shape[0]):
if valid_features is not None and valid_features[fid] is False:
continue
for bin_index in range(len(node_histograms[nid][fid])):
zero_optim[nid][fid][0] += node_histograms[nid][fid][bin_index][0]
zero_optim[nid][fid][1] += node_histograms[nid][fid][bin_index][1]
zero_optim[nid][fid][2] += node_histograms[nid][fid][bin_index][2]
for node_idx in range(node_num):
for fid in range(bin_split_points.shape[0]):
if valid_features is not None and valid_features[fid] is True:
if not use_missing or (use_missing and not zero_as_missing):
# add 0 g/h sum to sparse point
sparse_point = bin_sparse_points[fid]
node_histograms[node_idx][fid][sparse_point][0] += zero_opt_node_sum[node_idx][0] - \
zero_optim[node_idx][fid][
0]
node_histograms[node_idx][fid][sparse_point][1] += zero_opt_node_sum[node_idx][1] - \
zero_optim[node_idx][fid][
1]
node_histograms[node_idx][fid][sparse_point][2] += zero_opt_node_sum[node_idx][2] - \
zero_optim[node_idx][fid][
2]
else:
# if 0 is regarded as missing value, add to missing bin
node_histograms[node_idx][fid][-1][0] += zero_opt_node_sum[node_idx][0] - \
zero_optim[node_idx][fid][0]
node_histograms[node_idx][fid][-1][1] += zero_opt_node_sum[node_idx][1] - \
zero_optim[node_idx][fid][1]
node_histograms[node_idx][fid][-1][2] += zero_opt_node_sum[node_idx][2] - \
zero_optim[node_idx][fid][2]
ret = FeatureHistogram._generate_histogram_key_value_list(node_histograms, node_map, bin_split_points,
parent_nid_map, sibling_node_id_map,
partition_key=partition_key)
return ret
@staticmethod
def _recombine_histograms(histograms_list: list, node_map, feature_num):
histograms = [[[] for j in range(feature_num)] for k in range(len(node_map))]
for tuple_ in histograms_list:
node_id, fid = tuple_[0]
node_idx = node_map[node_id]
histograms[int(node_idx)][int(fid)] = FeatureHistogram._tensor_histogram_cumsum(tuple_[1][1])
return histograms
@staticmethod
def _construct_table(histograms_table):
histograms_table = histograms_table.mapValues(FeatureHistogram._host_histogram_cumsum_map_func)
return histograms_table
"""
Histogram subtraction functions
"""
def _update_node_info(self, nodes):
"""
generate node summaries for hist subtraction
"""
if nodes is None:
raise ValueError('node list should not be None if histogram subtraction is enabled')
self._cur_to_split_node_info = {}
for node in nodes:
node_id = node.id
self._cur_to_split_node_info[node_id] = {'pid': node.parent_nodeid, 'is_left_node': node.is_left_node}
@staticmethod
def _is_root_node(node_map):
"""
        check if the current node to split is the root node
"""
return 0 in node_map
def _update_cached_histograms(self, dep, ret='tensor'):
"""
update cached parent histograms
"""
if dep != self._cur_dep and ret == 'tensor':
del self._prev_layer_cached_histograms # delete previous cached histograms
self._prev_layer_cached_histograms = self._cur_layer_cached_histograms # update cached histograms
self._cur_layer_cached_histograms = {} # for caching new histograms
self._cur_dep = dep
elif dep != self._cur_dep and ret == 'tb':
del self._prev_layer_dtable
self._prev_layer_dtable = self._cur_layer_dtables[0]
for table in self._cur_layer_dtables[1:]:
self._prev_layer_dtable = self._prev_layer_dtable.union(table)
self._cur_layer_dtables = []
self._cur_dep = dep
LOGGER.info('hist subtraction dep is updated to {}'.format(self._cur_dep))
def _cached_histograms(self, histograms, ret='tensor'):
"""
cached cur layer histograms
"""
if ret == 'tb':
self._cur_layer_dtables.append(histograms)
elif ret == 'tensor':
result_nid, result = histograms
for node_id, result_hist in zip(result_nid, result):
self._cur_layer_cached_histograms[node_id] = result_hist
@staticmethod
def _inverse_node_map(node_map):
return {v: k for k, v in node_map.items()}
def _is_left(self, node_id):
"""
check if it is left node
"""
return self._cur_to_split_node_info[node_id]['is_left_node']
    def _get_parent_nid_map(self):
"""
get a map that can map a node to its parent node
"""
rs = {}
for nid in self._cur_to_split_node_info:
if nid == 0:
return None
rs[nid] = self._cur_to_split_node_info[nid]['pid']
return rs
@staticmethod
def _trim_node_map(node_map, leaf_sample_counts):
"""
        From each sibling pair, keep only the node with fewer samples and drop its sibling, to accelerate hist computation
"""
inverse_node_map = {v: k for k, v in node_map.items()}
sibling_node_map = {}
# if is root node, return directly
if 0 in node_map:
return node_map, None
kept_node_id = []
idx = 0
for left_count, right_count in zip(leaf_sample_counts[0::2], leaf_sample_counts[1::2]):
if left_count < right_count:
kept_node_id.append(inverse_node_map[idx])
sibling_node_map[inverse_node_map[idx]] = inverse_node_map[idx + 1]
else:
kept_node_id.append(inverse_node_map[idx + 1])
sibling_node_map[inverse_node_map[idx + 1]] = inverse_node_map[idx]
idx += 2
new_node_map = {node_id: idx for idx, node_id in enumerate(kept_node_id)}
return new_node_map, sibling_node_map
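    # Worked example (illustrative): with node_map {1: 0, 2: 1, 3: 2, 4: 3}
    # (sibling pairs (1, 2) and (3, 4)) and leaf_sample_counts
    # [30, 70, 60, 40], the smaller-sample nodes 1 and 4 are kept, giving
    # new_node_map {1: 0, 4: 1} and sibling_node_map {1: 2, 4: 3}; the larger
    # siblings' histograms are then recovered by subtraction.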
@staticmethod
def _g_h_count_sub(hist_a, hist_b):
return hist_a[0] - hist_b[0], hist_a[1] - hist_b[1], hist_a[2] - hist_b[2]
@staticmethod
def _hist_sub(tensor_hist_a, tensor_hist_b):
new_hist = copy.deepcopy(tensor_hist_b)
assert len(tensor_hist_a) == len(tensor_hist_b)
for fid in range(len(tensor_hist_a)):
            for bid in range(len(tensor_hist_a[fid])):  # if it is not a valid feature, bin_num is 0
new_hist[fid][bid][0], new_hist[fid][bid][1], new_hist[fid][bid][2] = FeatureHistogram._g_h_count_sub(
tensor_hist_a[fid][bid], tensor_hist_b[fid][bid])
return new_hist
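    # Worked example (illustrative): if a parent bin holds [10, 8, 100] and
    # the computed child's bin holds [4, 3, 40], the sibling bin follows for
    # free as [10 - 4, 8 - 3, 100 - 40] = [6, 5, 60]; only the smaller child
    # is ever computed from raw data.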
@staticmethod
def _table_hist_sub(kv):
res = []
for k, v in kv:
parent_hist, son_hist = v
fid, p_hist = parent_hist
(fid, node_id, sib_node_id), s_hist = son_hist
assert len(p_hist) == len(s_hist), 'bin num not equal'
bin_num = len(p_hist)
new_hist = [[0, 0, 0] for i in range(bin_num)]
for bid in range(bin_num):
                # get sibling histograms by hist subtraction; if it is not a valid feature, bin_num is 0
new_hist[bid][0], new_hist[bid][1], new_hist[bid][2] = FeatureHistogram._g_h_count_sub(p_hist[bid],
s_hist[bid])
# key, value
res.append(((sib_node_id, fid), (fid, new_hist)))
res.append(((node_id, fid), (fid, s_hist)))
return res
def _tensor_subtraction(self, histograms, node_map):
"""
histogram subtraction for tensor format
"""
inverse_node_map = self._inverse_node_map(node_map) # get inverse node map
node_ids = []
p_node_ids = []
for idx in range(len(histograms)):
node_id = inverse_node_map[idx]
node_ids.append(node_id)
p_node_ids.append(self._cur_to_split_node_info[node_id]['pid']) # get parent histograms id
result = []
result_nid = []
for node_id, pid, hist in zip(node_ids, p_node_ids, histograms):
# get sibling histograms by histogram subtraction
parent_hist = self._prev_layer_cached_histograms[pid]
sibling_hist = self._hist_sub(parent_hist, hist)
# is right sibling or left sibling ?
if self._is_left(node_id):
result.append(hist)
result.append(sibling_hist)
result_nid.append(node_id)
result_nid.append(node_id + 1)
else:
result.append(sibling_hist)
result.append(hist)
result_nid.append(node_id - 1)
result_nid.append(node_id)
return result_nid, result
def _table_subtraction(self, histograms):
"""
histogram subtraction for table format
"""
LOGGER.debug('joining parent and son histogram tables')
parent_and_son_hist_table = self._prev_layer_dtable.join(histograms, lambda v1, v2: (v1, v2))
result = parent_and_son_hist_table.mapPartitions(FeatureHistogram._table_hist_sub, use_previous_behavior=False)
return result
| 32,237 | 40.067516 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/node.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# Decision Tree Node Structure
# =============================================================================
from federatedml.util import consts
class Node(object):
def __init__(self, id=None, sitename=None, fid=-1,
bid=-1, weight=0, is_leaf=False, sum_grad=None,
sum_hess=None, left_nodeid=-1, right_nodeid=-1,
missing_dir=1, sample_num=0, parent_nodeid=None, is_left_node=False, sibling_nodeid=None,
inst_indices=None):
self.id = id
self.sitename = sitename
self.fid = fid
self.bid = bid
self.weight = weight
self.is_leaf = is_leaf
self.sum_grad = sum_grad
self.sum_hess = sum_hess
self.left_nodeid = left_nodeid
self.right_nodeid = right_nodeid
self.missing_dir = missing_dir
self.parent_nodeid = parent_nodeid
self.sample_num = sample_num
self.is_left_node = is_left_node
self.sibling_nodeid = sibling_nodeid
self.inst_indices = inst_indices
def __str__(self):
return "id{}, fid:{},bid:{},weight:{},sum_grad:{},sum_hess:{},left_node:{},right_node:{}, sitename:{}, " \
"is leaf {}".format(self.id,
self.fid, self.bid, self.weight, self.sum_grad, self.sum_hess, self.left_nodeid,
self.right_nodeid,
self.sitename, self.is_leaf
)
| 2,413 | 38.57377 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/splitter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
#
# =============================================================================
import numpy as np
import warnings
import functools
import random
from fate_arch.session import computing_session as session
from fate_arch.common import log
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.criterion import XgboostCriterion
from federatedml.util import consts
LOGGER = log.getLogger()
class SplitInfo(object):
def __init__(self, sitename=consts.GUEST, best_fid=None, best_bid=None,
sum_grad=0, sum_hess=0, gain=None, missing_dir=1, mask_id=None, sample_count=-1):
self.sitename = sitename
self.best_fid = best_fid
self.best_bid = best_bid
self.sum_grad = sum_grad
self.sum_hess = sum_hess
self.gain = gain
self.missing_dir = missing_dir
self.mask_id = mask_id
self.sample_count = sample_count
def __str__(self):
return '(fid {} bid {}, sum_grad {}, sum_hess {}, gain {}, sitename {}, missing dir {}, mask_id {}, ' \
'sample_count {})\n'.format(
self.best_fid, self.best_bid, self.sum_grad, self.sum_hess, self.gain, self.sitename, self.missing_dir,
self.mask_id, self.sample_count)
def __repr__(self):
return self.__str__()
class Splitter(object):
def __init__(self, criterion_method, criterion_params=[0, 0], min_impurity_split=1e-2, min_sample_split=2,
min_leaf_node=1, min_child_weight=1):
LOGGER.info("splitter init!")
if not isinstance(criterion_method, str):
raise TypeError("criterion_method type should be str, but %s find" % (type(criterion_method).__name__))
if criterion_method == "xgboost":
if not criterion_params:
self.criterion = XgboostCriterion()
else:
try:
reg_lambda, reg_alpha = 0, 0
if isinstance(criterion_params, list):
reg_lambda = float(criterion_params[0])
reg_alpha = float(criterion_params[1])
self.criterion = XgboostCriterion(reg_lambda=reg_lambda, reg_alpha=reg_alpha)
except BaseException:
                    warnings.warn("criterion_params elements should be numeric")
self.criterion = XgboostCriterion()
self.min_impurity_split = min_impurity_split
self.min_sample_split = min_sample_split
self.min_leaf_node = min_leaf_node
self.min_child_weight = min_child_weight
def _check_min_child_weight(self, l_h, r_h):
if isinstance(l_h, np.ndarray):
l_h, r_h = np.sum(l_h), np.sum(r_h)
rs = l_h >= self.min_child_weight and r_h >= self.min_child_weight
return rs
def _check_sample_num(self, l_cnt, r_cnt):
return l_cnt >= self.min_leaf_node and r_cnt >= self.min_leaf_node
def find_split_single_histogram_guest(self, histogram, valid_features, sitename, use_missing, zero_as_missing,
reshape_tuple=None):
if reshape_tuple:
histogram = histogram.reshape(reshape_tuple)
# default values
best_fid = None
best_gain = self.min_impurity_split - consts.FLOAT_ZERO
best_bid = None
best_sum_grad_l = None
best_sum_hess_l = None
missing_bin = 0
if use_missing:
missing_bin = 1
# in default, missing value going to right
missing_dir = 1
for fid in range(len(histogram)):
if valid_features[fid] is False:
continue
bin_num = len(histogram[fid])
if bin_num == 0 + missing_bin:
continue
# last bin contains sum values (cumsum from left)
sum_grad = histogram[fid][bin_num - 1][0]
sum_hess = histogram[fid][bin_num - 1][1]
node_cnt = histogram[fid][bin_num - 1][2]
if node_cnt < self.min_sample_split:
break
if node_cnt < 1: # avoid float error
break
# last bin will not participate in split find, so bin_num - 1
for bid in range(bin_num - missing_bin - 1):
# left gh
sum_grad_l = histogram[fid][bid][0]
sum_hess_l = histogram[fid][bid][1]
node_cnt_l = histogram[fid][bid][2]
# right gh
sum_grad_r = sum_grad - sum_grad_l
sum_hess_r = sum_hess - sum_hess_l
node_cnt_r = node_cnt - node_cnt_l
if self._check_min_child_weight(sum_hess_l, sum_hess_r) and self._check_sample_num(node_cnt_l,
node_cnt_r):
gain = self.criterion.split_gain([sum_grad, sum_hess],
[sum_grad_l, sum_hess_l], [sum_grad_r, sum_hess_r])
if gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
best_gain = gain
best_fid = fid
best_bid = bid
best_sum_grad_l = sum_grad_l
best_sum_hess_l = sum_hess_l
missing_dir = 1
""" missing value handle: dispatch to left child"""
if use_missing:
# add sum of samples with missing features to left
sum_grad_l += histogram[fid][-1][0] - histogram[fid][-2][0]
sum_hess_l += histogram[fid][-1][1] - histogram[fid][-2][1]
node_cnt_l += histogram[fid][-1][2] - histogram[fid][-2][2]
sum_grad_r -= histogram[fid][-1][0] - histogram[fid][-2][0]
sum_hess_r -= histogram[fid][-1][1] - histogram[fid][-2][1]
node_cnt_r -= histogram[fid][-1][2] - histogram[fid][-2][2]
# if have a better gain value, missing dir is left
if self._check_sample_num(node_cnt_l, node_cnt_r) and self._check_min_child_weight(sum_hess_l,
sum_hess_r):
gain = self.criterion.split_gain([sum_grad, sum_hess],
[sum_grad_l, sum_hess_l], [sum_grad_r, sum_hess_r])
if gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
best_gain = gain
best_fid = fid
best_bid = bid
best_sum_grad_l = sum_grad_l
best_sum_hess_l = sum_hess_l
missing_dir = -1
splitinfo = SplitInfo(sitename=sitename, best_fid=best_fid, best_bid=best_bid,
gain=best_gain, sum_grad=best_sum_grad_l, sum_hess=best_sum_hess_l,
missing_dir=missing_dir)
return splitinfo
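    # Gain sketch (assuming the xgboost criterion configured in __init__; the
    # exact FATE formula may differ in constant factors and the reg_alpha
    # term): with regularization lambda, the loop above scores each candidate
    # split roughly as
    #   gain = G_l^2 / (H_l + lambda) + G_r^2 / (H_r + lambda)
    #          - (G_l + G_r)^2 / (H_l + H_r + lambda)
    # and keeps it only if it beats min_impurity_split and the current best.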
def find_split(self, histograms, valid_features, partitions=1, sitename=consts.GUEST,
use_missing=False, zero_as_missing=False):
LOGGER.info("splitter find split of raw data")
histogram_table = session.parallelize(histograms, include_key=False, partition=partitions)
splitinfo_table = histogram_table.mapValues(lambda sub_hist:
self.find_split_single_histogram_guest(sub_hist,
valid_features,
sitename,
use_missing,
zero_as_missing))
tree_node_splitinfo = [None for i in range(len(histograms))]
for id, splitinfo in splitinfo_table.collect():
tree_node_splitinfo[id] = splitinfo
return tree_node_splitinfo
def find_split_single_histogram_host(self, fid_with_histogram, valid_features, sitename, use_missing=False,
zero_as_missing=False):
node_splitinfo = []
node_grad_hess = []
missing_bin = 0
if use_missing:
missing_bin = 1
fid, histogram = fid_with_histogram
if valid_features[fid] is False:
return [], []
bin_num = len(histogram)
if bin_num == 0:
return [], []
node_cnt = histogram[bin_num - 1][2]
if node_cnt < self.min_sample_split:
return [], []
for bid in range(bin_num - missing_bin - 1):
sum_grad_l = histogram[bid][0]
sum_hess_l = histogram[bid][1]
node_cnt_l = histogram[bid][2]
node_cnt_r = node_cnt - node_cnt_l
if node_cnt_l >= self.min_leaf_node and node_cnt_r >= self.min_leaf_node:
splitinfo = SplitInfo(sitename=sitename, best_fid=fid,
best_bid=bid, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
missing_dir=1)
node_splitinfo.append(splitinfo)
node_grad_hess.append((sum_grad_l, sum_hess_l))
if use_missing:
sum_grad_l += histogram[-1][0] - histogram[-2][0]
sum_hess_l += histogram[-1][1] - histogram[-2][1]
node_cnt_l += histogram[-1][2] - histogram[-2][2]
splitinfo = SplitInfo(sitename=sitename, best_fid=fid,
best_bid=bid, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
missing_dir=-1)
node_splitinfo.append(splitinfo)
node_grad_hess.append((sum_grad_l, sum_hess_l))
return node_splitinfo, node_grad_hess
def construct_feature_split_points(self, fid_with_histogram, valid_features, sitename, use_missing,
left_missing_dir, right_missing_dir, mask_id_mapping):
feature_split_info = []
missing_bin = 0
if use_missing:
missing_bin = 1
fid, histogram = fid_with_histogram
if valid_features[fid] is False:
return [], None
bin_num = len(histogram)
if bin_num == 0:
return [], None
node_cnt = histogram[bin_num - 1][2]
if node_cnt < self.min_sample_split:
return [], None
for bid in range(bin_num - missing_bin - 1):
sum_grad_l = histogram[bid][0]
sum_hess_l = histogram[bid][1]
node_cnt_l = histogram[bid][2]
node_cnt_r = node_cnt - node_cnt_l
mask_id = mask_id_mapping[(fid, bid)]
if self._check_sample_num(node_cnt_l, node_cnt_r):
missing_dir = np.random.choice(right_missing_dir)
splitinfo = SplitInfo(sitename=sitename, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
                                      missing_dir=missing_dir, mask_id=mask_id, sample_count=node_cnt_l)  # missing_dir encodes "missing goes right"
feature_split_info.append(splitinfo)
if use_missing:
sum_grad_l += histogram[-1][0] - histogram[-2][0]
sum_hess_l += histogram[-1][1] - histogram[-2][1]
node_cnt_l += histogram[-1][2] - histogram[-2][2]
missing_dir = np.random.choice(left_missing_dir)
splitinfo = SplitInfo(sitename=sitename, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
missing_dir=missing_dir, mask_id=mask_id, sample_count=node_cnt_l)  # true missing dir is -1 (left)
feature_split_info.append(splitinfo)
# split info contains g/h sum and node cnt
g_sum, h_sum = histogram[-1][0], histogram[-1][1]
g_h_sum_info = SplitInfo(sum_grad=g_sum, sum_hess=h_sum, sample_count=node_cnt)
return feature_split_info, g_h_sum_info
def construct_feature_split_points_batches(self, kv_iter, valid_features, sitename,
use_missing, mask_id_mapping, left_missing_dir,
right_missing_dir, batch_size,
cipher_compressor=None,
shuffle_random_seed=None):
result_list = []
split_info_dict = {}
g_h_sum_dict = {}
partition_key = None
for key, value in kv_iter:
nid, fid = key
if partition_key is None:
partition_key = str((nid, fid))
split_info_list, g_h_sum_info = self.construct_feature_split_points(value, valid_features, sitename,
use_missing,
left_missing_dir, right_missing_dir,
mask_id_mapping)
# collect all splitinfo of a node
if nid not in split_info_dict:
split_info_dict[nid] = []
split_info_dict[nid] += split_info_list
if nid not in g_h_sum_dict:
if g_h_sum_info is not None:
g_h_sum_dict[nid] = g_h_sum_info
# cut split info into batches
for nid in split_info_dict:
split_info_list = split_info_dict[nid]
if len(split_info_list) == 0:
result_list.append(
((nid, partition_key + '-empty'), [])) # add an empty split info list if no split info available
continue
if shuffle_random_seed:
random.seed(shuffle_random_seed)
random.shuffle(split_info_list)
# LOGGER.debug('nid {} mask id list {}'.format(nid, shuffle_list))
LOGGER.debug('split info len is {}'.format(len(split_info_list)))
batch_start_idx = range(0, len(split_info_list), batch_size)
batch_idx = 0
for i in batch_start_idx:
key = (nid, (partition_key + '-{}'.format(batch_idx))) # nid, batch_id
batch_idx += 1
g_h_sum_info = g_h_sum_dict[nid]
batch_split_info_list = split_info_list[i: i + batch_size]
# compress ciphers
if cipher_compressor is not None:
compressed_packages = cipher_compressor.compress_split_info(batch_split_info_list, g_h_sum_info)
result_list.append((key, (nid, compressed_packages)))
else:
result_list.append((key, (batch_split_info_list, g_h_sum_info)))
return result_list
def _find_host_best_splits_map_func(self, value, decrypter, gh_packer=None,
host_sitename=consts.HOST):
# find best split points in a node for every host feature, mapValues function
best_gain = self.min_impurity_split - consts.FLOAT_ZERO
best_idx = -1
best_split_info = SplitInfo(sitename=host_sitename, best_fid=-1, best_bid=-1, gain=best_gain,
mask_id=-1)
if len(value) == 0: # this node can not be further split, because split info list is empty
return best_idx, best_split_info
if gh_packer is None:
split_info_list, g_h_info = value
for split_info in split_info_list:
split_info.sum_grad, split_info.sum_hess = decrypter.decrypt(split_info.sum_grad), decrypter.decrypt(
split_info.sum_hess)
g_sum, h_sum = decrypter.decrypt(g_h_info.sum_grad), decrypter.decrypt(g_h_info.sum_hess)
else:
nid, package = value
split_info_list = gh_packer.decompress_and_unpack(package)
g_sum, h_sum = split_info_list[-1].sum_grad, split_info_list[-1].sum_hess # g/h sum is at last index
split_info_list = split_info_list[:-1]
for idx, split_info in enumerate(split_info_list):
l_g, l_h = split_info.sum_grad, split_info.sum_hess
r_g, r_h = g_sum - l_g, h_sum - l_h
gain = self.split_gain(g_sum, h_sum, l_g, l_h, r_g, r_h)
if self._check_min_child_weight(l_h, r_h) and \
gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
new_split_info = SplitInfo(sitename=host_sitename, best_fid=split_info.best_fid,
best_bid=split_info.best_bid, gain=gain,
sum_grad=l_g, sum_hess=l_h, missing_dir=split_info.missing_dir,
mask_id=split_info.mask_id)
best_gain = gain
best_idx = idx
best_split_info = new_split_info
best_split_info.gain = best_gain
return best_idx, best_split_info
@staticmethod
def key_sort_func(a, b):
key_1, key_2 = a[0], b[0]
if key_1[0] == key_2[0]:
if key_1[1] > key_2[1]:
return 1
else:
return -1
else:
if key_1[0] > key_2[0]:
return 1
else:
return -1
def find_host_best_split_info(self, host_split_info_table, host_sitename, decrypter, gh_packer=None):
map_func = functools.partial(self._find_host_best_splits_map_func,
decrypter=decrypter,
host_sitename=host_sitename,
gh_packer=gh_packer
)
host_feature_best_split_table = host_split_info_table.mapValues(map_func)
feature_best_splits = list(host_feature_best_split_table.collect())
sorted_list = sorted(feature_best_splits, key=functools.cmp_to_key(self.key_sort_func))
node_best_splits = {}
for key, result in sorted_list:
node_id, fid = key
best_idx, split_info = result
if node_id not in node_best_splits:
node_best_splits[node_id] = SplitInfo(sitename=host_sitename, best_bid=-1, best_fid=-1,
gain=self.min_impurity_split - consts.FLOAT_ZERO)
if best_idx == -1:
continue
elif split_info.gain > self.min_impurity_split and split_info.gain > node_best_splits[node_id].gain \
+ consts.FLOAT_ZERO:
node_best_splits[node_id] = split_info
return node_best_splits
def host_prepare_split_points(self, histograms, valid_features, mask_id_mapping, use_missing, left_missing_dir,
right_missing_dir, sitename=consts.HOST, batch_size=consts.MAX_SPLITINFO_TO_COMPUTE,
cipher_compressor=None, shuffle_random_seed=None):
LOGGER.info("splitter find split of host")
LOGGER.debug('missing dir mask dict {}, {}'.format(left_missing_dir, right_missing_dir))
map_partition_func = functools.partial(self.construct_feature_split_points_batches,
valid_features=valid_features,
sitename=sitename,
use_missing=use_missing,
left_missing_dir=left_missing_dir,
right_missing_dir=right_missing_dir,
mask_id_mapping=mask_id_mapping,
batch_size=batch_size,
cipher_compressor=cipher_compressor,
shuffle_random_seed=shuffle_random_seed
)
host_splitinfo_table = histograms.mapPartitions(map_partition_func, use_previous_behavior=False)
return host_splitinfo_table
def find_split_host(self, histograms, valid_features, node_map, sitename=consts.HOST,
use_missing=False, zero_as_missing=False):
LOGGER.info("splitter find split of host")
LOGGER.debug('node map len is {}'.format(len(node_map)))
tree_node_splitinfo = [[] for i in range(len(node_map))]
encrypted_node_grad_hess = [[] for i in range(len(node_map))]
host_splitinfo_table = histograms.mapValues(lambda fid_with_hist:
self.find_split_single_histogram_host(fid_with_hist, valid_features,
sitename,
use_missing,
zero_as_missing))
# map node id to node index using node_map
for (idx, fid), splitinfo in host_splitinfo_table.collect():
idx = node_map[idx]
tree_node_splitinfo[idx].extend(splitinfo[0])
encrypted_node_grad_hess[idx].extend(splitinfo[1])
return tree_node_splitinfo, BigObjectTransfer(encrypted_node_grad_hess)
def node_gain(self, grad, hess):
return self.criterion.node_gain(grad, hess)
def node_weight(self, grad, hess):
return self.criterion.node_weight(grad, hess)
def split_gain(self, sum_grad, sum_hess, sum_grad_l, sum_hess_l, sum_grad_r, sum_hess_r):
gain = self.criterion.split_gain([sum_grad, sum_hess],
[sum_grad_l, sum_hess_l], [sum_grad_r, sum_hess_r])
return gain
class BigObjectTransfer:
def __init__(self, data):
self._obj = data
def get_data(self):
return self._obj
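# --- editor's illustrative sketch (not part of the original FATE source) ---
# The splitters above assume cumulative histograms: bin i stores the running
# (grad_sum, hess_sum, count) up to bin i, so the right-side statistics of a
# candidate split are obtained by subtracting from the last bin. Toy numbers:
if __name__ == '__main__':
    toy_hist = [[1.0, 2.0, 10], [3.0, 5.0, 25], [4.0, 8.0, 40]]  # one feature
    g_total, h_total, n_total = toy_hist[-1]
    for bid in range(len(toy_hist) - 1):
        g_l, h_l, n_l = toy_hist[bid]
        g_r, h_r, n_r = g_total - g_l, h_total - h_l, n_total - n_l
        print('bid', bid, 'left', (g_l, h_l, n_l), 'right', (g_r, h_r, n_r))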
| 23,387 | 44.063584 | 122 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/decision_tree.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
# DecisionTree Base Class
# =============================================================================
import abc
from abc import ABC
import numpy as np
import functools
from federatedml.ensemble.basic_algorithms.algorithm_prototype import BasicAlgorithms
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.feature.fate_element_type import NoneType
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import \
SplitInfo, Splitter
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_histogram import \
HistogramBag, FeatureHistogram
from typing import List
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_importance import FeatureImportance
class DecisionTree(BasicAlgorithms, ABC):
def __init__(self, tree_param):
# input parameters
self.criterion_method = tree_param.criterion_method
self.criterion_params = tree_param.criterion_params
self.max_depth = tree_param.max_depth
self.min_sample_split = tree_param.min_sample_split
self.min_impurity_split = tree_param.min_impurity_split
self.min_leaf_node = tree_param.min_leaf_node
self.max_split_nodes = tree_param.max_split_nodes
self.feature_importance_type = tree_param.feature_importance_type
self.n_iter_no_change = tree_param.n_iter_no_change
self.tol = tree_param.tol
self.use_missing = tree_param.use_missing
self.zero_as_missing = tree_param.zero_as_missing
self.min_child_weight = tree_param.min_child_weight
self.sitename = ''
# transfer var
self.transfer_inst = None
# runtime variable
self.feature_importance = {}
self.tree_node = []
self.cur_layer_nodes = []
self.cur_to_split_nodes = []
self.tree_node_num = 0
self.runtime_idx = None
self.valid_features = None
self.splitter = Splitter(
self.criterion_method,
self.criterion_params,
self.min_impurity_split,
self.min_sample_split,
self.min_leaf_node,
self.min_child_weight) # splitter for finding splits
self.inst2node_idx = None # record the internal node id an instance belongs to
self.sample_leaf_pos = None # record the final leaf id of samples
self.sample_weights = None # leaf weights of samples
self.leaf_count = None # num of samples a leaf covers
# data
self.data_bin = None # data after binning
self.bin_split_points = None
self.bin_sparse_points = None
self.data_with_node_assignments = None
self.cur_layer_sample_count = None
# g_h
self.grad_and_hess = None
# for data protection
self.split_maskdict = {}
self.missing_dir_maskdict = {}
# histogram
self.deterministic = tree_param.deterministic
self.hist_computer = FeatureHistogram()
if self.deterministic:
self.hist_computer.stable_reduce = True
"""
Common functions
"""
def get_feature_importance(self):
return self.feature_importance
@staticmethod
def get_grad_hess_sum(grad_and_hess_table):
LOGGER.info("calculate the sum of grad and hess")
grad, hess = grad_and_hess_table.reduce(
lambda value1, value2: (value1[0] + value2[0], value1[1] + value2[1]))
return grad, hess
def init_data_and_variable(self, flowid, runtime_idx, data_bin, bin_split_points, bin_sparse_points, valid_features,
grad_and_hess):
self.set_flowid(flowid)
self.set_runtime_idx(runtime_idx)
LOGGER.info("set valid features")
self.valid_features = valid_features
self.grad_and_hess = grad_and_hess
self.data_bin = data_bin
self.bin_split_points = bin_split_points
self.bin_sparse_points = bin_sparse_points
def check_max_split_nodes(self):
# check max_split_nodes
if self.max_split_nodes != 0 and self.max_split_nodes % 2 == 1:
self.max_split_nodes += 1
LOGGER.warning('an even max_split_nodes value is suggested '
'when using histogram-subtraction, max_split_nodes reset to {}'.format(self.max_split_nodes))
def set_flowid(self, flowid=0):
LOGGER.info("set flowid, flowid is {}".format(flowid))
self.transfer_inst.set_flowid(flowid)
def set_runtime_idx(self, runtime_idx):
self.runtime_idx = runtime_idx
self.sitename = ":".join([self.sitename, str(self.runtime_idx)])
"""
Histogram interface
"""
def get_local_histograms(self, dep, data_with_pos, g_h, node_sample_count, cur_to_split_nodes, node_map,
ret='tensor', hist_sub=True):
LOGGER.info("start to compute node histograms")
acc_histograms = self.hist_computer.compute_histogram(dep,
data_with_pos,
g_h,
self.bin_split_points,
self.bin_sparse_points,
self.valid_features,
node_map, node_sample_count,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
ret=ret,
hist_sub=hist_sub,
cur_to_split_nodes=cur_to_split_nodes)
LOGGER.info("compute node histograms done")
return acc_histograms
"""
Node map functions
"""
@staticmethod
def get_node_map(nodes: List[Node], left_node_only=False):
node_map = {}
idx = 0
for node in nodes:
if node.id != 0 and (not node.is_left_node and left_node_only):
continue
node_map[node.id] = idx
idx += 1
return node_map
@staticmethod
def get_leaf_node_map(nodes: List[Node]):
leaf_nodes = []
for n in nodes:
if n.is_leaf:
leaf_nodes.append(n)
return DecisionTree.get_node_map(leaf_nodes)
"""
Sample count functions
"""
@staticmethod
def sample_count_map_func(kv, node_map):
# record node sample number in count_arr
count_arr = np.zeros(len(node_map))
for k, v in kv:
if isinstance(v, int): # leaf node format: (leaf_node_id)
key = v
else: # internal node format: (1, node_id)
key = v[1]
if key not in node_map:
continue
node_idx = node_map[key]
count_arr[node_idx] += 1
return count_arr
@staticmethod
def sample_count_reduce_func(v1, v2):
return v1 + v2
def count_node_sample_num(self, inst2node_idx, node_map):
"""
count sample number in internal nodes during training
"""
count_func = functools.partial(self.sample_count_map_func, node_map=node_map)
rs = inst2node_idx.applyPartitions(count_func).reduce(self.sample_count_reduce_func)
return rs
"""
Sample weight functions
"""
def get_sample_weights(self):
# return sample weights to boosting class
return self.sample_weights
@staticmethod
def assign_instance_to_root_node(data_bin, root_node_id):
return data_bin.mapValues(lambda inst: (1, root_node_id))
@staticmethod
def float_round(num):
"""
prevent float error
"""
return np.round(num, consts.TREE_DECIMAL_ROUND)
def update_feature_importance(self, splitinfo, record_site_name=True):
inc_split, inc_gain = 1, splitinfo.gain
sitename = splitinfo.sitename
fid = splitinfo.best_fid
if record_site_name:
key = (sitename, fid)
else:
key = fid
if key not in self.feature_importance:
self.feature_importance[key] = FeatureImportance(0, 0, self.feature_importance_type)
self.feature_importance[key].add_split(inc_split)
if inc_gain is not None:
self.feature_importance[key].add_gain(inc_gain)
@staticmethod
def get_node_weights(node_id, tree_nodes):
return tree_nodes[node_id].weight
def extract_sample_weights_from_node(self, sample_leaf_pos):
"""
Given a dtable that contains the leaf positions of samples, return their leaf weights
"""
func = functools.partial(self.get_node_weights, tree_nodes=self.tree_node)
sample_weights = sample_leaf_pos.mapValues(func)
return sample_weights
def sample_weights_post_process(self):
self.sample_weights = self.extract_sample_weights_from_node(self.sample_leaf_pos)
leaf_node_map = self.get_leaf_node_map(self.tree_node)
leaf_count = self.count_node_sample_num(self.sample_leaf_pos, leaf_node_map)
rs = {}
for k, v in leaf_node_map.items():
rs[k] = int(leaf_count[v])
self.leaf_count = rs
LOGGER.debug('final leaf count is {}'.format(self.leaf_count))
@staticmethod
def make_decision(data_inst, fid, bid, missing_dir, use_missing, zero_as_missing, zero_val=0):
left, right = True, False
missing_dir = left if missing_dir == -1 else right
# use missing and zero as missing
if use_missing and zero_as_missing:
# missing or zero
if data_inst.features.get_data(fid) == NoneType() or data_inst.features.get_data(fid, None) is None:
return missing_dir
# is missing feat
if data_inst.features.get_data(fid) == NoneType():
return missing_dir
# no missing val
feat_val = data_inst.features.get_data(fid, zero_val)
direction = left if feat_val <= bid + consts.FLOAT_ZERO else right
return direction
@staticmethod
def go_next_layer(node, data_inst, use_missing, zero_as_missing, bin_sparse_point=None,
split_maskdict=None,
missing_dir_maskdict=None,
decoder=None,
return_node_id=True):
if missing_dir_maskdict is not None and split_maskdict is not None:
fid = decoder("feature_idx", node.fid, split_maskdict=split_maskdict)
bid = decoder("feature_val", node.bid, node.id, split_maskdict=split_maskdict)
missing_dir = decoder("missing_dir", node.missing_dir, node.id, missing_dir_maskdict=missing_dir_maskdict)
else:
fid, bid = node.fid, node.bid
missing_dir = node.missing_dir
zero_val = 0 if bin_sparse_point is None else bin_sparse_point[fid]
go_left = DecisionTree.make_decision(data_inst, fid, bid, missing_dir, use_missing, zero_as_missing, zero_val)
if not return_node_id:
return go_left
if go_left:
return node.left_nodeid
else:
return node.right_nodeid
def round_leaf_val(self):
# process predict weight to prevent float error
for node in self.tree_node:
if node.is_leaf:
node.weight = self.float_round(node.weight)
@staticmethod
def mo_weight_extract(node):
mo_weight = None
weight = node.weight
if isinstance(node.weight, np.ndarray) and len(node.weight) > 1:
weight = -1
mo_weight = list(node.weight) # use multi output
return weight, mo_weight
@staticmethod
def mo_weight_load(node_param):
weight = node_param.weight
mo_weight = list(node_param.mo_weight)
if len(mo_weight) != 0:
weight = np.array(list(node_param.mo_weight))
return weight
"""
To implement
"""
@abc.abstractmethod
def fit(self):
pass
@abc.abstractmethod
def predict(self, data_inst):
pass
@abc.abstractmethod
def initialize_root_node(self, *args):
pass
@abc.abstractmethod
def compute_best_splits(self, *args):
pass
@abc.abstractmethod
def update_instances_node_positions(self, *args):
pass
@abc.abstractmethod
def assign_an_instance(self, *args):
pass
@abc.abstractmethod
def assign_instances_to_new_node(self, *args):
pass
@abc.abstractmethod
def update_tree(self, *args):
pass
@abc.abstractmethod
def convert_bin_to_real(self, *args):
pass
@abc.abstractmethod
def get_model_meta(self):
raise NotImplementedError("method should overload")
@abc.abstractmethod
def get_model_param(self):
raise NotImplementedError("method should overload")
@abc.abstractmethod
def set_model_param(self, model_param):
pass
@abc.abstractmethod
def set_model_meta(self, model_meta):
pass
@abc.abstractmethod
def traverse_tree(self, *args):
pass
"""
Model I/O
"""
def get_model(self):
model_meta = self.get_model_meta()
model_param = self.get_model_param()
return model_meta, model_param
def load_model(self, model_meta=None, model_param=None):
LOGGER.info("load tree model")
self.set_model_meta(model_meta)
self.set_model_param(model_param)
"""
For debug
"""
def print_leafs(self):
LOGGER.debug('printing tree')
if len(self.tree_node) == 0:
LOGGER.debug('this tree is empty')
else:
for node in self.tree_node:
LOGGER.debug(node)
@staticmethod
def print_split(split_infos: List[SplitInfo]):
LOGGER.debug('printing split info')
for info in split_infos:
LOGGER.debug(info)
@staticmethod
def print_hist(hist_list: List[HistogramBag]):
LOGGER.debug('printing histogramBag')
for bag in hist_list:
LOGGER.debug(bag)
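# --- editor's illustrative sketch (not part of the original FATE source) ---
# Demonstrates the direction rule in make_decision: a sample goes left when its
# feature value is <= the split bid (plus a float tolerance); missing values
# follow missing_dir (-1 means left). _FakeFeatures/_FakeInst are hypothetical
# stand-ins for the real sparse Instance type, for illustration only.
if __name__ == '__main__':
    class _FakeFeatures:
        def __init__(self, data):
            self._data = data

        def get_data(self, fid, default=None):
            return self._data.get(fid, default)

    class _FakeInst:
        def __init__(self, data):
            self.features = _FakeFeatures(data)

    inst = _FakeInst({0: 0.3})
    go_left = DecisionTree.make_decision(inst, fid=0, bid=0.5, missing_dir=-1,
                                         use_missing=False, zero_as_missing=False)
    print('go left?', go_left)  # True, since 0.3 <= 0.5 + consts.FLOAT_ZERO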
| 15,297 | 32.474836 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/g_h_optim.py
|
import functools
import numpy as np
from federatedml.secureprotol.fixedpoint import FixedPointNumber
from federatedml.secureprotol import PaillierEncrypt, IpclPaillierEncrypt
from federatedml.cipher_compressor.packer import GuestIntegerPacker, cipher_list_to_cipher_tensor
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
from federatedml.util import consts
from federatedml.cipher_compressor.compressor import CipherCompressorHost, NormalCipherPackage
from federatedml.cipher_compressor.compressor import PackingCipherTensorPackage
from federatedml.util import LOGGER
from typing import Union
fix_point_precision = 2 ** 52
REGRESSION_MAX_GRADIENT = 10 ** 9
def post_func(x):
# add a 0 to occupy h position
return x[0], 0
def g_h_recover_post_func(unpack_rs_list: list, precision):
if len(unpack_rs_list) == 2:
g = unpack_rs_list[0] / precision
h = unpack_rs_list[1] / precision
return g, h
else:
g_list, h_list = [], []
for g, h in zip(unpack_rs_list[0::2], unpack_rs_list[1::2]):
g_list.append(g / precision)
h_list.append(h / precision)
return np.array(g_list), np.array(h_list)
class SplitInfoPackage(NormalCipherPackage):
def __init__(self, padding_length, max_capacity):
super(SplitInfoPackage, self).__init__(padding_length, max_capacity)
self._split_info_without_gh = []
self._cur_splitinfo_contains = 0
def add(self, split_info):
split_info_cp = SplitInfo(sitename=split_info.sitename, best_bid=split_info.best_bid,
best_fid=split_info.best_fid, missing_dir=split_info.missing_dir,
mask_id=split_info.mask_id, sample_count=split_info.sample_count)
en_g = split_info.sum_grad
super(SplitInfoPackage, self).add(en_g)
self._cur_splitinfo_contains += 1
self._split_info_without_gh.append(split_info_cp)
def unpack(self, decrypter):
unpack_rs = super(SplitInfoPackage, self).unpack(decrypter)
for split_info, g_h in zip(self._split_info_without_gh, unpack_rs):
split_info.sum_grad = g_h
return self._split_info_without_gh
class SplitInfoPackage2(PackingCipherTensorPackage):
def __init__(self, padding_length, max_capacity):
super(SplitInfoPackage2, self).__init__(padding_length, max_capacity)
self._split_info_without_gh = []
self._cur_splitinfo_contains = 0
def add(self, split_info):
split_info_cp = SplitInfo(sitename=split_info.sitename, best_bid=split_info.best_bid,
best_fid=split_info.best_fid, missing_dir=split_info.missing_dir,
mask_id=split_info.mask_id, sample_count=split_info.sample_count)
en_g = split_info.sum_grad
super(SplitInfoPackage2, self).add(en_g)
self._cur_splitinfo_contains += 1
self._split_info_without_gh.append(split_info_cp)
def unpack(self, decrypter):
unpack_rs = super(SplitInfoPackage2, self).unpack(decrypter)
for split_info, g_h in zip(self._split_info_without_gh, unpack_rs):
split_info.sum_grad = g_h
return self._split_info_without_gh
class GHPacker(object):
def __init__(self, sample_num: int, encrypter: Union[PaillierEncrypt, IpclPaillierEncrypt],
precision=fix_point_precision, max_sample_weight=1.0, task_type=consts.CLASSIFICATION,
g_min=None, g_max=None, h_max=None, class_num=1, mo_mode=False, sync_para=True):
if task_type == consts.CLASSIFICATION:
g_max = 1.0
g_min = -1.0
h_max = 1.0
elif task_type == consts.REGRESSION:
if g_min is None and g_max is None:
g_max = REGRESSION_MAX_GRADIENT # assign a large value for regression gradients
g_min = -g_max
else:
g_max = g_max
g_min = g_min
if h_max is None:
h_max = 2
else:
raise ValueError('unknown task type {}'.format(task_type))
self.g_max, self.g_min, self.h_max = g_max * max_sample_weight, g_min * max_sample_weight, h_max * max_sample_weight
self.g_offset = abs(self.g_min)
self.g_max_int, self.h_max_int = self._compute_packing_parameter(sample_num, precision)
self.exponent = FixedPointNumber.encode(0, precision=precision).exponent
self.precision = precision
self.class_num = class_num
self.mo_mode = mo_mode
self.packer = GuestIntegerPacker(class_num * 2, [self.g_max_int, self.h_max_int] * class_num,
encrypter=encrypter,
sync_para=sync_para)
def _compute_packing_parameter(self, sample_num: int, precision=2 ** 53):
h_sum_max = self.h_max * sample_num
h_max_int = int(h_sum_max * precision) + 1
g_offset_max = self.g_offset + self.g_max
g_max_int = int(g_offset_max * sample_num * precision) + 1
return g_max_int, h_max_int
@staticmethod
def fixedpoint_encode(num, mul):
int_fixpoint = int(round(num * mul))
return int_fixpoint
@staticmethod
def to_fixedpoint_arr_format(gh, mul, g_offset):
en_list = []
g_arr, h_arr = gh
for g, h in zip(g_arr, h_arr):
g += g_offset # to positive
g_encoding = GHPacker.fixedpoint_encode(g, mul)
h_encoding = GHPacker.fixedpoint_encode(h, mul)
en_list.append(g_encoding)
en_list.append(h_encoding)
return en_list
@staticmethod
def to_fixedpoint(gh, mul, g_offset):
g, h = gh
return [GHPacker.fixedpoint_encode(g + g_offset, mul), GHPacker.fixedpoint_encode(h, mul)]
def pack_and_encrypt(self, gh):
fixedpoint_encode_func = self.to_fixedpoint
if self.mo_mode:
fixedpoint_encode_func = self.to_fixedpoint_arr_format
fixed_int_encode_func = functools.partial(fixedpoint_encode_func, mul=self.precision, g_offset=self.g_offset)
large_int_gh = gh.mapValues(fixed_int_encode_func)
if not self.mo_mode:
en_g_h = self.packer.pack_and_encrypt(large_int_gh,
post_process_func=post_func) # take cipher out from list
else:
en_g_h = self.packer.pack_and_encrypt(large_int_gh)
en_g_h = en_g_h.mapValues(lambda x: (x, 0)) # add 0 to occupy h position
return en_g_h
def decompress_and_unpack(self, split_info_package_list):
rs = self.packer.decrypt_cipher_packages(split_info_package_list)
for split_info in rs:
if self.mo_mode:
unpack_rs = self.packer.unpack_an_int_list(split_info.sum_grad)
else:
unpack_rs = self.packer.unpack_an_int(split_info.sum_grad, self.packer.bit_assignment[0])
g, h = g_h_recover_post_func(unpack_rs, fix_point_precision)
split_info.sum_grad = g - self.g_offset * split_info.sample_count
split_info.sum_hess = h
return rs
class PackedGHCompressor(object):
def __init__(self, sync_para=True, mo_mode=False):
package_class = SplitInfoPackage
if mo_mode:
package_class = SplitInfoPackage2
self.compressor = CipherCompressorHost(package_class=package_class, sync_para=sync_para)
def compress_split_info(self, split_info_list, g_h_sum_info):
split_info_list.append(g_h_sum_info) # append to end
rs = self.compressor.compress(split_info_list)
return rs
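# --- editor's illustrative sketch (not part of the original FATE source) ---
# The fixed-point idea behind GHPacker, shown without encryption: g (shifted to
# be non-negative) and h are scaled to integers and packed into one big integer,
# so a single homomorphic addition aggregates both at once. The 70-bit h slot
# below is an illustrative assumption, not the packer's real bit assignment.
if __name__ == '__main__':
    g_offset = 1.0   # classification: g lies in [-1, 1]
    h_bits = 70      # assumed capacity of the h slot

    def pack(g, h):
        g_int = int(round((g + g_offset) * fix_point_precision))
        h_int = int(round(h * fix_point_precision))
        return (g_int << h_bits) + h_int

    def unpack(packed, count):
        g_int, h_int = packed >> h_bits, packed & ((1 << h_bits) - 1)
        return g_int / fix_point_precision - g_offset * count, h_int / fix_point_precision

    packed_sum = pack(0.25, 0.1875) + pack(-0.5, 0.25)  # plaintext add stands in for the homomorphic op
    print(unpack(packed_sum, count=2))                  # ~(-0.25, 0.4375)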
| 7,759 | 38.794872 | 124 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/tree_plan.py
|
from typing import Tuple, List
from federatedml.util import LOGGER
from federatedml.util import consts
tree_type_dict = {
'guest_feat_only': 0, # use only guest feature to build this tree
'host_feat_only': 1, # use only host feature to build this tree
'normal_tree': 2, # a normal decision tree
'layered_tree': 3 # a layered decision tree
}
tree_actions = {
'guest_only': 0, # use only guest feature to build this layer
'host_only': 1, # use only host feature to build this layer
'guest_and_host': 2, # use global feature to build this layer
}
def create_tree_plan(work_mode: str, k=1, tree_num=10, host_list=None, complete_secure=0):
"""
Args:
work_mode:
k: k is needed when work_mode is 'layered'
tree_num: decision tree number
host_list: need to specify host idx when under multi-host scenario, default is None
complete_secure: int, num of complete secure tree
Returns: tree plan: a list of (tree type, host id) tuples; host id -1 is the default value
"""
LOGGER.info('boosting_core trees work mode is {}'.format(work_mode))
tree_plan = []
if work_mode == consts.MIX_TREE:
assert k > 0
assert len(host_list) > 0
one_round = [(tree_type_dict['guest_feat_only'], -1)] * k
for host_idx, host_id in enumerate(host_list):
one_round += [(tree_type_dict['host_feat_only'], host_id)] * k
round_num = (tree_num // (2 * k)) + 1
tree_plan = (one_round * round_num)[0:tree_num]
elif work_mode == consts.LAYERED_TREE:
tree_plan = [(tree_type_dict['layered_tree'], -1) for i in range(tree_num)]
if complete_secure > 0:
complete_secure = tree_num if complete_secure > tree_num else complete_secure
for i in range(complete_secure):
tree_plan[i] = (tree_type_dict['guest_feat_only'], -1)
return tree_plan
def create_node_plan(tree_type, target_host_id, max_depth) -> List[Tuple[int, int]]:
LOGGER.debug('cur tree working mode is {}'.format((tree_type, target_host_id)))
node_plan = []
if tree_type == tree_type_dict['guest_feat_only']:
node_plan = [(tree_actions['guest_only'], target_host_id) for i in range(max_depth)]
elif tree_type == tree_type_dict['host_feat_only']:
node_plan = [(tree_actions['host_only'], target_host_id) for i in range(max_depth)]
return node_plan
def create_layered_tree_node_plan(guest_depth=0, host_depth=0, host_list=None):
assert guest_depth > 0 and host_depth > 0
assert len(host_list) > 0
one_round = []
for host_idx, host_id in enumerate(host_list):
one_round += [(tree_type_dict['host_feat_only'], host_id)] * host_depth
one_round += [(tree_type_dict['guest_feat_only'], -1)] * guest_depth
return one_round
def encode_plan(p, split_token='_'):
result = []
for tree_type_or_action, host_id in p:
result.append(str(tree_type_or_action) + split_token + str(host_id))
return result
def decode_plan(s, split_token='_'):
result = []
for string in s:
t = string.split(split_token)
result.append((int(t[0]), int(t[1])))
return result
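# --- editor's illustrative sketch (not part of the original FATE source) ---
# A round trip through encode_plan/decode_plan: plans are lists of
# (tree_type_or_action, host_id) tuples serialized as 'type_hostid' strings.
if __name__ == '__main__':
    plan = [(tree_type_dict['guest_feat_only'], -1),
            (tree_type_dict['host_feat_only'], 10000)]
    encoded = encode_plan(plan)   # ['0_-1', '1_10000']
    assert decode_plan(encoded) == plan
    print(encoded)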
| 3,192 | 33.333333 | 92 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/criterion.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# Criterion
# =============================================================================
import copy
import numpy as np
from federatedml.util import LOGGER
from federatedml.util import consts
class Criterion(object):
def __init__(self, criterion_params):
pass
@staticmethod
def split_gain(node_sum, left_node_sum, right_node_sum):
raise NotImplementedError("node gain calculation method should be define!!!")
class XgboostCriterion(Criterion):
def __init__(self, reg_lambda=0.1, reg_alpha=0):
self.reg_lambda = reg_lambda # l2 reg
self.reg_alpha = reg_alpha # l1 reg
LOGGER.info('splitter criterion setting done: l1 {}, l2 {}'.format(self.reg_alpha, self.reg_lambda))
@staticmethod
def is_tensor(data):
return isinstance(data, np.ndarray)
@staticmethod
def _g_alpha_cmp(gradient, reg_alpha):
if XgboostCriterion.is_tensor(gradient):
# compute masks on the original values so the in-place shifts do not
# interfere with one another, then return the L1-shrunk copy
new_grad = copy.copy(gradient)
neg_mask = gradient < -reg_alpha
pos_mask = gradient > reg_alpha
new_grad[neg_mask] += reg_alpha
new_grad[pos_mask] -= reg_alpha
new_grad[~neg_mask & ~pos_mask] = 0
return new_grad
else:
if gradient < - reg_alpha:
return gradient + reg_alpha
elif gradient > reg_alpha:
return gradient - reg_alpha
else:
return 0
@staticmethod
def truncate(f, n=consts.TREE_DECIMAL_ROUND):
return np.floor(f * 10 ** n) / 10 ** n
def split_gain(self, node_sum, left_node_sum, right_node_sum):
sum_grad, sum_hess = node_sum
left_node_sum_grad, left_node_sum_hess = left_node_sum
right_node_sum_grad, right_node_sum_hess = right_node_sum
rs = self.node_gain(left_node_sum_grad, left_node_sum_hess) + \
self.node_gain(right_node_sum_grad, right_node_sum_hess) - \
self.node_gain(sum_grad, sum_hess)
return self.truncate(rs)
def node_gain(self, sum_grad, sum_hess):
sum_grad, sum_hess = self.truncate(sum_grad), self.truncate(sum_hess)
num = self._g_alpha_cmp(sum_grad, self.reg_alpha)
structure_score = self.truncate(num * num / (sum_hess + self.reg_lambda))
if XgboostCriterion.is_tensor(structure_score):
structure_score = np.sum(structure_score)
return structure_score
def node_weight(self, sum_grad, sum_hess):
weight = self.truncate(-(self._g_alpha_cmp(sum_grad, self.reg_alpha)) / (sum_hess + self.reg_lambda))
return weight
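# --- editor's illustrative sketch (not part of the original FATE source) ---
# The structure score used above follows XGBoost:
#   gain = G_l^2/(H_l + lambda) + G_r^2/(H_r + lambda) - G^2/(H + lambda)
#   leaf weight = -G / (H + lambda)
# with optional L1 shrinkage applied to G. Toy numbers:
if __name__ == '__main__':
    crit = XgboostCriterion(reg_lambda=0.1, reg_alpha=0)
    parent = (1.0, 4.0)                  # (sum_grad, sum_hess)
    left, right = (0.8, 1.5), (0.2, 2.5)
    print('split gain:', crit.split_gain(parent, left, right))
    print('leaf weight:', crit.node_weight(*parent))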
| 3,491 | 35.757895 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/cross_entropy.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.optim import activation
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.loss import Loss
class SigmoidBinaryCrossEntropyLoss(object):
@staticmethod
def initialize(y):
"""
The initial value when using cross entropy;
this function is mainly used in SecureBoost's tree value initialization
Parameters
----------
y : Table
The input data's labels
Returns
-------
y_initialize : Table, the value of the table is a 1D numpy ndarray,
which is filled with zeros
"""
return y.mapValues(lambda x: np.zeros(1)), np.zeros(1)
@staticmethod
def predict(value):
"""
Predict method for using sigmoid cross entropy
Formula : probability = 1.0 / (1.0 + exp(-value))
Parameters
----------
value : float, The input value of sigmoid function
Returns
-------
probability : float, the output of sigmoid function
"""
return activation.sigmoid(value)
@staticmethod
def compute_loss(y, y_prob, sample_weights=None):
"""
The cross-entropy loss for binary classification
Formula : -(sum(y * log(y_prob) + (1 - y) * log(1 - y_prob)) / N)
Parameters
----------
y : Table
The input data's labels
y_prob : Table
The predict probability.
Returns
-------
log_loss : float, the binary cross entropy loss
"""
logloss = y.join(y_prob, lambda y, yp: (-np.nan_to_num(y * np.log(yp) + (1 - y) * np.log(1 - yp)), 1))
avg_loss = Loss.reduce(logloss, sample_weights=sample_weights)
return avg_loss
@staticmethod
def compute_grad(y, y_pred):
"""
Compute the grad of sigmoid cross entropy function
Formula : gradient = y_pred - y
Parameters
----------
y : int, label
y_pred : float, the predict probability.
Returns
-------
gradient : float, the gradient of binary cross entropy loss
"""
return y_pred - y
@staticmethod
def compute_hess(y, y_pred):
"""
Compute the hessian (second order derivative) of sigmoid cross entropy loss
Formula : hessian = y_pred * (1 - y_pred)
Parameters
----------
y : int, just use for function interface alignment
y_pred : float, the predict probability
Returns
-------
hess : float, the hessian of binary cross entropy loss
"""
return y_pred * (1 - y_pred)
class SoftmaxCrossEntropyLoss(Loss):
@staticmethod
def initialize(y, dims=1):
"""
The initial value when using softmax cross entropy loss;
this function is mainly used in SecureBoost's tree value initialization
Parameters
----------
y : Table
The input data's labels
dims: the number of different category labels
Returns
-------
y_initialize : table, the value of the table is a 1D numpy ndarray
with shape equal to dims, filled with zeros
"""
return y.mapValues(lambda x: np.zeros(dims)), np.zeros(dims)
@staticmethod
def predict(values):
"""
Predict method for using softmax cross entropy
Formula : probability(category_i) =
exp(value(category_i)) / sum_j(exp(value(category_j)))
Parameters
----------
values : ndarray, The input value of softmax function
Returns
-------
probability : ndarray, the output of softmax function,
the array shape is the same as the input values
"""
return activation.softmax(values)
@staticmethod
def compute_loss(y, y_prob, sample_weights=None):
"""
The cross-entropy loss for multi-class classification
Formula : -sum(log(prob(category_i))) / N
Parameters
----------
y : Table
The input data's labels
y_prob : Table, value of Table is ndarray
The predict probability of each category.
Returns
-------
softmax_loss : float, the softmax cross entropy loss
"""
# np.sum(np.nan_to_num(y_i * np.log(y_pred)), axis=1)
loss = y.join(y_prob, lambda y, yp_array: (-np.nan_to_num(np.log(yp_array[y])), 1))
avg_loss = Loss.reduce(loss, sample_weights=sample_weights)
return avg_loss
@staticmethod
def compute_grad(y, y_pred):
"""
Compute the grad of softmax cross entropy function
Parameters
----------
y : int, label
y_pred : ndarray, the predict probability of each category.
Returns
-------
gradient : ndarray, the gradient of softmax cross entropy loss
"""
grad = y_pred.copy()
grad[y] -= 1
return grad
@staticmethod
def compute_hess(y, y_pred):
"""
Compute the hessian of softmax cross entropy function
Parameters
----------
y : int, label
y_pred : ndarray, the predict probability of each category.
Returns
-------
hessian : ndarray, the hessian of softmax cross entropy loss
"""
return 2 * y_pred * (1 - y_pred)
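# --- editor's illustrative sketch (not part of the original FATE source) ---
# Sanity check of the analytic gradient above: for a raw score s with
# p = sigmoid(s), the derivative of -(y*log(p) + (1-y)*log(1-p)) w.r.t. s
# is p - y, which is exactly what compute_grad returns.
if __name__ == '__main__':
    y, score, eps = 1, 0.3, 1e-6

    def loss_at(s):
        p = SigmoidBinaryCrossEntropyLoss.predict(s)
        return -(y * np.log(p) + (1 - y) * np.log(1 - p))

    numeric = (loss_at(score + eps) - loss_at(score - eps)) / (2 * eps)
    analytic = SigmoidBinaryCrossEntropyLoss.compute_grad(
        y, SigmoidBinaryCrossEntropyLoss.predict(score))
    print(numeric, analytic)  # both ~= sigmoid(score) - y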
| 6,115 | 26.182222 | 110 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/loss.py
|
from federatedml.statistic.data_overview import with_weight
class Loss(object):
@staticmethod
def initialize(y):
raise NotImplementedError()
@staticmethod
def predict(value):
raise NotImplementedError()
@staticmethod
def compute_loss(y, y_pred, sample_weights=None):
raise NotImplementedError()
@staticmethod
def compute_grad(y, y_pred):
raise NotImplementedError()
@staticmethod
def compute_hess(y, y_pred):
raise NotImplementedError()
@staticmethod
def reduce(sample_loss, sample_weights=None):
from federatedml.util import LOGGER
if sample_weights is not None and with_weight(sample_weights):
# apply sample weights
sample_loss = sample_loss.join(sample_weights, lambda x1, x2: (x1[0] * x2.weight, x1[1] * x2.weight))
loss_sum, sample_num = sample_loss.reduce(lambda tuple1, tuple2: (tuple1[0] + tuple2[0], tuple1[1] + tuple2[1]))
return loss_sum / sample_num
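# --- editor's illustrative sketch (not part of the original FATE source) ---
# Loss.reduce averages per-sample (loss, 1) pairs; with sample weights each pair
# becomes (loss * w, w), so the result is a weighted mean. Plain-Python analogue:
if __name__ == '__main__':
    pairs = [(0.2, 1), (0.6, 1), (0.4, 1)]
    weights = [1.0, 2.0, 1.0]
    weighted = [(l * w, c * w) for (l, c), w in zip(pairs, weights)]
    loss_sum = sum(l for l, _ in weighted)
    sample_num = sum(c for _, c in weighted)
    print(loss_sum / sample_num)  # 0.45, the weighted mean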
| 1,016 | 28.057143 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/regression_loss.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import functools
from federatedml.feature.instance import Instance
from federatedml.util import consts
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.loss import Loss
class LeastSquaredErrorLoss(Loss):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
@staticmethod
def predict(value):
return value
@staticmethod
def compute_loss(y, y_pred, sample_weight=None):
lse_loss = y.join(y_pred, lambda y, yp: ((y - yp) * (y - yp), 1))
avg_loss = Loss.reduce(lse_loss, sample_weights=sample_weight)
return avg_loss
@staticmethod
def compute_grad(y, y_pred):
return 2 * (y_pred - y)
@staticmethod
def compute_hess(y, y_pred):
if type(y).__name__ == "ndarray" or type(y_pred).__name__ == "ndarray":
shape = (y - y_pred).shape
return np.full(shape, 2)
else:
return 2
class LeastAbsoluteErrorLoss(Loss):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
median = statistics.get_median()["label"]
return y.mapValues(lambda x: np.asarray([median])), np.asarray([median])
@staticmethod
def predict(value):
return value
@staticmethod
def compute_loss(y, y_pred, sample_weight=None):
lae_loss = y.join(y_pred, lambda y, yp: (np.abs(y - yp), 1))
avg_loss = Loss.reduce(lae_loss, sample_weights=sample_weight)
return avg_loss
@staticmethod
def compute_grad(y, y_pred):
if type(y).__name__ == "ndarray" or type(y_pred).__name__ == "ndarray":
diff = y_pred - y
diff[diff > consts.FLOAT_ZERO] = 1
diff[diff < consts.FLOAT_ZERO] = -1
diff[np.abs(diff) <= consts.FLOAT_ZERO] = 0
return diff
else:
diff = y_pred - y
if diff > consts.FLOAT_ZERO:
return 1
elif diff < consts.FLOAT_ZERO:
return -1
else:
return 0
@staticmethod
def compute_hess(y, y_pred):
if type(y).__name__ == "ndarray" or type(y_pred).__name__ == "ndarray":
shape = (y - y_pred).shape
return np.full(shape, 1)
else:
return 1
class HuberLoss(Loss):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
def __init__(self, delta):
super().__init__()
if delta is None:
self.delta = consts.FLOAT_ZERO
else:
self.delta = delta
if np.abs(self.delta) < consts.FLOAT_ZERO:
self.delta = consts.FLOAT_ZERO
def compute_loss(self, y, y_pred, sample_weight=None):
huber_loss = y.join(y_pred, lambda y, yp:
(self.delta ** 2 * (np.sqrt(1 + ((yp - y) / self.delta) ** 2) - 1), 1))
avg_loss = Loss.reduce(huber_loss, sample_weights=sample_weight)
return avg_loss
@staticmethod
def predict(value):
return value
def compute_grad(self, y, y_pred):
diff = y_pred - y
return diff / np.sqrt(1.0 + diff * diff / (self.delta ** 2))
def compute_hess(self, y, y_pred):
diff = y_pred - y
return 1.0 / (1.0 + diff * diff / (self.delta ** 2)) ** 1.5
class FairLoss(Loss):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
def __init__(self, c):
super().__init__()
if c is None:
self.c = consts.FLOAT_ZERO
else:
self.c = c
if np.abs(self.c) < consts.FLOAT_ZERO:
self.c = consts.FLOAT_ZERO
@staticmethod
def predict(value):
return value
def compute_loss(self, y, y_pred, sample_weight=None):
fair_loss = y.join(y_pred, lambda y, yp:
(self.c * np.abs(yp - y) - self.c ** 2 * np.log(np.abs(yp - y) / self.c + 1), 1))
avg_loss = Loss.reduce(fair_loss, sample_weights=sample_weight)
return avg_loss
def compute_grad(self, y, y_pred):
diff = y_pred - y
return self.c * diff / (np.abs(diff) + self.c)
def compute_hess(self, y, y_pred):
diff = y_pred - y
return self.c ** 2 / (np.abs(diff) + self.c) ** 2
class LogCoshLoss(Loss):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
@staticmethod
def predict(value):
return value
def compute_loss(self, y, y_pred, sample_weight=None):
log_cosh_loss = y.join(y_pred, lambda y, yp: (np.log(np.cosh(yp - y)), 1))
avg_loss = Loss.reduce(log_cosh_loss, sample_weights=sample_weight)
return avg_loss
@staticmethod
def compute_grad(y, y_pred):
return np.tanh(y_pred - y)
@staticmethod
def compute_hess(y, y_pred):
return 1 - np.tanh(y_pred - y) ** 2
class TweedieLoss(Loss):
@staticmethod
def initialize(y):
# init score = 0, equals to base_score=1.0 in xgb, init_score=log(base_score)=0
return y.mapValues(lambda x: np.asarray([0])), np.asarray([0])
def __init__(self, rho=None):
super().__init__()
if rho is None:
self.rho = consts.FLOAT_ZERO
else:
self.rho = rho
@staticmethod
def predict(value):
return np.exp(value)
def compute_loss(self, y, y_pred, sample_weight=None):
loss_func = functools.partial(self._tweedie_loss, rho=self.rho)
tweedie_loss = y.join(y_pred, loss_func)
avg_loss = Loss.reduce(tweedie_loss, sample_weights=sample_weight)
return avg_loss
@staticmethod
def _tweedie_loss(label, pred, rho):
if pred < 1e-10:
pred = 1e-10
a = label * np.exp((1 - rho) * np.log(pred)) / (1 - rho)
b = np.exp((2 - rho) * np.log(pred)) / (2 - rho)
return (-a + b), 1
def compute_grad(self, y, y_pred):
if y < 0:
raise ValueError('y < 0, in tweedie loss label must be non-negative, but got {}'.format(y))
return -y * np.exp((1 - self.rho) * y_pred) + np.exp((2 - self.rho) * y_pred)
def compute_hess(self, y, y_pred):
return -y * (1 - self.rho) * np.exp((1 - self.rho) * y_pred) + (2 - self.rho) * np.exp((2 - self.rho) * y_pred)
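# --- editor's illustrative sketch (not part of the original FATE source) ---
# For TweedieLoss the model output is a raw score s, predicted as exp(s); the
# grad/hess above are derivatives of the Tweedie deviance w.r.t. s. A quick
# finite-difference check of compute_grad at a toy point:
if __name__ == '__main__':
    loss = TweedieLoss(rho=1.5)
    y_true, s, eps = 2.0, 0.3, 1e-6

    def deviance(score):
        return TweedieLoss._tweedie_loss(y_true, np.exp(score), rho=loss.rho)[0]

    numeric = (deviance(s + eps) - deviance(s - eps)) / (2 * eps)
    print(numeric, loss.compute_grad(y_true, s))  # should agree closely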
| 8,258 | 32.437247 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.cross_entropy import \
SigmoidBinaryCrossEntropyLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.cross_entropy import SoftmaxCrossEntropyLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import LeastSquaredErrorLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import LeastAbsoluteErrorLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import HuberLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import FairLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import LogCoshLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import TweedieLoss
__all__ = ["SigmoidBinaryCrossEntropyLoss",
"SoftmaxCrossEntropyLoss",
"LeastSquaredErrorLoss",
"LeastAbsoluteErrorLoss",
"HuberLoss",
"FairLoss",
"LogCoshLoss",
"TweedieLoss"]
| 1,775 | 51.235294 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/test/regression_loss_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import unittest
import numpy as np
from sklearn import metrics
from fate_arch.session import computing_session as session
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import LeastSquaredErrorLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import LeastAbsoluteErrorLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import HuberLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import FairLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import LogCoshLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.regression_loss import TweedieLoss
from federatedml.util import consts
class TestLeastSquaredErrorLoss(unittest.TestCase):
def setUp(self):
session.init("test_least_squared_error_loss")
self.lse_loss = LeastSquaredErrorLoss()
self.y_list = [i % 2 for i in range(100)]
self.predict_list = [random.random() for i in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for y in self.y_list:
y_pred = self.lse_loss.predict(y)
self.assertTrue(np.fabs(y_pred - y) < consts.FLOAT_ZERO)
def test_compute_gradient(self):
for y, y_pred in zip(self.y_list, self.predict_list):
lse_grad = self.lse_loss.compute_grad(y, y_pred)
grad = 2 * (y_pred - y)
self.assertTrue(np.fabs(lse_grad - grad) < consts.FLOAT_ZERO)
def test_compute_hess(self):
for y, y_pred in zip(self.y_list, self.predict_list):
hess = 2
lse_hess = self.lse_loss.compute_hess(y, y_pred)
self.assertTrue(np.fabs(lse_hess - hess) < consts.FLOAT_ZERO)
def test_compute_loss(self):
sklearn_loss = metrics.mean_squared_error(self.y_list, self.predict_list)
lse_loss = self.lse_loss.compute_loss(self.y, self.predict)
self.assertTrue(np.fabs(lse_loss - sklearn_loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
class TestLeastAbsoluteErrorLoss(unittest.TestCase):
def setUp(self):
session.init("test_least_abs_error_loss")
self.lae_loss = LeastAbsoluteErrorLoss()
self.y_list = [i % 2 for i in range(100)]
self.predict_list = [random.random() for i in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for y in self.y_list:
y_pred = self.lae_loss.predict(y)
self.assertTrue(np.fabs(y_pred - y) < consts.FLOAT_ZERO)
def test_compute_gradient(self):
for y, y_pred in zip(self.y_list, self.predict_list):
lse_grad = self.lae_loss.compute_grad(y, y_pred)
diff = y_pred - y
if diff > consts.FLOAT_ZERO:
grad = 1
elif diff < consts.FLOAT_ZERO:
grad = -1
else:
grad = 0
self.assertTrue(np.fabs(lse_grad - grad) < consts.FLOAT_ZERO)
def test_compute_hess(self):
for y, y_pred in zip(self.y_list, self.predict_list):
hess = 1
lae_hess = self.lae_loss.compute_hess(y, y_pred)
self.assertTrue(np.fabs(lae_hess - hess) < consts.FLOAT_ZERO)
def test_compute_loss(self):
sklearn_loss = metrics.mean_absolute_error(self.y_list, self.predict_list)
lae_loss = self.lae_loss.compute_loss(self.y, self.predict)
self.assertTrue(np.fabs(lae_loss - sklearn_loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
class TestHuberLoss(unittest.TestCase):
def setUp(self):
session.init("test_huber_loss")
self.delta = 1
self.huber_loss = HuberLoss(self.delta)
self.y_list = [i % 2 for i in range(100)]
self.predict_list = [random.random() for i in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for y in self.y_list:
y_pred = self.huber_loss.predict(y)
self.assertTrue(np.fabs(y_pred - y) < consts.FLOAT_ZERO)
def test_compute_gradient(self):
for y, y_pred in zip(self.y_list, self.predict_list):
huber_grad = self.huber_loss.compute_grad(y, y_pred)
diff = y_pred - y
grad = diff / np.sqrt(diff * diff / self.delta ** 2 + 1)
self.assertTrue(np.fabs(huber_grad - grad) < consts.FLOAT_ZERO)
def test_compute_hess(self):
for y, y_pred in zip(self.y_list, self.predict_list):
huber_hess = self.huber_loss.compute_hess(y, y_pred)
diff = y_pred - y
hess = 1.0 / (1 + diff * diff / self.delta ** 2) ** 1.5
self.assertTrue(np.fabs(huber_hess - hess) < consts.FLOAT_ZERO)
def test_compute_loss(self):
loss = 0
for y, y_pred in zip(self.y_list, self.predict_list):
diff = y_pred - y
loss += self.delta ** 2 * (np.sqrt(1 + diff ** 2 / self.delta ** 2) - 1)
loss /= len(self.y_list)
huber_loss = self.huber_loss.compute_loss(self.y, self.predict)
self.assertTrue(np.fabs(huber_loss - loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
class TestFairLoss(unittest.TestCase):
def setUp(self):
session.init("test_fair_loss")
self.c = 1
self.fair_loss = FairLoss(self.c)
self.y_list = [i % 2 for i in range(100)]
self.predict_list = [random.random() for i in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for y in self.y_list:
y_pred = self.fair_loss.predict(y)
self.assertTrue(np.fabs(y_pred - y) < consts.FLOAT_ZERO)
def test_compute_gradient(self):
for y, y_pred in zip(self.y_list, self.predict_list):
fair_grad = self.fair_loss.compute_grad(y, y_pred)
diff = y_pred - y
grad = self.c * diff / (np.abs(diff) + self.c)
self.assertTrue(np.fabs(fair_grad - grad) < consts.FLOAT_ZERO)
def test_compute_hess(self):
for y, y_pred in zip(self.y_list, self.predict_list):
fair_hess = self.fair_loss.compute_hess(y, y_pred)
diff = y_pred - y
hess = self.c ** 2 / (np.abs(diff) + self.c) ** 2
self.assertTrue(np.fabs(fair_hess - hess) < consts.FLOAT_ZERO)
def test_compute_loss(self):
loss = 0
for y, y_pred in zip(self.y_list, self.predict_list):
diff = y_pred - y
loss += self.c ** 2 * (np.abs(diff) / self.c - np.log(np.abs(diff) / self.c + 1))
loss /= len(self.y_list)
fair_loss = self.fair_loss.compute_loss(self.y, self.predict)
self.assertTrue(np.fabs(fair_loss - loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
class TestLogCoshLoss(unittest.TestCase):
def setUp(self):
session.init("test_fair_loss")
self.log_cosh_loss = LogCoshLoss()
self.y_list = [i % 2 for i in range(100)]
self.predict_list = [random.random() for i in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for y in self.y_list:
y_pred = self.log_cosh_loss.predict(y)
self.assertTrue(np.fabs(y_pred - y) < consts.FLOAT_ZERO)
def test_compute_gradient(self):
for y, y_pred in zip(self.y_list, self.predict_list):
log_cosh_grad = self.log_cosh_loss.compute_grad(y, y_pred)
diff = y_pred - y
grad = np.tanh(diff)
self.assertTrue(np.fabs(log_cosh_grad - grad) < consts.FLOAT_ZERO)
def test_compute_hess(self):
for y, y_pred in zip(self.y_list, self.predict_list):
log_cosh_hess = self.log_cosh_loss.compute_hess(y, y_pred)
diff = y_pred - y
hess = 1 - np.tanh(diff) ** 2
self.assertTrue(np.fabs(log_cosh_hess - hess) < consts.FLOAT_ZERO)
def test_compute_loss(self):
loss = 0
for y, y_pred in zip(self.y_list, self.predict_list):
diff = y_pred - y
loss += np.log(np.cosh(diff))
loss /= len(self.y_list)
log_cosh_loss = self.log_cosh_loss.compute_loss(self.y, self.predict)
self.assertTrue(np.fabs(log_cosh_loss - loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
class TestTweedieLoss(unittest.TestCase):
def setUp(self):
session.init("test_tweedie_loss")
self.rho = 0.5
self.tweedie_loss = TweedieLoss(self.rho)
self.y_list = [i % 2 for i in range(100)]
self.predict_list = [self.tweedie_loss.predict(random.random()) for i in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for y in self.y_list:
y_pred = self.tweedie_loss.predict(y)
self.assertTrue(np.fabs(y_pred - np.exp(y)) < consts.FLOAT_ZERO)
def test_compute_gradient(self):
for y, y_pred in zip(self.y_list, self.predict_list):
tweedie_grad = self.tweedie_loss.compute_grad(y, y_pred)
grad = -y * np.exp((1 - self.rho) * y_pred) + np.exp((2 - self.rho) * y_pred)
self.assertTrue(np.fabs(tweedie_grad - grad) < consts.FLOAT_ZERO)
def test_compute_hess(self):
for y, y_pred in zip(self.y_list, self.predict_list):
tweedie_loss_hess = self.tweedie_loss.compute_hess(y, y_pred)
hess = -y * (1 - self.rho) * np.exp((1 - self.rho) * y_pred) + \
(2 - self.rho) * np.exp((2 - self.rho) * y_pred)
self.assertTrue(np.fabs(tweedie_loss_hess - hess) < consts.FLOAT_ZERO)
def test_compute_loss(self):
loss = 0
for y, y_pred in zip(self.y_list, self.predict_list):
if y_pred < 1e-10:
y_pred = 1e-10
a = y * np.exp((1 - self.rho) * np.log(y_pred)) / (1 - self.rho)
b = np.exp((2 - self.rho) * np.log(y_pred)) / (2 - self.rho)
loss += (-a + b)
loss /= len(self.y_list)
tweedie_loss = self.tweedie_loss.compute_loss(self.y, self.predict)
self.assertTrue(np.fabs(tweedie_loss - loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
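# Context for the Tweedie formulas above (a sketch of the assumed convention):
# y_pred here is the raw score under a log link, so predict() maps it through
# exp(). The grad expression is the first derivative of the Tweedie deviance
# w.r.t. that raw score s, with power parameter rho:
#   grad = -y * exp((1 - rho) * s) + exp((2 - rho) * s)
# and differentiating once more w.r.t. s yields the hess used in the test.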
if __name__ == "__main__":
unittest.main()
| 11,824 | 39.775862 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/test/cross_entropy_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import unittest
import numpy as np
from sklearn import metrics
from fate_arch.session import computing_session as session
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.cross_entropy import \
SigmoidBinaryCrossEntropyLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss.cross_entropy import SoftmaxCrossEntropyLoss
from federatedml.util import consts
class TestSigmoidBinaryCrossEntropyLoss(unittest.TestCase):
def setUp(self):
session.init("test_cross_entropy")
self.sigmoid_loss = SigmoidBinaryCrossEntropyLoss()
self.y_list = [i % 2 for i in range(100)]
self.predict_list = [random.random() for i in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for i in range(1, 10):
np_v = 1.0 / (1.0 + np.exp(-1.0 / i))
self.assertTrue(np.fabs(self.sigmoid_loss.predict(1.0 / i) - np_v) < consts.FLOAT_ZERO)
def test_compute_gradient(self):
for i in range(10):
pred = random.random()
y = i % 2
grad = pred - y
self.assertTrue(np.fabs(self.sigmoid_loss.compute_grad(y, pred) - grad) < consts.FLOAT_ZERO)
def test_compute_hess(self):
for i in range(10):
pred = random.random()
y = i % 2
hess = pred * (1 - pred)
self.assertTrue(np.fabs(self.sigmoid_loss.compute_hess(y, pred) - hess) < consts.FLOAT_ZERO)
def test_compute_loss(self):
sklearn_loss = metrics.log_loss(self.y_list, self.predict_list)
sigmoid_loss = self.sigmoid_loss.compute_loss(self.y, self.predict)
self.assertTrue(np.fabs(sigmoid_loss - sklearn_loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
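# The closed forms checked above follow from binary cross-entropy with a
# sigmoid link: for loss(y, p) = -[y*log(p) + (1-y)*log(1-p)], differentiating
# w.r.t. the pre-sigmoid margin gives grad = p - y and hess = p * (1 - p).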
class TestSoftmaxCrossEntropyLoss(unittest.TestCase):
def setUp(self):
session.init("test_cross_entropy")
self.softmax_loss = SoftmaxCrossEntropyLoss()
self.y_list = [i % 5 for i in range(100)]
self.predict_list = [np.array([random.random() for i in range(5)]) for j in range(100)]
self.y = session.parallelize(self.y_list, include_key=False, partition=16)
self.predict = session.parallelize(self.predict_list, include_key=False, partition=16)
def test_predict(self):
for i in range(10):
            pred_list = [random.random() for j in range(5)]
            pred_arr = np.asarray(pred_list, dtype='float64')
mx = pred_arr.max()
predict = np.exp(pred_arr - mx) / sum(np.exp(pred_arr - mx))
softmaxloss_predict = self.softmax_loss.predict(pred_arr)
self.assertTrue(np.fabs(predict - softmaxloss_predict).all() < consts.FLOAT_ZERO)
def test_compute_grad(self):
for i in range(10):
pred = np.asarray([random.random() for j in range(5)], dtype="float64")
label = random.randint(0, 4)
softmaxloss_grad = self.softmax_loss.compute_grad(label, pred)
grad = pred.copy()
grad[label] -= 1
self.assertTrue(np.fabs(grad - softmaxloss_grad).all() < consts.FLOAT_ZERO)
def test_compute_hess(self):
for i in range(10):
pred = np.asarray([random.random() for j in range(5)], dtype='float64')
label = random.randint(0, 4)
softmaxloss_hess = self.softmax_loss.compute_hess(label, pred)
hess = 2 * pred * (1 - pred)
self.assertTrue(np.fabs(hess - softmaxloss_hess).all() < consts.FLOAT_ZERO)
def test_compute_loss(self):
softmax_loss = self.softmax_loss.compute_loss(self.y, self.predict)
loss = sum(-np.log(pred[yi]) for yi, pred in zip(self.y_list, self.predict_list)) / len(self.y_list)
self.assertTrue(np.fabs(softmax_loss - loss) < consts.FLOAT_ZERO)
def tearDown(self):
session.stop()
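# The max-subtraction in test_predict is the standard numerically stable
# softmax: exp(x - max(x)) / sum(exp(x - max(x))) equals softmax(x) because
# the constant shift cancels in the ratio, while avoiding overflow in exp().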
if __name__ == '__main__':
unittest.main()
| 4,683 | 40.451327 | 116 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/homo/homo_decision_tree_client.py
|
import functools
import numpy as np
import sklearn
from typing import List
from federatedml.util import LOGGER
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import CriterionMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import DecisionTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import DecisionTreeModelParam
from federatedml.transfer_variable.transfer_class.homo_decision_tree_transfer_variable import \
HomoDecisionTreeTransferVariable
from federatedml.util import consts
from federatedml.ensemble import FeatureHistogram
from federatedml.ensemble import DecisionTree
from federatedml.ensemble import Splitter
from federatedml.ensemble import Node
from federatedml.ensemble import HistogramBag
from federatedml.ensemble import SplitInfo
from federatedml.ensemble import DecisionTreeClientAggregator
from federatedml.feature.instance import Instance
from federatedml.param import DecisionTreeParam
from sklearn.ensemble._hist_gradient_boosting.grower import HistogramBuilder
from fate_arch.session import computing_session as session
class HomoDecisionTreeClient(DecisionTree):
def __init__(self, tree_param: DecisionTreeParam, data_bin=None, bin_split_points: np.array = None,
bin_sparse_point=None, g_h=None, valid_feature: dict = None, epoch_idx: int = None,
role: str = None, tree_idx: int = None, flow_id: int = None, mode='train'):
"""
Parameters
----------
tree_param: decision tree parameter object
        data_bin: binned data instance
bin_split_points: data split points
bin_sparse_point: sparse data point
        g_h: computed g and h values of instances
valid_feature: dict points out valid features {valid:true,invalid:false}
epoch_idx: current epoch index
role: host or guest
flow_id: flow id
mode: train / predict
"""
super(HomoDecisionTreeClient, self).__init__(tree_param)
self.splitter = Splitter(self.criterion_method, self.criterion_params, self.min_impurity_split,
self.min_sample_split, self.min_leaf_node)
self.data_bin = data_bin
self.g_h = g_h
self.bin_split_points = bin_split_points
self.bin_sparse_points = bin_sparse_point
self.epoch_idx = epoch_idx
self.tree_idx = tree_idx
self.transfer_inst = HomoDecisionTreeTransferVariable()
"""
initializing here
"""
self.valid_features = valid_feature
self.tree_node = [] # start from root node
self.tree_node_num = 0
self.cur_layer_node = []
self.runtime_idx = 0
self.sitename = consts.GUEST
# memory backend
self.arr_bin_data = None
self.memory_hist_builder_list = []
self.sample_id_arr = None
self.bin_num = 0
# secure aggregator, class SecureBoostClientAggregator
if mode == 'train':
self.role = role
self.set_flowid(flow_id)
self.aggregator = DecisionTreeClientAggregator(verbose=False)
elif mode == 'predict':
self.role, self.aggregator = None, None
self.check_max_split_nodes()
LOGGER.debug('use missing status {} {}'.format(self.use_missing, self.zero_as_missing))
def set_flowid(self, flowid):
LOGGER.info("set flowid, flowid is {}".format(flowid))
self.transfer_inst.set_flowid(flowid)
"""
Federation functions
"""
def sync_local_node_histogram(self, acc_histogram: List[HistogramBag], suffix):
# sending local histogram
self.aggregator.send_histogram(acc_histogram, suffix=suffix)
LOGGER.debug('local histogram sent at layer {}'.format(suffix[0]))
def sync_cur_layer_node_num(self, node_num, suffix):
self.transfer_inst.cur_layer_node_num.remote(node_num, role=consts.ARBITER, idx=-1, suffix=suffix)
def sync_best_splits(self, suffix) -> List[SplitInfo]:
best_splits = self.transfer_inst.best_split_points.get(idx=0, suffix=suffix)
return best_splits
"""
Computing functions
"""
def get_node_map(self, nodes: List[Node], left_node_only=True):
node_map = {}
idx = 0
for node in nodes:
if node.id != 0 and (not node.is_left_node and left_node_only):
continue
node_map[node.id] = idx
idx += 1
return node_map
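    # Illustration (hypothetical ids): for a layer [n1(left), n2(right),
    # n3(left), n4(right)] with left_node_only=True, the map is
    # {n1.id: 0, n3.id: 1} -- right nodes are skipped because their
    # histograms are reconstructed by subtraction on the arbiter side.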
def get_grad_hess_sum(self, grad_and_hess_table):
LOGGER.info("calculate the sum of grad and hess")
grad, hess = grad_and_hess_table.reduce(
lambda value1, value2: (value1[0] + value2[0], value1[1] + value2[1]))
return grad, hess
def get_local_histogram(self, cur_to_split: List[Node], g_h, table_with_assign,
split_points, sparse_point, valid_feature):
LOGGER.info("start to get node histograms")
node_map = self.get_node_map(nodes=cur_to_split)
histograms = FeatureHistogram.calculate_histogram(
table_with_assign, g_h,
split_points, sparse_point,
valid_feature, node_map,
self.use_missing, self.zero_as_missing)
hist_bags = []
for hist_list in histograms:
hist_bags.append(HistogramBag(hist_list))
return hist_bags
def get_left_node_local_histogram(self, cur_nodes: List[Node], tree: List[Node], g_h, table_with_assign,
split_points, sparse_point, valid_feature):
node_map = self.get_node_map(cur_nodes, left_node_only=True)
LOGGER.info("start to get node histograms")
histograms = self.hist_computer.calculate_histogram(
table_with_assign, g_h,
split_points, sparse_point,
valid_feature, node_map,
self.use_missing, self.zero_as_missing)
hist_bags = []
for hist_list in histograms:
hist_bags.append(HistogramBag(hist_list))
left_nodes = []
for node in cur_nodes:
if node.is_left_node or node.id == 0:
left_nodes.append(node)
# set histogram id and parent histogram id
for node, hist_bag in zip(left_nodes, hist_bags):
# LOGGER.debug('node id {}, node parent id {}, cur tree {}'.format(node.id, node.parent_nodeid, len(tree)))
hist_bag.hid = node.id
hist_bag.p_hid = node.parent_nodeid
return hist_bags
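    # Note: only root/left-node histograms are computed and sent; the arbiter
    # derives each right sibling as parent_hist - left_hist (see
    # histogram_subtraction in HomoDecisionTreeArbiter), which is why hid and
    # p_hid are attached to every HistogramBag above.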
"""
Tree Updating
"""
def update_tree(self, cur_to_split: List[Node], split_info: List[SplitInfo]):
"""
        update current tree structure

        Parameters
        ----------
        cur_to_split: nodes to split in the current layer
        split_info: best split info for each node, aligned with cur_to_split
"""
LOGGER.debug('updating tree_node, cur layer has {} node'.format(len(cur_to_split)))
next_layer_node = []
assert len(cur_to_split) == len(split_info)
for idx in range(len(cur_to_split)):
sum_grad = cur_to_split[idx].sum_grad
sum_hess = cur_to_split[idx].sum_hess
if split_info[idx].best_fid is None or split_info[idx].gain <= self.min_impurity_split + consts.FLOAT_ZERO:
cur_to_split[idx].is_leaf = True
self.tree_node.append(cur_to_split[idx])
continue
cur_to_split[idx].fid = split_info[idx].best_fid
cur_to_split[idx].bid = split_info[idx].best_bid
cur_to_split[idx].missing_dir = split_info[idx].missing_dir
p_id = cur_to_split[idx].id
l_id, r_id = self.tree_node_num + 1, self.tree_node_num + 2
cur_to_split[idx].left_nodeid, cur_to_split[idx].right_nodeid = l_id, r_id
self.tree_node_num += 2
l_g, l_h = split_info[idx].sum_grad, split_info[idx].sum_hess
# create new left node and new right node
left_node = Node(id=l_id,
sitename=self.sitename,
sum_grad=l_g,
sum_hess=l_h,
weight=self.splitter.node_weight(l_g, l_h),
parent_nodeid=p_id,
sibling_nodeid=r_id,
is_left_node=True)
right_node = Node(id=r_id,
sitename=self.sitename,
sum_grad=sum_grad - l_g,
sum_hess=sum_hess - l_h,
weight=self.splitter.node_weight(sum_grad - l_g, sum_hess - l_h),
parent_nodeid=p_id,
sibling_nodeid=l_id,
is_left_node=False)
next_layer_node.append(left_node)
next_layer_node.append(right_node)
self.tree_node.append(cur_to_split[idx])
self.update_feature_importance(split_info[idx], record_site_name=False)
return next_layer_node
@staticmethod
def assign_an_instance(row, tree: List[Node], bin_sparse_point, use_missing, use_zero_as_missing):
leaf_status, nodeid = row[1]
node = tree[nodeid]
if node.is_leaf:
return node.id
data_inst = row[0]
new_layer_nodeid = DecisionTree.go_next_layer(node, data_inst, use_missing, use_zero_as_missing,
bin_sparse_point=bin_sparse_point)
return 1, new_layer_nodeid
def assign_instances_to_new_node(self, table_with_assignment, tree_node: List[Node]):
LOGGER.debug('re-assign instance to new nodes')
assign_method = functools.partial(
self.assign_an_instance,
tree=tree_node,
bin_sparse_point=self.bin_sparse_points,
use_missing=self.use_missing,
use_zero_as_missing=self.zero_as_missing)
assign_result = table_with_assignment.mapValues(assign_method)
leaf_val = assign_result.filter(lambda key, value: isinstance(value, tuple) is False)
assign_result = assign_result.subtractByKey(leaf_val)
return assign_result, leaf_val
    def update_instances_node_positions(self):
return self.data_bin.join(self.inst2node_idx, lambda inst, assignment: (inst, assignment))
"""
Pre/Post process
"""
@staticmethod
def get_node_sample_weights(inst2node, tree_node: List[Node]):
"""
        get each sample's weight according to its node assignment
"""
func = functools.partial(lambda inst, nodes: nodes[inst[1]].weight, nodes=tree_node)
return inst2node.mapValues(func)
def convert_bin_to_real(self):
"""
convert current bid in tree nodes to real value
"""
for node in self.tree_node:
if not node.is_leaf:
node.bid = self.bin_split_points[node.fid][node.bid]
def assign_instance_to_root_node(self, data_bin, root_node_id):
return data_bin.mapValues(lambda inst: (1, root_node_id))
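    # The assignment value is a tuple (unleaf_flag, node_id): flag 1 marks a
    # sample as still sitting on a non-leaf node, and assign_an_instance
    # replaces the tuple with a bare node id once the sample reaches a leaf.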
def init_root_node_and_gh_sum(self):
# compute local g_sum and h_sum
g_sum, h_sum = self.get_grad_hess_sum(self.g_h)
# get aggregated root info
self.aggregator.send_local_root_node_info(g_sum, h_sum, suffix=('root_node_sync1', self.epoch_idx))
g_h_dict = self.aggregator.get_aggregated_root_info(suffix=('root_node_sync2', self.epoch_idx))
global_g_sum, global_h_sum = g_h_dict['g_sum'], g_h_dict['h_sum']
# initialize node
root_node = Node(
id=0,
sitename=consts.GUEST,
sum_grad=global_g_sum,
sum_hess=global_h_sum,
weight=self.splitter.node_weight(
global_g_sum,
global_h_sum))
self.cur_layer_node = [root_node]
"""
Memory backend functions
"""
def get_g_h_arr(self):
g_, h_ = [], []
for id_, gh in self.g_h.collect():
g_.append(gh[0])
h_.append(gh[1])
g_, h_ = np.array(g_).astype(np.float32), np.array(h_).astype(np.float32)
return g_, h_
def init_node2index(self, sample_num):
root_sample_idx = np.array([i for i in range(sample_num)]).astype(np.uint32)
return root_sample_idx
@staticmethod
def skl_comp(ver1, ver2):
split_v1, split_v2 = ver1.split('.'), ver2.split('.')
for v1s, v2s in zip(split_v1, split_v2):
v1, v2 = int(v1s), int(v2s)
if v1 != v2:
return -1 if v1 < v2 else 1
return 0
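    # e.g. skl_comp('0.23.1', '0.24.2') -> -1, skl_comp('1.0.2', '0.24.2') -> 1,
    # skl_comp('0.24.2', '0.24.2') -> 0. Caveat: zip() stops at the shorter
    # version string, so '1.0' vs '1.0.2' also compares as equal.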
def _get_hist_builder(self, g, h, bin_data, bin_num):
try:
hist_builder = HistogramBuilder(bin_data, bin_num, g, h, False)
except TypeError as e:
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
n_threads = _openmp_effective_n_threads(None)
hist_builder = HistogramBuilder(bin_data, bin_num, g, h, False, n_threads)
return hist_builder
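    # sklearn's private HistogramBuilder gained an explicit n_threads argument
    # in newer releases (around 1.0), so the TypeError fallback above keeps
    # both old and new signatures working (a best-effort compatibility shim).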
def init_memory_hist_builder(self, g, h, bin_data, bin_num):
if self.skl_comp(sklearn.__version__, '0.24.2') == -1:
raise ValueError('Please upgrade your sklearn version, current version is {}, '
'expected version is >= 0.24.2'.format(sklearn.__version__))
if len(g.shape) == 2: # mo case
idx_end = g.shape[1]
for i in range(0, idx_end):
g_arr = np.ascontiguousarray(g[::, i], dtype=np.float32)
h_arr = np.ascontiguousarray(h[::, i], dtype=np.float32)
hist_builder = self._get_hist_builder(g_arr, h_arr, bin_data, bin_num)
self.memory_hist_builder_list.append(hist_builder)
else:
hist_builder = self._get_hist_builder(g, h, bin_data, bin_num)
self.memory_hist_builder_list.append(hist_builder)
def sklearn_compute_agg_hist(self, data_indices):
hist = []
for memory_hist_builder in self.memory_hist_builder_list:
hist_memory_view = memory_hist_builder.compute_histograms_brute(data_indices)
hist_arr = np.array(hist_memory_view)
g = hist_arr['sum_gradients'].cumsum(axis=1)
h = hist_arr['sum_hessians'].cumsum(axis=1)
count = hist_arr['count'].cumsum(axis=1)
final_hist = []
for feat_idx in range(len(g)):
arr = np.array([g[feat_idx], h[feat_idx], count[feat_idx]]).transpose()
final_hist.append(arr)
hist.append(np.array(final_hist))
# non-mo case, return nd array
if len(hist) == 1:
return hist[0]
# handle mo case, return list
multi_dim_g, multi_dim_h, count = None, None, None
for dimension_hist in hist:
cur_g, cur_h = dimension_hist[::, ::, 0], dimension_hist[::, ::, 1]
cur_g = cur_g.reshape(cur_g.shape[0], cur_g.shape[1], 1)
cur_h = cur_h.reshape(cur_h.shape[0], cur_h.shape[1], 1)
if multi_dim_g is None and multi_dim_h is None:
multi_dim_g = cur_g
multi_dim_h = cur_h
else:
multi_dim_g = np.concatenate([multi_dim_g, cur_g], axis=-1)
multi_dim_h = np.concatenate([multi_dim_h, cur_h], axis=-1)
if count is None:
count = dimension_hist[::, ::, 2]
# is a slow realization, to improve
rs = []
for feat_g, feat_h, feat_c in zip(multi_dim_g, multi_dim_h, count):
feat_hist = [[g_arr, h_arr, c] for g_arr, h_arr, c in zip(feat_g, feat_h, feat_c)]
rs.append(feat_hist)
return rs
def assign_arr_inst(self, node, data_arr, data_indices, missing_bin_index=None):
        # fast instance assignment using in-memory arrays
inst = data_arr[data_indices]
fid = node.fid
bid = node.bid
decision = inst[::, fid] <= bid
if self.use_missing and missing_bin_index is not None:
missing_dir = True if node.missing_dir == -1 else False
missing_samples = (inst[::, fid] == missing_bin_index)
decision[missing_samples] = missing_dir
left_samples = data_indices[decision]
right_samples = data_indices[~decision]
return left_samples, right_samples
"""
Fit & Predict
"""
def memory_fit(self):
"""
fitting using memory backend
"""
LOGGER.info('begin to fit homo decision tree, epoch {}, tree idx {},'
'running on memory backend'.format(self.epoch_idx, self.tree_idx))
self.init_root_node_and_gh_sum()
g, h = self.get_g_h_arr()
self.init_memory_hist_builder(g, h, self.arr_bin_data, self.bin_num + self.use_missing) # last missing bin
root_indices = self.init_node2index(len(self.arr_bin_data))
self.cur_layer_node[0].inst_indices = root_indices # root node
tree_height = self.max_depth + 1 # non-leaf node height + 1 layer leaf
for dep in range(tree_height):
if dep + 1 == tree_height:
for node in self.cur_layer_node:
node.is_leaf = True
self.tree_node.append(node)
break
self.sync_cur_layer_node_num(len(self.cur_layer_node), suffix=(dep, self.epoch_idx, self.tree_idx))
node_map = self.get_node_map(self.cur_layer_node)
node_hists = []
for batch_id, i in enumerate(range(0, len(self.cur_layer_node), self.max_split_nodes)):
cur_to_split = self.cur_layer_node[i:i + self.max_split_nodes]
for node in cur_to_split:
if node.id in node_map:
hist = self.sklearn_compute_agg_hist(node.inst_indices)
hist_bag = HistogramBag(hist)
hist_bag.hid = node.id
hist_bag.p_hid = node.parent_nodeid
node_hists.append(hist_bag)
self.sync_local_node_histogram(node_hists, suffix=(batch_id, dep, self.epoch_idx, self.tree_idx))
node_hists = []
split_info = self.sync_best_splits(suffix=(dep, self.epoch_idx))
new_layer_node = self.update_tree(self.cur_layer_node, split_info)
node2inst_idx = []
for node in self.cur_layer_node:
if node.is_leaf:
continue
l, r = self.assign_arr_inst(node, self.arr_bin_data, node.inst_indices, missing_bin_index=self.bin_num)
node2inst_idx.append(l)
node2inst_idx.append(r)
assert len(node2inst_idx) == len(new_layer_node)
for node, indices in zip(new_layer_node, node2inst_idx):
node.inst_indices = indices
self.cur_layer_node = new_layer_node
sample_indices, weights = [], []
for node in self.tree_node:
if node.is_leaf:
sample_indices += list(node.inst_indices)
weights += [node.weight] * len(node.inst_indices)
else:
node.bid = self.bin_split_points[node.fid][int(node.bid)]
# post-processing of memory backend fit
sample_id = self.sample_id_arr[sample_indices]
self.leaf_count = {}
for node in self.tree_node:
if node.is_leaf:
self.leaf_count[node.id] = len(node.inst_indices)
LOGGER.debug('leaf count is {}'.format(self.leaf_count))
sample_id_type = type(self.g_h.take(1)[0][0])
self.sample_weights = session.parallelize([(sample_id_type(id_), weight) for id_, weight in zip(
sample_id, weights)], include_key=True, partition=self.data_bin.partitions)
def fit(self):
"""
start to fit
"""
LOGGER.info('begin to fit homo decision tree, epoch {}, tree idx {},'
'running on distributed backend'.format(self.epoch_idx, self.tree_idx))
self.init_root_node_and_gh_sum()
LOGGER.debug('assign samples to root node')
self.inst2node_idx = self.assign_instance_to_root_node(self.data_bin, 0)
tree_height = self.max_depth + 1 # non-leaf node height + 1 layer leaf
for dep in range(tree_height):
if dep + 1 == tree_height:
for node in self.cur_layer_node:
node.is_leaf = True
self.tree_node.append(node)
rest_sample_leaf_pos = self.inst2node_idx.mapValues(lambda x: x[1])
if self.sample_leaf_pos is None:
self.sample_leaf_pos = rest_sample_leaf_pos
else:
self.sample_leaf_pos = self.sample_leaf_pos.union(rest_sample_leaf_pos)
# stop fitting
break
LOGGER.debug('start to fit layer {}'.format(dep))
table_with_assignment = self.update_instances_node_positions()
# send current layer node number:
self.sync_cur_layer_node_num(len(self.cur_layer_node), suffix=(dep, self.epoch_idx, self.tree_idx))
split_info, agg_histograms = [], []
for batch_id, i in enumerate(range(0, len(self.cur_layer_node), self.max_split_nodes)):
cur_to_split = self.cur_layer_node[i:i + self.max_split_nodes]
node_map = self.get_node_map(nodes=cur_to_split)
LOGGER.debug('node map is {}'.format(node_map))
LOGGER.debug('computing histogram for batch{} at depth{}'.format(batch_id, dep))
local_histogram = self.get_left_node_local_histogram(
cur_nodes=cur_to_split,
tree=self.tree_node,
g_h=self.g_h,
table_with_assign=table_with_assignment,
split_points=self.bin_split_points,
sparse_point=self.bin_sparse_points,
valid_feature=self.valid_features
)
LOGGER.debug('federated finding best splits for batch{} at layer {}'.format(batch_id, dep))
self.sync_local_node_histogram(local_histogram, suffix=(batch_id, dep, self.epoch_idx, self.tree_idx))
agg_histograms += local_histogram
split_info = self.sync_best_splits(suffix=(dep, self.epoch_idx))
LOGGER.debug('got best splits from arbiter')
new_layer_node = self.update_tree(self.cur_layer_node, split_info)
self.cur_layer_node = new_layer_node
self.inst2node_idx, leaf_val = self.assign_instances_to_new_node(table_with_assignment, self.tree_node)
# record leaf val
if self.sample_leaf_pos is None:
self.sample_leaf_pos = leaf_val
else:
self.sample_leaf_pos = self.sample_leaf_pos.union(leaf_val)
LOGGER.debug('assigning instance to new nodes done')
self.convert_bin_to_real()
self.sample_weights_post_process()
LOGGER.debug('fitting tree done')
def traverse_tree(self, data_inst: Instance, tree: List[Node], use_missing, zero_as_missing):
nid = 0 # root node id
while True:
if tree[nid].is_leaf:
return tree[nid].weight
nid = DecisionTree.go_next_layer(tree[nid], data_inst, use_missing, zero_as_missing)
def predict(self, data_inst):
LOGGER.debug('tree start to predict')
traverse_tree = functools.partial(self.traverse_tree,
tree=self.tree_node,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing, )
predicted_weights = data_inst.mapValues(traverse_tree)
return predicted_weights
"""
Model Outputs
"""
def get_model_meta(self):
model_meta = DecisionTreeModelMeta()
model_meta.criterion_meta.CopyFrom(CriterionMeta(criterion_method=self.criterion_method,
criterion_param=self.criterion_params))
model_meta.max_depth = self.max_depth
model_meta.min_sample_split = self.min_sample_split
model_meta.min_impurity_split = self.min_impurity_split
model_meta.min_leaf_node = self.min_leaf_node
model_meta.use_missing = self.use_missing
model_meta.zero_as_missing = self.zero_as_missing
return model_meta
def set_model_meta(self, model_meta):
self.max_depth = model_meta.max_depth
self.min_sample_split = model_meta.min_sample_split
self.min_impurity_split = model_meta.min_impurity_split
self.min_leaf_node = model_meta.min_leaf_node
self.criterion_method = model_meta.criterion_meta.criterion_method
self.criterion_params = list(model_meta.criterion_meta.criterion_param)
self.use_missing = model_meta.use_missing
self.zero_as_missing = model_meta.zero_as_missing
def get_model_param(self):
model_param = DecisionTreeModelParam()
for node in self.tree_node:
weight, mo_weight = self.mo_weight_extract(node)
model_param.tree_.add(id=node.id,
sitename=self.role,
fid=node.fid,
bid=node.bid,
weight=weight,
is_leaf=node.is_leaf,
left_nodeid=node.left_nodeid,
right_nodeid=node.right_nodeid,
missing_dir=node.missing_dir,
mo_weight=mo_weight
)
model_param.leaf_count.update(self.leaf_count)
return model_param
def set_model_param(self, model_param):
self.tree_node = []
for node_param in model_param.tree_:
weight = self.mo_weight_load(node_param)
_node = Node(id=node_param.id,
sitename=node_param.sitename,
fid=node_param.fid,
bid=node_param.bid,
weight=weight,
is_leaf=node_param.is_leaf,
left_nodeid=node_param.left_nodeid,
right_nodeid=node_param.right_nodeid,
missing_dir=node_param.missing_dir)
self.tree_node.append(_node)
def get_model(self):
model_meta = self.get_model_meta()
model_param = self.get_model_param()
return model_meta, model_param
def load_model(self, model_meta=None, model_param=None):
LOGGER.info("load tree model")
self.set_model_meta(model_meta)
self.set_model_param(model_param)
def compute_best_splits(self, *args):
# not implemented in homo tree
pass
def initialize_root_node(self, *args):
# not implemented in homo tree
pass
| 27,219 | 38.853587 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/homo/homo_decision_tree_aggregator.py
|
from typing import List, Dict
from federatedml.util import LOGGER
from federatedml.framework.weights import DictWeights
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient, SecureAggregatorServer
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_histogram import HistogramBag, \
FeatureHistogramWeights
class DecisionTreeArbiterAggregator(object):
"""
    secure aggregator for the secureboost arbiter: gathers histograms and scalar statistics
"""
def __init__(self, verbose=False):
self.aggregator = SecureAggregatorServer(secure_aggregate=True, communicate_match_suffix='tree_agg')
self.verbose = verbose
def aggregate_histogram(self, suffix) -> List[HistogramBag]:
agg_histogram = self.aggregator.aggregate_model(suffix=suffix)
if self.verbose:
for hist in agg_histogram._weights:
LOGGER.debug('showing aggregated hist{}, hid is {}'.format(hist, hist.hid))
return agg_histogram._weights
def aggregate_root_node_info(self, suffix):
agg_data = self.aggregator.aggregate_model(suffix)
d = agg_data._weights
return d['g_sum'], d['h_sum']
def broadcast_root_info(self, g_sum, h_sum, suffix):
d = {'g_sum': g_sum, 'h_sum': h_sum}
self.aggregator.broadcast_model(d, suffix=suffix)
class DecisionTreeClientAggregator(object):
"""
    secure aggregator for secureboost clients: sends histograms and scalar statistics
"""
def __init__(self, verbose=False):
self.aggregator = SecureAggregatorClient(
secure_aggregate=True,
aggregate_type='sum',
communicate_match_suffix='tree_agg')
self.verbose = verbose
def send_histogram(self, hist: List[HistogramBag], suffix):
if self.verbose:
for idx, histbag in enumerate(hist):
LOGGER.debug('showing client hist {}'.format(histbag))
weights = FeatureHistogramWeights(list_of_histogram_bags=hist)
self.aggregator.send_model(weights, suffix=suffix)
def get_aggregated_root_info(self, suffix) -> Dict:
gh_dict = self.aggregator.get_aggregated_model(suffix=suffix)
return gh_dict
def send_local_root_node_info(self, g_sum, h_sum, suffix):
d = {'g_sum': g_sum, 'h_sum': h_sum}
dict_weights = DictWeights(d=d)
self.aggregator.send_model(dict_weights, suffix=suffix)
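# Usage sketch (hypothetical pairing; suffixes must match on both sides):
#   client:  send_local_root_node_info(g, h, suffix=s)    <-> arbiter: aggregate_root_node_info(s)
#   arbiter: broadcast_root_info(g_sum, h_sum, suffix=s)  <-> client:  get_aggregated_root_info(s)
#   client:  send_histogram(hists, suffix=s)              <-> arbiter: aggregate_histogram(s)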
| 2,457 | 35.147059 | 114 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/homo/homo_decision_tree_arbiter.py
|
from typing import List
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import DecisionTreeModelParam
from federatedml.transfer_variable.transfer_class.homo_decision_tree_transfer_variable import \
HomoDecisionTreeTransferVariable
from federatedml.util import consts
from federatedml.ensemble import DecisionTree
from federatedml.ensemble import Splitter
from federatedml.ensemble import HistogramBag
from federatedml.ensemble import SplitInfo
from federatedml.util import LOGGER
from federatedml.ensemble import DecisionTreeArbiterAggregator
class HomoDecisionTreeArbiter(DecisionTree):
def __init__(self, tree_param: DecisionTreeModelParam, valid_feature: dict, epoch_idx: int,
tree_idx: int, flow_id: int):
super(HomoDecisionTreeArbiter, self).__init__(tree_param)
self.splitter = Splitter(self.criterion_method, self.criterion_params, self.min_impurity_split,
self.min_sample_split, self.min_leaf_node, self.min_child_weight)
self.transfer_inst = HomoDecisionTreeTransferVariable()
"""
initializing here
"""
self.valid_features = valid_feature
self.tree_node = [] # start from root node
self.tree_node_num = 0
self.cur_layer_node = []
self.runtime_idx = 0
self.sitename = consts.ARBITER
self.epoch_idx = epoch_idx
self.tree_idx = tree_idx
# secure aggregator
self.set_flowid(flow_id)
self.aggregator = DecisionTreeArbiterAggregator(verbose=False)
# stored histogram for faster computation {node_id:histogram_bag}
self.stored_histograms = {}
def set_flowid(self, flowid=0):
LOGGER.info("set flowid, flowid is {}".format(flowid))
self.transfer_inst.set_flowid(flowid)
"""
Federation Functions
"""
def sync_node_sample_numbers(self, suffix):
cur_layer_node_num = self.transfer_inst.cur_layer_node_num.get(-1, suffix=suffix)
for num in cur_layer_node_num[1:]:
assert num == cur_layer_node_num[0]
return cur_layer_node_num[0]
def sync_best_splits(self, split_info, suffix):
LOGGER.debug('sending best split points')
self.transfer_inst.best_split_points.remote(split_info, idx=-1, suffix=suffix)
def sync_local_histogram(self, suffix) -> List[HistogramBag]:
node_local_histogram = self.aggregator.aggregate_histogram(suffix=suffix)
LOGGER.debug('num of histograms {}'.format(len(node_local_histogram)))
return node_local_histogram
"""
Split finding
"""
def federated_find_best_split(self, node_histograms, parallel_partitions=10) -> List[SplitInfo]:
LOGGER.debug('aggregating histograms')
acc_histogram = node_histograms
best_splits = self.splitter.find_split(acc_histogram, self.valid_features, parallel_partitions,
self.sitename, self.use_missing, self.zero_as_missing)
return best_splits
@staticmethod
def histogram_subtraction(left_node_histogram, stored_histograms):
# histogram subtraction
all_histograms = []
for left_hist in left_node_histogram:
all_histograms.append(left_hist)
# LOGGER.debug('hist id is {}, pid is {}'.format(left_hist.hid, left_hist.p_hid))
# root node hist
if left_hist.hid == 0:
continue
right_hist = stored_histograms[left_hist.p_hid] - left_hist
            right_hist.hid, right_hist.p_hid = left_hist.hid + 1, left_hist.p_hid
all_histograms.append(right_hist)
return all_histograms
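    # Worked example (hypothetical ids): if node 3 is the left child of node 1
    # and its histogram L arrives, the right sibling (id 4, since update_tree
    # assigns consecutive child ids) is recovered as stored_histograms[1] - L,
    # roughly halving the histograms each client must compute and transmit.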
"""
Fit
"""
def fit(self):
LOGGER.info('begin to fit homo decision tree, epoch {}, tree idx {}'.format(self.epoch_idx, self.tree_idx))
g_sum, h_sum = self.aggregator.aggregate_root_node_info(suffix=('root_node_sync1', self.epoch_idx))
self.aggregator.broadcast_root_info(g_sum, h_sum, suffix=('root_node_sync2', self.epoch_idx))
if self.max_split_nodes != 0 and self.max_split_nodes % 2 == 1:
self.max_split_nodes += 1
LOGGER.warning('an even max_split_nodes value is suggested when using histogram-subtraction, '
'max_split_nodes reset to {}'.format(self.max_split_nodes))
tree_height = self.max_depth + 1 # non-leaf node height + 1 layer leaf
for dep in range(tree_height):
if dep + 1 == tree_height:
break
LOGGER.debug('current dep is {}'.format(dep))
split_info = []
# get cur layer node num:
cur_layer_node_num = self.sync_node_sample_numbers(suffix=(dep, self.epoch_idx, self.tree_idx))
layer_stored_hist = {}
for batch_id, i in enumerate(range(0, cur_layer_node_num, self.max_split_nodes)):
left_node_histogram = self.sync_local_histogram(suffix=(batch_id, dep, self.epoch_idx, self.tree_idx))
all_histograms = self.histogram_subtraction(left_node_histogram, self.stored_histograms)
# store histogram
for hist in all_histograms:
layer_stored_hist[hist.hid] = hist
# FIXME stable parallel_partitions
best_splits = self.federated_find_best_split(all_histograms, parallel_partitions=10)
split_info += best_splits
self.stored_histograms = layer_stored_hist
self.sync_best_splits(split_info, suffix=(dep, self.epoch_idx))
LOGGER.debug('best_splits_sent')
def predict(self, data_inst=None):
"""
Do nothing
"""
LOGGER.debug('start predicting')
"""
These functions are not needed in homo-decision-tree
"""
def initialize_root_node(self, *args):
pass
def compute_best_splits(self, *args):
pass
def assign_an_instance(self, *args):
pass
def assign_instances_to_new_node(self, *args):
pass
def update_tree(self, *args):
pass
def convert_bin_to_real(self, *args):
pass
def get_model_meta(self):
pass
def get_model_param(self):
pass
def set_model_param(self, model_param):
pass
def set_model_meta(self, model_meta):
pass
def traverse_tree(self, *args):
pass
def update_instances_node_positions(self, *args):
pass
| 6,520 | 33.321053 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_fast_decision_tree_host.py
|
import numpy as np
import functools
import copy
from federatedml.ensemble.basic_algorithms import HeteroDecisionTreeHost
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core import tree_plan as plan
from federatedml.util import consts
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.feature.fate_element_type import NoneType
from federatedml.util import LOGGER
class HeteroFastDecisionTreeHost(HeteroDecisionTreeHost):
def __init__(self, tree_param):
super(HeteroFastDecisionTreeHost, self).__init__(tree_param)
self.node_plan = []
self.node_plan_idx = 0
self.tree_type = None
self.target_host_id = -1
self.guest_depth = 0
self.host_depth = 0
self.cur_dep = 0
self.self_host_id = -1
self.use_guest_feat_when_predict = False
self.tree_node = [] # keep tree structure for faster node dispatch
self.sample_leaf_pos = None # record leaf position of samples
"""
Setting
"""
def use_guest_feat_only_predict_mode(self):
self.use_guest_feat_when_predict = True
def set_tree_work_mode(self, tree_type, target_host_id):
self.tree_type, self.target_host_id = tree_type, target_host_id
def set_layered_depth(self, guest_depth, host_depth):
self.guest_depth, self.host_depth = guest_depth, host_depth
def set_self_host_id(self, self_host_id):
self.self_host_id = self_host_id
"""
Node Plan
"""
def initialize_node_plan(self):
if self.tree_type == plan.tree_type_dict['layered_tree']:
self.node_plan = plan.create_layered_tree_node_plan(guest_depth=self.guest_depth,
host_depth=self.host_depth,
host_list=self.host_party_idlist)
self.max_depth = len(self.node_plan)
LOGGER.debug('max depth reset to {}, cur node plan is {}'.format(self.max_depth, self.node_plan))
else:
self.node_plan = plan.create_node_plan(self.tree_type, self.target_host_id, self.max_depth)
def get_node_plan(self, idx):
return self.node_plan[idx]
"""
Host local split computation
"""
def get_host_split_info(self, splitinfo_host, federated_best_splitinfo_host):
final_splitinfos = []
for i in range(len(splitinfo_host)):
best_idx, best_gain = federated_best_splitinfo_host[i]
if best_idx != -1:
LOGGER.debug('sitename is {}, self.sitename is {}'
.format(splitinfo_host[i][best_idx].sitename, self.sitename))
assert splitinfo_host[i][best_idx].sitename == self.sitename
splitinfo = splitinfo_host[i][best_idx]
splitinfo.best_fid = splitinfo.best_fid
assert splitinfo.best_fid is not None
splitinfo.best_bid = splitinfo.best_bid
splitinfo.missing_dir = splitinfo.missing_dir
splitinfo.gain = best_gain
else:
splitinfo = SplitInfo(sitename=self.sitename, best_fid=-1, best_bid=-1, gain=best_gain)
final_splitinfos.append(splitinfo)
return final_splitinfos
def compute_best_splits_with_node_plan(self, tree_action, target_host_id, cur_to_split_nodes,
node_map: dict, dep: int, batch: int,
mode=consts.LAYERED_TREE):
if tree_action == plan.tree_actions['host_only'] and target_host_id == self.self_host_id:
data = self.data_with_node_assignments
inst2node_idx = self.get_computing_inst2node_idx()
node_sample_count = self.count_node_sample_num(inst2node_idx, node_map)
LOGGER.debug('sample count is {}'.format(node_sample_count))
acc_histograms = self.get_local_histograms(dep, data, self.grad_and_hess, node_sample_count,
cur_to_split_nodes, node_map, ret='tb',
hist_sub=True)
split_info_table = self.splitter.host_prepare_split_points(
histograms=acc_histograms,
use_missing=self.use_missing,
valid_features=self.valid_features,
sitename=self.sitename,
left_missing_dir=self.missing_dir_mask_left[dep],
right_missing_dir=self.missing_dir_mask_right[dep],
mask_id_mapping=self.fid_bid_random_mapping,
batch_size=self.bin_num,
cipher_compressor=self.cipher_compressor,
shuffle_random_seed=np.abs(hash((dep, batch))))
# test split info encryption
self.transfer_inst.encrypted_splitinfo_host.remote(split_info_table,
role=consts.GUEST,
idx=-1,
suffix=(dep, batch))
best_split_info = self.transfer_inst.federated_best_splitinfo_host.get(suffix=(dep, batch), idx=0)
unmasked_split_info = self.unmask_split_info(best_split_info, self.inverse_fid_bid_random_mapping,
self.missing_dir_mask_left[dep],
self.missing_dir_mask_right[dep])
if mode == consts.LAYERED_TREE:
self.record_split_info(unmasked_split_info)
elif mode == consts.MIX_TREE:
return unmasked_split_info
else:
LOGGER.debug('skip host computation')
return None
"""
Host Local Tree update
"""
def update_host_side_tree(self, split_info, reach_max_depth):
LOGGER.info("update tree node, splitlist length is {}, tree node queue size is {}".format(
len(split_info), len(self.cur_layer_nodes)))
new_tree_node_queue = []
for i in range(len(self.cur_layer_nodes)):
sum_grad = self.cur_layer_nodes[i].sum_grad
sum_hess = self.cur_layer_nodes[i].sum_hess
# when host node can not be further split, fid/bid is set to -1
if reach_max_depth or split_info[i].best_fid == -1:
self.cur_layer_nodes[i].is_leaf = True
else:
self.cur_layer_nodes[i].left_nodeid = self.tree_node_num + 1
self.cur_layer_nodes[i].right_nodeid = self.tree_node_num + 2
self.tree_node_num += 2
left_node = Node(id=self.cur_layer_nodes[i].left_nodeid,
sitename=self.sitename,
sum_grad=split_info[i].sum_grad,
sum_hess=split_info[i].sum_hess,
parent_nodeid=self.cur_to_split_nodes[i].id
)
right_node = Node(id=self.cur_layer_nodes[i].right_nodeid,
sitename=self.sitename,
sum_grad=sum_grad - split_info[i].sum_grad,
sum_hess=sum_hess - split_info[i].sum_hess,
parent_nodeid=self.cur_layer_nodes[i].id
)
new_tree_node_queue.append(left_node)
new_tree_node_queue.append(right_node)
self.cur_layer_nodes[i].sitename = split_info[i].sitename
self.cur_layer_nodes[i].fid = split_info[i].best_fid
self.cur_layer_nodes[i].bid = split_info[i].best_bid
self.cur_layer_nodes[i].missing_dir = split_info[i].missing_dir
split_info[i].gain = 0
self.update_feature_importance(split_info[i], record_site_name=False)
self.tree_node.append(self.cur_layer_nodes[i])
self.cur_layer_nodes = new_tree_node_queue
@staticmethod
def host_assign_an_instance(value, tree_, bin_sparse_points, use_missing, zero_as_missing):
unleaf_state, nodeid = value[1]
if tree_[nodeid].is_leaf is True:
return nodeid
next_layer_nid = HeteroFastDecisionTreeHost.go_next_layer(tree_[nodeid], value[0], use_missing,
zero_as_missing, bin_sparse_points)
return 1, next_layer_nid
def host_local_assign_instances_to_new_node(self):
assign_node_method = functools.partial(self.host_assign_an_instance,
tree_=self.tree_node,
bin_sparse_points=self.bin_sparse_points,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing
)
assign_result = self.data_with_node_assignments.mapValues(assign_node_method)
leaf = assign_result.filter(lambda key, value: isinstance(value, tuple) is False)
if self.sample_leaf_pos is None:
self.sample_leaf_pos = leaf
else:
self.sample_leaf_pos = self.sample_leaf_pos.union(leaf)
assign_result = assign_result.subtractByKey(leaf)
return assign_result
"""
Federation Functions
"""
def sync_sample_leaf_pos(self, sample_leaf_pos):
LOGGER.debug('final sample pos sent')
self.transfer_inst.dispatch_node_host_result.remote(sample_leaf_pos, idx=0,
suffix=('final sample pos',), role=consts.GUEST)
def sync_leaf_nodes(self):
leaves = []
for node in self.tree_node:
if node.is_leaf:
leaves.append(node)
to_send_leaves = copy.deepcopy(leaves)
self.transfer_inst.host_leafs.remote(to_send_leaves)
def sync_cur_layer_nodes(self, nodes, dep):
# self.mask_node_id(copy.deepcopy(nodes))
self.transfer_inst.host_cur_to_split_node_num. \
remote(nodes, idx=0, role=consts.GUEST, suffix=(dep,))
"""
Pre/Post Process
"""
def process_leaves_info(self):
# remove g/h info and rename leaves
# record node info
for node in self.tree_node:
node.sum_grad = None
node.sum_hess = None
if node.is_leaf:
node.sitename = consts.GUEST
else:
self.split_maskdict[node.id] = node.bid
self.missing_dir_maskdict[node.id] = node.missing_dir
def mask_node_id(self, nodes):
for n in nodes:
n.id = -1
return nodes
def convert_bin_to_real2(self):
"""
convert current bid in tree nodes to real value
"""
for node in self.tree_node:
if not node.is_leaf:
node.bid = self.bin_split_points[node.fid][node.bid]
"""
Mix Mode
"""
def sync_en_g_sum_h_sum(self):
gh_list = self.transfer_inst.encrypted_grad_and_hess.get(idx=0, suffix='ghsum')
g_sum, h_sum = gh_list
return g_sum, h_sum
def mix_mode_fit(self):
LOGGER.info('running mix mode')
if self.tree_type == plan.tree_type_dict['guest_feat_only']:
LOGGER.debug('this tree uses guest feature only, skip')
return
if self.self_host_id != self.target_host_id:
LOGGER.debug('not selected host, skip')
return
LOGGER.debug('use local host feature to build tree')
self.init_compressor_and_sync_gh()
root_sum_grad, root_sum_hess = self.sync_en_g_sum_h_sum()
self.inst2node_idx = self.assign_instance_to_root_node(self.data_bin,
root_node_id=0) # root node id is 0
self.cur_layer_nodes = [Node(id=0, sitename=self.sitename, sum_grad=root_sum_grad, sum_hess=root_sum_hess, )]
for dep in range(self.max_depth):
tree_action, layer_target_host_id = self.get_node_plan(dep)
# for split point masking
self.generate_split_point_masking_variable(dep)
self.sync_cur_layer_nodes(self.cur_layer_nodes, dep)
if len(self.cur_layer_nodes) == 0:
break
self.update_instances_node_positions()
batch = 0
split_info = []
for i in range(0, len(self.cur_layer_nodes), self.max_split_nodes):
self.cur_to_split_nodes = self.cur_layer_nodes[i: i + self.max_split_nodes]
batch_split_info = self.compute_best_splits_with_node_plan(
tree_action, layer_target_host_id, cur_to_split_nodes=self.cur_to_split_nodes,
node_map=self.get_node_map(
self.cur_to_split_nodes), dep=dep, batch=batch, mode=consts.MIX_TREE)
batch += 1
split_info.extend(batch_split_info)
self.update_host_side_tree(split_info, reach_max_depth=False)
self.inst2node_idx = self.host_local_assign_instances_to_new_node()
if self.cur_layer_nodes:
self.update_host_side_tree([], reach_max_depth=True) # mark final layer nodes as leaves
self.update_instances_node_positions() # update instances position
self.host_local_assign_instances_to_new_node() # assign instances to final leaves
self.convert_bin_to_real2() # convert bin num to val
self.sync_leaf_nodes() # send leaf nodes to guest
self.process_leaves_info() # remove encrypted g/h
self.sync_sample_leaf_pos(self.sample_leaf_pos) # sync sample final leaf positions
@staticmethod
def host_local_traverse_tree(data_inst, tree_node, use_missing=True, zero_as_missing=True):
nid = 0 # root node id
while True:
if tree_node[nid].is_leaf:
return nid
cur_node = tree_node[nid]
fid, bid = cur_node.fid, cur_node.bid
missing_dir = cur_node.missing_dir
if use_missing and zero_as_missing:
if data_inst.features.get_data(fid) == NoneType() or data_inst.features.get_data(fid, None) is None:
nid = tree_node[nid].right_nodeid if missing_dir == 1 else tree_node[nid].left_nodeid
elif data_inst.features.get_data(fid) <= bid:
nid = tree_node[nid].left_nodeid
else:
nid = tree_node[nid].right_nodeid
elif data_inst.features.get_data(fid) == NoneType():
nid = tree_node[nid].right_nodeid if missing_dir == 1 else tree_node[nid].left_nodeid
elif data_inst.features.get_data(fid, 0) <= bid:
nid = tree_node[nid].left_nodeid
else:
nid = tree_node[nid].right_nodeid
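    # Convention assumed above: missing_dir == 1 routes missing/None features
    # to the right child, any other value routes them left; non-missing values
    # go left iff feature value <= bid (the real-valued split point).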
def mix_mode_predict(self, data_inst):
LOGGER.debug('running mix mode predict')
if not self.use_guest_feat_when_predict and self.target_host_id == self.self_host_id:
LOGGER.info('predicting using local nodes')
traverse_tree = functools.partial(self.host_local_traverse_tree,
tree_node=self.tree_node,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing, )
leaf_nodes = data_inst.mapValues(traverse_tree)
LOGGER.debug('leaf nodes count is {}'.format(leaf_nodes.count()))
self.sync_sample_leaf_pos(leaf_nodes)
else:
LOGGER.info('this tree belongs to other parties, skip prediction')
# sync status
_ = self.transfer_inst.sync_flag.get(idx=0)
"""
Layered Mode
"""
def layered_mode_fit(self):
LOGGER.info('running layered mode')
self.initialize_node_plan()
self.init_compressor_and_sync_gh()
for dep in range(self.max_depth):
tree_action, layer_target_host_id = self.get_node_plan(dep)
# for split point masking
self.generate_split_point_masking_variable(dep)
self.sync_tree_node_queue(dep)
if len(self.cur_layer_nodes) == 0:
break
if self.self_host_id == layer_target_host_id:
self.inst2node_idx = self.sync_node_positions(dep)
self.update_instances_node_positions()
batch = 0
for i in range(0, len(self.cur_layer_nodes), self.max_split_nodes):
self.cur_to_split_nodes = self.cur_layer_nodes[i: i + self.max_split_nodes]
self.compute_best_splits_with_node_plan(tree_action, layer_target_host_id,
cur_to_split_nodes=self.cur_to_split_nodes,
node_map=self.get_node_map(self.cur_to_split_nodes),
dep=dep, batch=batch,
mode=consts.LAYERED_TREE)
batch += 1
if layer_target_host_id == self.self_host_id:
dispatch_node_host = self.sync_dispatch_node_host(dep)
self.assign_instances_to_new_node(dispatch_node_host, dep)
self.sync_tree()
self.convert_bin_to_real(self.split_maskdict)
self.collect_host_split_feat_importance()
"""
Fit & Predict
"""
def fit(self):
LOGGER.info("begin to fit fast host decision tree")
self.initialize_node_plan()
if self.tree_type == plan.tree_type_dict['guest_feat_only'] or \
self.tree_type == plan.tree_type_dict['host_feat_only']:
self.mix_mode_fit()
else:
self.layered_mode_fit()
LOGGER.info("end to fit host decision tree")
def predict(self, data_inst):
LOGGER.info("start to predict!")
if self.tree_type == plan.tree_type_dict['guest_feat_only'] or \
self.tree_type == plan.tree_type_dict['host_feat_only']:
self.mix_mode_predict(data_inst)
else:
LOGGER.debug('running layered mode predict')
super(HeteroFastDecisionTreeHost, self).predict(data_inst)
LOGGER.info('predict done')
def get_model_meta(self):
return super(HeteroFastDecisionTreeHost, self).get_model_meta()
def get_model_param(self):
return super(HeteroFastDecisionTreeHost, self).get_model_param()
| 19,006 | 39.613248 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_fast_decision_tree_guest.py
|
import functools
import copy
from federatedml.ensemble.basic_algorithms import HeteroDecisionTreeGuest
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core import tree_plan as plan
from federatedml.util import consts
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.util import LOGGER
class HeteroFastDecisionTreeGuest(HeteroDecisionTreeGuest):
def __init__(self, tree_param):
super(HeteroFastDecisionTreeGuest, self).__init__(tree_param)
self.node_plan = []
self.node_plan_idx = 0
self.tree_type = None
self.target_host_id = -1
self.guest_depth = 0
self.host_depth = 0
self.cur_dep = 0
self.use_guest_feat_when_predict = False
"""
Setting
"""
def use_guest_feat_only_predict_mode(self):
self.use_guest_feat_when_predict = True
def set_tree_work_mode(self, tree_type, target_host_id):
self.tree_type, self.target_host_id = tree_type, target_host_id
def set_layered_depth(self, guest_depth, host_depth):
self.guest_depth, self.host_depth = guest_depth, host_depth
"""
Tree Plan
"""
def initialize_node_plan(self):
if self.tree_type == plan.tree_type_dict['layered_tree']:
self.node_plan = plan.create_layered_tree_node_plan(guest_depth=self.guest_depth,
host_depth=self.host_depth,
host_list=self.host_party_idlist)
self.max_depth = len(self.node_plan)
LOGGER.info('max depth reset to {}, cur node plan is {}'.format(self.max_depth, self.node_plan))
else:
self.node_plan = plan.create_node_plan(self.tree_type, self.target_host_id, self.max_depth)
def get_node_plan(self, idx):
return self.node_plan[idx]
def host_id_to_idx(self, host_id):
if host_id == -1:
return -1
return self.host_party_idlist.index(host_id)
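    # e.g. with host_party_idlist = [9999, 10000] (hypothetical party ids),
    # host_id_to_idx(10000) -> 1; the sentinel -1 ("no target host") passes
    # through unchanged.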
"""
Compute split point
"""
def compute_best_splits_with_node_plan(self, tree_action, target_host_idx, cur_to_split_nodes, node_map: dict,
dep: int, batch_idx: int, mode=consts.MIX_TREE):
LOGGER.debug('node plan at dep {} is {}'.format(dep, (tree_action, target_host_idx)))
# In layered mode, guest hist computation does not start from root node, so need to disable hist-sub
hist_sub = True if mode == consts.MIX_TREE else False
if tree_action == plan.tree_actions['guest_only']:
inst2node_idx = self.get_computing_inst2node_idx()
node_sample_count = self.count_node_sample_num(inst2node_idx, node_map)
LOGGER.debug('sample count is {}'.format(node_sample_count))
acc_histograms = self.get_local_histograms(dep, self.data_with_node_assignments, self.grad_and_hess,
node_sample_count, cur_to_split_nodes, node_map, ret='tensor',
hist_sub=hist_sub)
best_split_info_guest = self.splitter.find_split(acc_histograms, self.valid_features,
self.data_bin.partitions, self.sitename,
self.use_missing, self.zero_as_missing)
return best_split_info_guest
if tree_action == plan.tree_actions['host_only']:
split_info_table = self.transfer_inst.encrypted_splitinfo_host.get(
idx=target_host_idx, suffix=(dep, batch_idx))
host_split_info = self.splitter.find_host_best_split_info(
split_info_table, self.get_host_sitename(target_host_idx), self.encrypter, gh_packer=self.packer)
split_info_list = [None for i in range(len(host_split_info))]
for key in host_split_info:
split_info_list[node_map[key]] = host_split_info[key]
if mode == consts.MIX_TREE:
for split_info in split_info_list:
split_info.sum_grad, split_info.sum_hess, split_info.gain = self.encrypt(split_info.sum_grad), \
self.encrypt(split_info.sum_hess), \
self.encrypt(split_info.gain)
return_split_info = split_info_list
else:
return_split_info = copy.deepcopy(split_info_list)
for split_info in return_split_info:
split_info.sum_grad, split_info.sum_hess, split_info.gain = None, None, None
self.transfer_inst.federated_best_splitinfo_host.remote(return_split_info,
suffix=(dep, batch_idx),
idx=target_host_idx,
role=consts.HOST)
if mode == consts.MIX_TREE:
return []
elif mode == consts.LAYERED_TREE:
cur_best_split = self.merge_splitinfo(splitinfo_guest=[],
splitinfo_host=[split_info_list],
merge_host_split_only=True,
need_decrypt=False)
return cur_best_split
"""
Tree update
"""
def assign_instances_to_new_node_with_node_plan(self, dep, tree_action, mode=consts.MIX_TREE, ):
LOGGER.info("redispatch node of depth {}".format(dep))
dispatch_node_method = functools.partial(self.assign_an_instance,
tree_=self.tree_node,
decoder=self.decode,
sitename=self.sitename,
split_maskdict=self.split_maskdict,
bin_sparse_points=self.bin_sparse_points,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict)
dispatch_guest_result = self.data_with_node_assignments.mapValues(dispatch_node_method)
LOGGER.info("remask dispatch node result of depth {}".format(dep))
dispatch_to_host_result = dispatch_guest_result.filter(
lambda key, value: isinstance(value, tuple) and len(value) > 2)
dispatch_guest_result = dispatch_guest_result.subtractByKey(dispatch_to_host_result)
leaf = dispatch_guest_result.filter(lambda key, value: isinstance(value, tuple) is False)
if self.sample_leaf_pos is None:
self.sample_leaf_pos = leaf
else:
self.sample_leaf_pos = self.sample_leaf_pos.union(leaf)
dispatch_guest_result = dispatch_guest_result.subtractByKey(leaf)
if tree_action == plan.tree_actions['host_only'] and mode == consts.LAYERED_TREE:
dispatch_guest_result = dispatch_guest_result.subtractByKey(leaf)
dispatch_node_host_result = self.sync_dispatch_node_host(dispatch_to_host_result, dep, idx=-1)
self.inst2node_idx = None
for idx in range(len(dispatch_node_host_result)):
if self.inst2node_idx is None:
self.inst2node_idx = dispatch_node_host_result[idx]
else:
self.inst2node_idx = self.inst2node_idx.join(dispatch_node_host_result[idx],
lambda unleaf_state_nodeid1,
unleaf_state_nodeid2:
unleaf_state_nodeid1 if len(
unleaf_state_nodeid1) == 2 else
unleaf_state_nodeid2)
self.inst2node_idx = self.inst2node_idx.union(dispatch_guest_result)
else:
LOGGER.debug('skip host only inst2node_idx computation')
self.inst2node_idx = dispatch_guest_result
"""
Layered Mode
"""
def layered_mode_fit(self):
LOGGER.info('running layered mode')
self.initialize_node_plan()
self.init_packer_and_sync_gh()
root_node = self.initialize_root_node()
self.cur_layer_nodes = [root_node]
self.inst2node_idx = self.assign_instance_to_root_node(self.data_bin, root_node_id=root_node.id)
for dep in range(self.max_depth):
tree_action, layer_target_host_id = self.get_node_plan(dep)
host_idx = self.host_id_to_idx(layer_target_host_id)
self.sync_cur_to_split_nodes(self.cur_layer_nodes, dep, idx=-1)
if len(self.cur_layer_nodes) == 0:
break
if layer_target_host_id != -1:
self.sync_node_positions(dep, idx=-1)
self.update_instances_node_positions()
split_info = []
for batch_idx, i in enumerate(range(0, len(self.cur_layer_nodes), self.max_split_nodes)):
self.cur_to_split_nodes = self.cur_layer_nodes[i: i + self.max_split_nodes]
cur_splitinfos = self.compute_best_splits_with_node_plan(
tree_action,
host_idx,
node_map=self.get_node_map(self.cur_to_split_nodes),
cur_to_split_nodes=self.cur_to_split_nodes,
dep=dep,
batch_idx=batch_idx,
mode=consts.LAYERED_TREE)
split_info.extend(cur_splitinfos)
self.update_tree(split_info, False)
self.assign_instances_to_new_node_with_node_plan(dep, tree_action, mode=consts.LAYERED_TREE, )
if self.cur_layer_nodes:
self.assign_instance_to_leaves_and_update_weights()
self.convert_bin_to_real()
self.round_leaf_val()
self.sync_tree(idx=-1)
self.sample_weights_post_process()
"""
Mix Mode
"""
def sync_en_g_sum_h_sum(self):
root_sum_grad, root_sum_hess = self.get_grad_hess_sum(self.grad_and_hess)
en_g, en_h = self.encrypt(root_sum_grad), self.encrypt(root_sum_hess)
self.transfer_inst.encrypted_grad_and_hess.remote(idx=self.host_id_to_idx(self.target_host_id),
obj=[en_g, en_h], suffix='ghsum', role=consts.HOST)
def mix_mode_fit(self):
LOGGER.info('running mix mode')
self.initialize_node_plan()
if self.tree_type != plan.tree_type_dict['guest_feat_only']:
self.init_packer_and_sync_gh(idx=self.host_id_to_idx(self.target_host_id))
self.sync_en_g_sum_h_sum()
else:
root_node = self.initialize_root_node()
self.cur_layer_nodes = [root_node]
self.inst2node_idx = self.assign_instance_to_root_node(self.data_bin, root_node_id=root_node.id)
for dep in range(self.max_depth):
tree_action, layer_target_host_id = self.get_node_plan(dep)
host_idx = self.host_id_to_idx(layer_target_host_id)
# get cur_layer_node_num
if self.tree_type == plan.tree_type_dict['host_feat_only']:
self.cur_layer_nodes = self.sync_host_cur_layer_nodes(dep, host_idx)
if len(self.cur_layer_nodes) == 0:
break
if self.tree_type == plan.tree_type_dict['guest_feat_only']:
self.update_instances_node_positions()
split_info = []
for batch_idx, i in enumerate(range(0, len(self.cur_layer_nodes), self.max_split_nodes)):
self.cur_to_split_nodes = self.cur_layer_nodes[i: i + self.max_split_nodes]
cur_splitinfos = self.compute_best_splits_with_node_plan(tree_action, host_idx,
node_map=self.get_node_map(
self.cur_to_split_nodes),
cur_to_split_nodes=self.cur_to_split_nodes,
dep=dep, batch_idx=batch_idx,
mode=consts.MIX_TREE)
split_info.extend(cur_splitinfos)
if self.tree_type == plan.tree_type_dict['guest_feat_only']:
self.update_tree(split_info, False)
self.assign_instances_to_new_node_with_node_plan(dep, tree_action, host_idx)
if self.tree_type == plan.tree_type_dict['host_feat_only']:
target_idx = self.host_id_to_idx(self.get_node_plan(0)[1]) # get host id
leaves = self.sync_host_leaf_nodes(target_idx) # get leaves node from host
self.tree_node = self.handle_leaf_nodes(leaves) # decrypt node info
self.sample_leaf_pos = self.sync_sample_leaf_pos(idx=target_idx) # get final sample leaf id from host
# checking sample number
assert self.sample_leaf_pos.count() == self.data_bin.count(), 'number of sample positions does not match, ' \
                                                              'sample leaf pos number: {}, instance number: {}'. \
format(self.sample_leaf_pos.count(), self.data_bin.count())
else:
if self.cur_layer_nodes:
self.assign_instance_to_leaves_and_update_weights() # guest local updates
self.convert_bin_to_real() # convert bin id to real value features
self.sample_weights_post_process()
self.round_leaf_val()
def mix_mode_predict(self, data_inst):
LOGGER.info("running mix mode predict")
if self.use_guest_feat_when_predict:
LOGGER.debug('predicting using guest local tree')
predict_data = data_inst.mapValues(lambda inst: (0, 1))
traverse_tree = functools.partial(self.traverse_tree,
tree_=self.tree_node,
decoder=self.decode,
sitename=self.sitename,
split_maskdict=self.split_maskdict,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict)
predict_result = predict_data.join(data_inst, traverse_tree)
LOGGER.debug('guest_predict_inst_count is {}'.format(predict_result.count()))
else:
LOGGER.debug('predicting using host local tree')
leaf_node_info = self.sync_sample_leaf_pos(idx=self.host_id_to_idx(self.target_host_id))
predict_result = self.extract_sample_weights_from_node(leaf_node_info)
self.transfer_inst.sync_flag.remote(True, idx=-1)
return predict_result
"""
Federation Functions
"""
def sync_sample_leaf_pos(self, idx):
leaf_pos = self.transfer_inst.dispatch_node_host_result.get(idx=idx, suffix=('final sample pos',))
return leaf_pos
def sync_host_cur_layer_nodes(self, dep, host_idx):
nodes = self.transfer_inst.host_cur_to_split_node_num.get(idx=host_idx, suffix=(dep,))
for n in nodes:
n.sum_grad = self.decrypt(n.sum_grad)
n.sum_hess = self.decrypt(n.sum_hess)
return nodes
def sync_host_leaf_nodes(self, idx):
return self.transfer_inst.host_leafs.get(idx=idx)
"""
Mix Functions
"""
@staticmethod
def get_node_weights(node_id, tree_nodes):
return tree_nodes[node_id].weight
def extract_sample_weights_from_node(self, sample_leaf_pos):
"""
Given a dtable that contains the leaf positions of samples, return their leaf weights
"""
func = functools.partial(self.get_node_weights, tree_nodes=self.tree_node)
sample_weights = sample_leaf_pos.mapValues(func)
return sample_weights
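# Illustrative example (not in the original source): if sample_leaf_pos is
# {inst_0: 3, inst_1: 5} with tree_nodes[3].weight == 0.25 and tree_nodes[5].weight == -0.1,
# the returned table is {inst_0: 0.25, inst_1: -0.1}.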
def handle_leaf_nodes(self, nodes):
"""
decrypt grad and hess and return a tree node list that only contains leaf nodes
"""
max_node_id = -1
for n in nodes:
n.sum_hess = self.decrypt(n.sum_hess)
n.sum_grad = self.decrypt(n.sum_grad)
n.weight = self.splitter.node_weight(n.sum_grad, n.sum_hess)
n.sitename = self.sitename
if n.id > max_node_id:
max_node_id = n.id
new_nodes = [Node() for i in range(max_node_id + 1)]
for n in nodes:
new_nodes[n.id] = n
return new_nodes
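# Illustrative example (not in the original source): for received leaves with ids
# [3, 4, 6], max_node_id is 6 and new_nodes has length 7; indices 0, 1, 2 and 5 stay
# as placeholder Node() objects so that new_nodes can be indexed directly by node id.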
"""
Fit & Predict
"""
def fit(self):
LOGGER.info('fitting a hetero decision tree')
if self.tree_type == plan.tree_type_dict['host_feat_only'] or \
self.tree_type == plan.tree_type_dict['guest_feat_only']:
self.mix_mode_fit()
elif self.tree_type == plan.tree_type_dict['layered_tree']:
self.layered_mode_fit()
LOGGER.info("end to fit guest decision tree")
def predict(self, data_inst):
LOGGER.info("start to predict!")
if self.tree_type == plan.tree_type_dict['guest_feat_only'] or \
self.tree_type == plan.tree_type_dict['host_feat_only']:
predict_res = self.mix_mode_predict(data_inst)
LOGGER.debug('input result count {} , out count {}'.format(data_inst.count(), predict_res.count()))
return predict_res
else:
LOGGER.debug('running layered mode predict')
return super(HeteroFastDecisionTreeGuest, self).predict(data_inst)
def get_model_meta(self):
return super(HeteroFastDecisionTreeGuest, self).get_model_meta()
def get_model_param(self):
return super(HeteroFastDecisionTreeGuest, self).get_model_param()
| 18,485 | 44.308824 | 124 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_decision_tree_guest.py
|
import copy
import functools
from fate_arch.session import computing_session as session
from federatedml.util import LOGGER
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.decision_tree import DecisionTree
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import CriterionMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import DecisionTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import DecisionTreeModelParam
from federatedml.transfer_variable.transfer_class.hetero_decision_tree_transfer_variable import \
HeteroDecisionTreeTransferVariable
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.g_h_optim import GHPacker
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.util import consts
class HeteroDecisionTreeGuest(DecisionTree):
def __init__(self, tree_param):
super(HeteroDecisionTreeGuest, self).__init__(tree_param)
# In FATE-1.8 reset feature importance to 'split'
self.feature_importance_type = 'split'
self.encrypter = None
self.transfer_inst = HeteroDecisionTreeTransferVariable()
self.sitename = consts.GUEST # will be modified in self.set_runtime_idx()
self.complete_secure_tree = False
self.split_maskdict = {} # save split value
self.missing_dir_maskdict = {} # save missing dir
self.host_party_idlist = []
self.compressor = None
# goss subsample
self.run_goss = False
# cipher compressing
self.task_type = None
self.run_cipher_compressing = True
self.packer = None
self.max_sample_weight = 1
self.objective = None
# code version control
self.new_ver = True
# mo tree
self.mo_tree = False
self.class_num = 1
"""
Node Encode / Decode
"""
def encode(self, etype="feature_idx", val=None, nid=None):
if etype == "feature_idx":
return val
if etype == "feature_val":
self.split_maskdict[nid] = val
return None
if etype == "missing_dir":
self.missing_dir_maskdict[nid] = val
return None
raise TypeError("encode type %s is not support!" % (str(etype)))
@staticmethod
def decode(dtype="feature_idx", val=None, nid=None, split_maskdict=None, missing_dir_maskdict=None):
if dtype == "feature_idx":
return val
if dtype == "feature_val":
if nid in split_maskdict:
return split_maskdict[nid]
else:
raise ValueError("decode val %s cause error, can't recognize it!" % (str(val)))
if dtype == "missing_dir":
if nid in missing_dir_maskdict:
return missing_dir_maskdict[nid]
else:
raise ValueError("decode val %s cause error, can't recognize it!" % (str(val)))
return TypeError("decode type %s is not support!" % (str(dtype)))
"""
Setting
"""
def set_host_party_idlist(self, id_list):
self.host_party_idlist = id_list
def report_init_status(self):
LOGGER.info('reporting initialization status')
LOGGER.info('using new version code {}'.format(self.new_ver))
if self.complete_secure_tree:
LOGGER.info('running complete secure')
if self.run_goss:
LOGGER.info('sampled g_h count is {}, total sample num is {}'.format(self.grad_and_hess.count(),
self.data_bin.count()))
if self.run_cipher_compressing:
LOGGER.info('running cipher compressing')
LOGGER.info('updated max sample weight is {}'.format(self.max_sample_weight))
if self.deterministic:
LOGGER.info('running on deterministic mode')
def init(self, flowid, runtime_idx, data_bin, bin_split_points, bin_sparse_points, valid_features,
grad_and_hess,
encrypter,
host_party_list,
task_type,
class_num=1,
complete_secure=False,
objective=None,
goss_subsample=False,
cipher_compressing=False,
max_sample_weight=1,
new_ver=True,
mo_tree=False
):
super(HeteroDecisionTreeGuest, self).init_data_and_variable(flowid, runtime_idx, data_bin, bin_split_points,
bin_sparse_points, valid_features, grad_and_hess)
self.check_max_split_nodes()
self.encrypter = encrypter
self.complete_secure_tree = complete_secure
self.host_party_idlist = host_party_list
self.run_goss = goss_subsample
self.run_cipher_compressing = cipher_compressing
self.max_sample_weight = max_sample_weight
self.task_type = task_type
self.mo_tree = mo_tree
self.objective = objective
if self.mo_tree: # when mo mode is activated, need class number
self.class_num = class_num
else:
self.class_num = 1
self.new_ver = new_ver
self.report_init_status()
"""
Encrypt/ Decrypt
"""
def encrypt(self, val):
return self.encrypter.encrypt(val)
def decrypt(self, val):
return self.encrypter.decrypt(val)
"""
Node Splitting
"""
def get_host_sitename(self, host_idx):
host_party_id = self.host_party_idlist[host_idx]
host_sitename = ":".join([consts.HOST, str(host_party_id)])
return host_sitename
def find_host_split(self, value):
cur_split_node, encrypted_splitinfo_host = value
sum_grad = cur_split_node.sum_grad
sum_hess = cur_split_node.sum_hess
best_gain = self.min_impurity_split - consts.FLOAT_ZERO
best_idx = -1
perform_recorder = {}
gains = []
for i in range(len(encrypted_splitinfo_host)):
sum_grad_l, sum_hess_l = encrypted_splitinfo_host[i]
sum_grad_l = self.decrypt(sum_grad_l)
sum_hess_l = self.decrypt(sum_hess_l)
sum_grad_r = sum_grad - sum_grad_l
sum_hess_r = sum_hess - sum_hess_l
gain = self.splitter.split_gain(sum_grad, sum_hess, sum_grad_l,
sum_hess_l, sum_grad_r, sum_hess_r)
perform_recorder[i] = gain
gains.append(gain)
if gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
best_gain = gain
best_idx = i
encrypted_best_gain = self.encrypt(best_gain)
return best_idx, encrypted_best_gain, best_gain
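# For reference (a hedged sketch, the exact formula is delegated to self.splitter):
# with the xgboost-style criterion and L2 regularization lambda, split_gain is typically
#   0.5 * (G_l^2 / (H_l + lambda) + G_r^2 / (H_r + lambda) - (G_l + G_r)^2 / (H_l + H_r + lambda))
# computed here on decrypted left-side sums, so host candidates never see plaintext g/h.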
def find_best_split_guest_and_host(self, splitinfo_guest_host, need_decrypt=True):
best_gain_host = self.decrypt(splitinfo_guest_host[1].gain) if need_decrypt else splitinfo_guest_host[1].gain
best_gain_host_idx = 1
for i in range(1, len(splitinfo_guest_host)):
gain_host_i = self.decrypt(splitinfo_guest_host[i].gain) if need_decrypt else splitinfo_guest_host[i].gain
if best_gain_host < gain_host_i - consts.FLOAT_ZERO:
best_gain_host = gain_host_i
best_gain_host_idx = i
# if merge_host_split_only is True, guest split-info is None
# first one at 0 index is the best split of guest
if splitinfo_guest_host[0] is not None and \
splitinfo_guest_host[0].gain >= best_gain_host - consts.FLOAT_ZERO:
best_splitinfo = splitinfo_guest_host[0]
else:
best_splitinfo = splitinfo_guest_host[best_gain_host_idx]
# when this node can not be further split, host sum_grad and sum_hess is not an encrypted number but 0
# so need type checking here
if need_decrypt:
best_splitinfo.sum_grad = self.decrypt(best_splitinfo.sum_grad) \
if not isinstance(best_splitinfo.sum_grad, int) else best_splitinfo.sum_grad
best_splitinfo.sum_hess = self.decrypt(best_splitinfo.sum_hess) \
if not isinstance(best_splitinfo.sum_hess, int) else best_splitinfo.sum_hess
best_splitinfo.gain = best_gain_host
return best_splitinfo
def merge_splitinfo(self, splitinfo_guest, splitinfo_host, merge_host_split_only=False, need_decrypt=True):
LOGGER.info("merging splitinfo, merge_host_split_only is {}".format(merge_host_split_only))
if merge_host_split_only:
splitinfo_guest = [None for i in range(len(splitinfo_host[0]))]
merge_infos = []
for i in range(len(splitinfo_guest)):
splitinfo = [splitinfo_guest[i]]
for j in range(len(splitinfo_host)):
splitinfo.append(splitinfo_host[j][i])
merge_infos.append(splitinfo)
splitinfo_guest_host_table = session.parallelize(merge_infos,
include_key=False,
partition=self.data_bin.partitions)
find_split_func = functools.partial(self.find_best_split_guest_and_host, need_decrypt=need_decrypt)
best_splitinfo_table = splitinfo_guest_host_table.mapValues(find_split_func)
best_splitinfos = [None for i in range(len(merge_infos))]
for node_idx, best_splitinfo in best_splitinfo_table.collect():
best_splitinfos[node_idx] = best_splitinfo
return best_splitinfos
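# Layout note (added for clarity): merge_infos[i] is
#   [guest_splitinfo_i, host_1_splitinfo_i, ..., host_k_splitinfo_i]
# i.e. one row per to-split node, so find_best_split_guest_and_host can compare the
# guest candidate at index 0 against every host candidate for the same node.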
def federated_find_split(self, dep=-1, batch=-1, idx=-1):
LOGGER.info("federated find split of depth {}, batch {}".format(dep, batch))
# get flatten split points from hosts
# [split points from host 1, split points from host 2, ... and so on]
encrypted_splitinfo_host = self.sync_encrypted_splitinfo_host(dep, batch, idx=idx)
for host_idx in range(len(encrypted_splitinfo_host)):
LOGGER.debug('host sitename is {}'.format(self.get_host_sitename(host_idx)))
init_gain = self.min_impurity_split - consts.FLOAT_ZERO
encrypted_init_gain = self.encrypter.encrypt(init_gain)
# init encrypted gain for every node in cur layer
best_splitinfo_host = [[-1, encrypted_init_gain] for j in range(len(self.cur_to_split_nodes))]
# init best gain for every node in cur layer
best_gains = [init_gain for j in range(len(self.cur_to_split_nodes))]
# max split points to compute at a time, to control memory consumption
max_nodes = max(len(encrypted_splitinfo_host[host_idx][j]) for j in range(len(self.cur_to_split_nodes)))
# batch split point finding for every cur to split nodes
for k in range(0, max_nodes, consts.MAX_SPLITINFO_TO_COMPUTE):
batch_splitinfo_host = [encrypted_splitinfo[k: k + consts.MAX_SPLITINFO_TO_COMPUTE] for
encrypted_splitinfo
in encrypted_splitinfo_host[host_idx]]
encrypted_splitinfo_host_table = session.parallelize(zip(self.cur_to_split_nodes, batch_splitinfo_host),
include_key=False,
partition=self.data_bin.partitions)
splitinfos = encrypted_splitinfo_host_table.mapValues(self.find_host_split).collect()
# update best splitinfo and gain for every cur to split nodes
for node_idx, splitinfo in splitinfos:
if best_splitinfo_host[node_idx][0] == -1:
best_splitinfo_host[node_idx] = list(splitinfo[:2])
best_gains[node_idx] = splitinfo[2]
elif splitinfo[0] != -1 and splitinfo[2] > best_gains[node_idx] + consts.FLOAT_ZERO:
best_splitinfo_host[node_idx][0] = k + splitinfo[0]
best_splitinfo_host[node_idx][1] = splitinfo[1]
best_gains[node_idx] = splitinfo[2]
if idx != -1:
self.sync_federated_best_splitinfo_host(best_splitinfo_host, dep, batch, idx)
break
self.sync_federated_best_splitinfo_host(best_splitinfo_host, dep, batch, host_idx)
def get_computing_inst2node_idx(self):
if self.run_goss:
inst2node_idx = self.inst2node_idx.join(self.grad_and_hess, lambda x1, x2: x1)
else:
inst2node_idx = self.inst2node_idx
return inst2node_idx
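# Note added for clarity: under goss subsampling grad_and_hess holds only the sampled
# instances, so joining inst2node_idx against it narrows node positions to exactly the
# samples that participate in histogram computation for this tree.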
def compute_best_splits(self, cur_to_split_nodes, node_map, dep, batch_idx):
LOGGER.info('solving node batch {}, node num is {}'.format(batch_idx, len(cur_to_split_nodes)))
inst2node_idx = self.get_computing_inst2node_idx()
node_sample_count = self.count_node_sample_num(inst2node_idx, node_map)
LOGGER.debug('sample count is {}'.format(node_sample_count))
acc_histograms = self.get_local_histograms(dep, self.data_with_node_assignments, self.grad_and_hess,
node_sample_count, cur_to_split_nodes, node_map, ret='tensor',
hist_sub=True)
best_split_info_guest = self.splitter.find_split(acc_histograms, self.valid_features,
self.data_bin.partitions, self.sitename,
self.use_missing, self.zero_as_missing)
if self.complete_secure_tree:
return best_split_info_guest
host_split_info_tables = self.transfer_inst.encrypted_splitinfo_host.get(idx=-1, suffix=(dep, batch_idx))
best_splits_of_all_hosts = []
for host_idx, split_info_table in enumerate(host_split_info_tables):
host_split_info = self.splitter.find_host_best_split_info(split_info_table,
self.get_host_sitename(host_idx),
self.encrypter,
gh_packer=self.packer)
split_info_list = [None for i in range(len(host_split_info))]
for key in host_split_info:
split_info_list[node_map[key]] = host_split_info[key]
return_split_info = copy.deepcopy(split_info_list)
for split_info in return_split_info:
split_info.sum_grad, split_info.sum_hess, split_info.gain = None, None, None
self.transfer_inst.federated_best_splitinfo_host.remote(return_split_info,
suffix=(dep, batch_idx), idx=host_idx,
role=consts.HOST)
best_splits_of_all_hosts.append(split_info_list)
final_best_splits = self.merge_splitinfo(best_split_info_guest, best_splits_of_all_hosts, need_decrypt=False)
return final_best_splits
"""
Federation Functions
"""
def init_packer_and_sync_gh(self, idx=-1):
if self.run_cipher_compressing:
g_min, g_max = None, None
if self.task_type == consts.REGRESSION:
self.grad_and_hess.schema = {'header': ['g', 'h']}
statistics = MultivariateStatisticalSummary(self.grad_and_hess, -1)
g_min = statistics.get_min()['g']
g_max = statistics.get_max()['g']
if self.objective == 'lse':
h_max = 2
elif self.objective == 'lae':
h_max = 1
else:
h_max = statistics.get_max()['h']
else:
h_max = None
self.packer = GHPacker(sample_num=self.grad_and_hess.count(),
task_type=self.task_type,
max_sample_weight=self.max_sample_weight,
encrypter=self.encrypter,
g_min=g_min,
g_max=g_max,
h_max=h_max,
mo_mode=self.mo_tree, # mo packing
class_num=self.class_num # no mo packing
)
en_grad_hess = self.packer.pack_and_encrypt(self.grad_and_hess)
else:
en_grad_hess = self.encrypter.distribute_encrypt(self.grad_and_hess)
LOGGER.info('sending g/h to host')
self.transfer_inst.encrypted_grad_and_hess.remote(en_grad_hess,
role=consts.HOST,
idx=idx)
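# Note (a hedged sketch of the intent): with cipher compressing on, g and h are
# fixed-point encoded and packed into one large integer per sample before a single
# Paillier encryption, so hosts aggregate one ciphertext instead of two; the
# g_min/g_max/h_max bounds keep packed sums from overflowing. Details live in GHPacker.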
def sync_cur_to_split_nodes(self, cur_to_split_node, dep=-1, idx=-1):
LOGGER.info("send tree node queue of depth {}".format(dep))
mask_tree_node_queue = copy.deepcopy(cur_to_split_node)
for i in range(len(mask_tree_node_queue)):
mask_tree_node_queue[i] = Node(id=mask_tree_node_queue[i].id,
parent_nodeid=mask_tree_node_queue[i].parent_nodeid,
is_left_node=mask_tree_node_queue[i].is_left_node)
self.transfer_inst.tree_node_queue.remote(mask_tree_node_queue,
role=consts.HOST,
idx=idx,
suffix=(dep,))
def sync_node_positions(self, dep, idx=-1):
LOGGER.info("send node positions of depth {}".format(dep))
self.transfer_inst.node_positions.remote(self.inst2node_idx,
role=consts.HOST,
idx=idx,
suffix=(dep,))
def sync_encrypted_splitinfo_host(self, dep=-1, batch=-1, idx=-1):
LOGGER.info("get encrypted splitinfo of depth {}, batch {}".format(dep, batch))
LOGGER.debug('host idx is {}'.format(idx))
encrypted_splitinfo_host = self.transfer_inst.encrypted_splitinfo_host.get(idx=idx,
suffix=(dep, batch,))
ret = []
if idx == -1:
for obj in encrypted_splitinfo_host:
ret.append(obj.get_data())
else:
ret.append(encrypted_splitinfo_host.get_data())
return ret
def sync_federated_best_splitinfo_host(self, federated_best_splitinfo_host, dep=-1, batch=-1, idx=-1):
LOGGER.info("send federated best splitinfo of depth {}, batch {}".format(dep, batch))
self.transfer_inst.federated_best_splitinfo_host.remote(federated_best_splitinfo_host,
role=consts.HOST,
idx=idx,
suffix=(dep, batch,))
def sync_final_split_host(self, dep=-1, batch=-1, idx=-1):
LOGGER.info("get host final splitinfo of depth {}, batch {}".format(dep, batch))
final_splitinfo_host = self.transfer_inst.final_splitinfo_host.get(idx=idx,
suffix=(dep, batch,))
return final_splitinfo_host if idx == -1 else [final_splitinfo_host]
def sync_dispatch_node_host(self, dispatch_guest_data, dep=-1, idx=-1):
LOGGER.info("send node to host to dispatch, depth is {}".format(dep))
self.transfer_inst.dispatch_node_host.remote(dispatch_guest_data,
role=consts.HOST,
idx=idx,
suffix=(dep,))
LOGGER.info("get host dispatch result, depth is {}".format(dep))
ret = self.transfer_inst.dispatch_node_host_result.get(idx=idx, suffix=(dep,))
return ret if idx == -1 else [ret]
def sync_tree(self, idx=-1):
LOGGER.info("sync tree to host")
tree_nodes = self.remove_sensitive_info()
self.transfer_inst.tree.remote(tree_nodes,
role=consts.HOST,
idx=idx)
def sync_predict_finish_tag(self, finish_tag, send_times):
LOGGER.info("send the {}-th predict finish tag {} to host".format(finish_tag, send_times))
self.transfer_inst.predict_finish_tag.remote(finish_tag,
role=consts.HOST,
idx=-1,
suffix=(send_times,))
def sync_predict_data(self, predict_data, send_times):
LOGGER.info("send predict data to host, sending times is {}".format(send_times))
self.transfer_inst.predict_data.remote(predict_data,
role=consts.HOST,
idx=-1,
suffix=(send_times,))
def sync_data_predicted_by_host(self, send_times):
LOGGER.info("get predicted data by host, recv times is {}".format(send_times))
predict_data = self.transfer_inst.predict_data_by_host.get(idx=-1,
suffix=(send_times,))
return predict_data
"""
Pre-Process / Post-Process
"""
def remove_sensitive_info(self):
"""
host is not allowed to get weights/g/h
"""
new_tree_ = copy.deepcopy(self.tree_node)
for node in new_tree_:
node.weight = None
node.sum_grad = None
node.sum_hess = None
node.fid = -1
node.bid = -1
return new_tree_
def initialize_root_node(self):
LOGGER.info('initializing root node')
root_sum_grad, root_sum_hess = self.get_grad_hess_sum(self.grad_and_hess)
root_node = Node(id=0, sitename=self.sitename, sum_grad=root_sum_grad, sum_hess=root_sum_hess,
weight=self.splitter.node_weight(root_sum_grad, root_sum_hess))
return root_node
def convert_bin_to_real(self):
LOGGER.info("convert tree node bins to real value")
for i in range(len(self.tree_node)):
if self.tree_node[i].is_leaf is True:
continue
if self.tree_node[i].sitename == self.sitename:
fid = self.decode("feature_idx", self.tree_node[i].fid, split_maskdict=self.split_maskdict)
bid = self.decode("feature_val", self.tree_node[i].bid, self.tree_node[i].id, self.split_maskdict)
real_split_val = self.encode("feature_val", self.bin_split_points[fid][bid], self.tree_node[i].id)
self.tree_node[i].bid = real_split_val
"""
Tree Updating
"""
def update_tree(self, split_info, reach_max_depth):
LOGGER.info("update tree node, splitlist length is {}, tree node queue size is".format(
len(split_info), len(self.cur_layer_nodes)))
new_tree_node_queue = []
for i in range(len(self.cur_layer_nodes)):
sum_grad = self.cur_layer_nodes[i].sum_grad
sum_hess = self.cur_layer_nodes[i].sum_hess
if reach_max_depth or split_info[i].gain <= \
self.min_impurity_split + consts.FLOAT_ZERO: # if reach max_depth, only convert nodes to leaves
self.cur_layer_nodes[i].is_leaf = True
else:
pid = self.cur_layer_nodes[i].id
self.cur_layer_nodes[i].left_nodeid = self.tree_node_num + 1
self.cur_layer_nodes[i].right_nodeid = self.tree_node_num + 2
self.tree_node_num += 2
left_node = Node(id=self.cur_layer_nodes[i].left_nodeid,
sitename=self.sitename,
sum_grad=split_info[i].sum_grad,
sum_hess=split_info[i].sum_hess,
weight=self.splitter.node_weight(split_info[i].sum_grad, split_info[i].sum_hess),
is_left_node=True,
parent_nodeid=pid)
right_node = Node(id=self.cur_layer_nodes[i].right_nodeid,
sitename=self.sitename,
sum_grad=sum_grad - split_info[i].sum_grad,
sum_hess=sum_hess - split_info[i].sum_hess,
weight=self.splitter.node_weight(
sum_grad - split_info[i].sum_grad,
sum_hess - split_info[i].sum_hess),
is_left_node=False,
parent_nodeid=pid)
new_tree_node_queue.append(left_node)
new_tree_node_queue.append(right_node)
self.cur_layer_nodes[i].sitename = split_info[i].sitename
if self.cur_layer_nodes[i].sitename == self.sitename:
self.cur_layer_nodes[i].fid = self.encode("feature_idx", split_info[i].best_fid)
self.cur_layer_nodes[i].bid = self.encode("feature_val", split_info[i].best_bid,
self.cur_layer_nodes[i].id)
self.cur_layer_nodes[i].missing_dir = self.encode("missing_dir",
split_info[i].missing_dir,
self.cur_layer_nodes[i].id)
if split_info[i].sitename == self.sitename:
self.update_feature_importance(split_info[i])
self.tree_node.append(self.cur_layer_nodes[i])
self.cur_layer_nodes = new_tree_node_queue
@staticmethod
def assign_an_instance(value, tree_=None, decoder=None, sitename=consts.GUEST,
split_maskdict=None, bin_sparse_points=None,
use_missing=False, zero_as_missing=False,
missing_dir_maskdict=None):
unleaf_state, nodeid = value[1]
if tree_[nodeid].is_leaf is True:
return tree_[nodeid].id
else:
if tree_[nodeid].sitename == sitename:
next_layer_nid = HeteroDecisionTreeGuest.go_next_layer(tree_[nodeid], value[0], use_missing,
zero_as_missing, bin_sparse_points,
split_maskdict,
missing_dir_maskdict, decoder)
return 1, next_layer_nid
else:
return (1, tree_[nodeid].fid, tree_[nodeid].bid, tree_[nodeid].sitename,
nodeid, tree_[nodeid].left_nodeid, tree_[nodeid].right_nodeid)
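# Return convention (added for clarity): a bare node id means the sample reached a leaf;
# (1, next_nid) means it moved to a guest-owned node of the next layer; the long tuple
# (1, fid, bid, sitename, nid, left_nid, right_nid) means the split belongs to a host,
# which is what the isinstance/len filters in assign_instances_to_new_node rely on.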
def assign_instances_to_new_node(self, dep, reach_max_depth=False):
LOGGER.info("redispatch node of depth {}".format(dep))
dispatch_node_method = functools.partial(self.assign_an_instance,
tree_=self.tree_node,
decoder=self.decode,
sitename=self.sitename,
split_maskdict=self.split_maskdict,
bin_sparse_points=self.bin_sparse_points,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict)
dispatch_guest_result = self.data_with_node_assignments.mapValues(dispatch_node_method)
LOGGER.info("remask dispatch node result of depth {}".format(dep))
dispatch_to_host_result = dispatch_guest_result.filter(
lambda key, value: isinstance(value, tuple) and len(value) > 2)
dispatch_guest_result = dispatch_guest_result.subtractByKey(dispatch_to_host_result)
leaf = dispatch_guest_result.filter(lambda key, value: isinstance(value, tuple) is False)
if self.sample_leaf_pos is None:
self.sample_leaf_pos = leaf
else:
self.sample_leaf_pos = self.sample_leaf_pos.union(leaf)
if reach_max_depth: # if reach max_depth only update weight samples
return
dispatch_guest_result = dispatch_guest_result.subtractByKey(leaf)
dispatch_node_host_result = self.sync_dispatch_node_host(dispatch_to_host_result, dep)
self.inst2node_idx = None
for idx in range(len(dispatch_node_host_result)):
if self.inst2node_idx is None:
self.inst2node_idx = dispatch_node_host_result[idx]
else:
self.inst2node_idx = self.inst2node_idx.join(dispatch_node_host_result[idx],
lambda unleaf_state_nodeid1, unleaf_state_nodeid2:
unleaf_state_nodeid1 if len(
unleaf_state_nodeid1) == 2 else unleaf_state_nodeid2)
self.inst2node_idx = self.inst2node_idx.union(dispatch_guest_result)
def assign_instance_to_leaves_and_update_weights(self):
# re-assign samples to leaf nodes and update weights
self.update_tree([], True)
self.update_instances_node_positions()
self.assign_instances_to_new_node(self.max_depth, reach_max_depth=True)
def update_instances_node_positions(self):
self.data_with_node_assignments = self.data_bin.join(self.inst2node_idx, lambda data_inst, dispatch_info: (
data_inst, dispatch_info))
"""
Fit & Predict
"""
def fit(self):
LOGGER.info('fitting a guest decision tree')
self.init_packer_and_sync_gh()
root_node = self.initialize_root_node()
self.cur_layer_nodes = [root_node]
self.inst2node_idx = self.assign_instance_to_root_node(self.data_bin, root_node_id=root_node.id)
for dep in range(self.max_depth):
LOGGER.info('At dep {}, cur layer has {} nodes'.format(dep, len(self.cur_layer_nodes)))
self.sync_cur_to_split_nodes(self.cur_layer_nodes, dep)
if len(self.cur_layer_nodes) == 0:
break
self.sync_node_positions(dep)
self.update_instances_node_positions()
split_info = []
for batch_idx, i in enumerate(range(0, len(self.cur_layer_nodes), self.max_split_nodes)):
self.cur_to_split_nodes = self.cur_layer_nodes[i: i + self.max_split_nodes]
node_map = self.get_node_map(self.cur_to_split_nodes)
cur_splitinfos = self.compute_best_splits(self.cur_to_split_nodes, node_map, dep, batch_idx)
split_info.extend(cur_splitinfos)
self.update_tree(split_info, False)
self.assign_instances_to_new_node(dep)
if self.cur_layer_nodes:
self.assign_instance_to_leaves_and_update_weights()
self.convert_bin_to_real()
self.round_leaf_val()
self.sync_tree()
self.sample_weights_post_process()
LOGGER.info("fitting guest decision tree done")
@staticmethod
def traverse_tree(predict_state, data_inst, tree_=None,
decoder=None, sitename=consts.GUEST, split_maskdict=None,
use_missing=None, zero_as_missing=None, missing_dir_maskdict=None, return_leaf_id=False):
nid, tag = predict_state
while tree_[nid].sitename == sitename:
if tree_[nid].is_leaf is True:
return tree_[nid].weight if not return_leaf_id else nid
nid = DecisionTree.go_next_layer(tree_[nid], data_inst, use_missing, zero_as_missing,
None, split_maskdict, missing_dir_maskdict, decoder)
return nid, 1
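# Return convention (added for clarity): a leaf weight (or leaf id) ends traversal on
# the guest side, while (nid, 1) marks a host-owned node, so the sample is shipped to
# hosts in the predict loop below and comes back with an updated position.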
def predict(self, data_inst):
LOGGER.info("start to predict!")
predict_data = data_inst.mapValues(lambda inst: (0, 1))
site_host_send_times = 0
predict_result = None
while True:
traverse_tree = functools.partial(self.traverse_tree,
tree_=self.tree_node,
decoder=self.decode,
sitename=self.sitename,
split_maskdict=self.split_maskdict,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict,
return_leaf_id=False)
predict_data = predict_data.join(data_inst, traverse_tree)
predict_leaf = predict_data.filter(lambda key, value: isinstance(value, tuple) is False)
if predict_result is None:
predict_result = predict_leaf
else:
predict_result = predict_result.union(predict_leaf)
predict_data = predict_data.subtractByKey(predict_leaf)
unleaf_node_count = predict_data.count()
if unleaf_node_count == 0:
self.sync_predict_finish_tag(True, site_host_send_times)
break
self.sync_predict_finish_tag(False, site_host_send_times)
self.sync_predict_data(predict_data, site_host_send_times)
predict_data_host = self.sync_data_predicted_by_host(site_host_send_times)
for i in range(len(predict_data_host)):
predict_data = predict_data.join(predict_data_host[i],
lambda state1_nodeid1, state2_nodeid2:
state1_nodeid1 if state1_nodeid1[
1] == 0 else state2_nodeid2)
site_host_send_times += 1
LOGGER.info("predict finish!")
return predict_result
"""
Tree output
"""
def get_model_meta(self):
model_meta = DecisionTreeModelMeta()
model_meta.criterion_meta.CopyFrom(CriterionMeta(criterion_method=self.criterion_method,
criterion_param=self.criterion_params))
model_meta.max_depth = self.max_depth
model_meta.min_sample_split = self.min_sample_split
model_meta.min_impurity_split = self.min_impurity_split
model_meta.min_leaf_node = self.min_leaf_node
model_meta.use_missing = self.use_missing
model_meta.zero_as_missing = self.zero_as_missing
return model_meta
def set_model_meta(self, model_meta):
self.max_depth = model_meta.max_depth
self.min_sample_split = model_meta.min_sample_split
self.min_impurity_split = model_meta.min_impurity_split
self.min_leaf_node = model_meta.min_leaf_node
self.criterion_method = model_meta.criterion_meta.criterion_method
self.criterion_params = list(model_meta.criterion_meta.criterion_param)
self.use_missing = model_meta.use_missing
self.zero_as_missing = model_meta.zero_as_missing
def get_model_param(self):
model_param = DecisionTreeModelParam()
for node in self.tree_node:
weight, mo_weight = self.mo_weight_extract(node)
model_param.tree_.add(id=node.id,
sitename=node.sitename,
fid=node.fid,
bid=node.bid,
weight=weight,
is_leaf=node.is_leaf,
left_nodeid=node.left_nodeid,
right_nodeid=node.right_nodeid,
missing_dir=node.missing_dir,
mo_weight=mo_weight
)
model_param.split_maskdict.update(self.split_maskdict)
model_param.missing_dir_maskdict.update(self.missing_dir_maskdict)
model_param.leaf_count.update(self.leaf_count)
return model_param
def set_model_param(self, model_param):
self.tree_node = []
for node_param in model_param.tree_:
weight = self.mo_weight_load(node_param)
_node = Node(id=node_param.id,
sitename=node_param.sitename,
fid=node_param.fid,
bid=node_param.bid,
weight=weight,
is_leaf=node_param.is_leaf,
left_nodeid=node_param.left_nodeid,
right_nodeid=node_param.right_nodeid,
missing_dir=node_param.missing_dir)
self.tree_node.append(_node)
self.split_maskdict = dict(model_param.split_maskdict)
self.missing_dir_maskdict = dict(model_param.missing_dir_maskdict)
| 38,119 | 44.927711 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/hetero/hetero_decision_tree_host.py
|
import numpy as np
import functools
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.util import LOGGER
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import DecisionTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import DecisionTreeModelParam
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.decision_tree import DecisionTree
from federatedml.transfer_variable.transfer_class.hetero_decision_tree_transfer_variable import \
HeteroDecisionTreeTransferVariable
from federatedml.util import consts
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.g_h_optim import PackedGHCompressor
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
class HeteroDecisionTreeHost(DecisionTree):
def __init__(self, tree_param):
super(HeteroDecisionTreeHost, self).__init__(tree_param)
# add host side feature importance support
self.feature_importance_type = 'split'
self.encrypted_grad_and_hess = None
self.runtime_idx = 0
self.sitename = consts.HOST # will be modified in self.set_runtime_idx()
self.complete_secure_tree = False
self.host_party_idlist = []
# feature shuffling / missing_dir masking
self.feature_num = -1
self.missing_dir_mask_left = {} # mask for left direction
self.missing_dir_mask_right = {} # mask for right direction
self.split_maskdict = {} # save split value
self.split_feature_dict = {} # save split feature idx
self.missing_dir_maskdict = {} # save missing dir
self.fid_bid_random_mapping = {}
self.inverse_fid_bid_random_mapping = {}
self.bin_num = None
# goss subsample
self.run_goss = False
# transfer variable
self.transfer_inst = HeteroDecisionTreeTransferVariable()
# cipher compressing
self.cipher_compressor = None
self.run_cipher_compressing = True
# code version control
self.new_ver = True
# multi mode
self.mo_tree = False
"""
Setting
"""
def report_init_status(self):
LOGGER.info('reporting initialization status')
LOGGER.info('using new version code {}'.format(self.new_ver))
if self.complete_secure_tree:
LOGGER.info('running complete secure')
if self.run_goss:
LOGGER.info('running goss')
if self.run_cipher_compressing:
LOGGER.info('running cipher compressing')
LOGGER.debug('bin num and feature num: {}/{}'.format(self.bin_num, self.feature_num))
def init(self, flowid, runtime_idx, data_bin, bin_split_points, bin_sparse_points, bin_num,
valid_features,
complete_secure=False,
goss_subsample=False,
cipher_compressing=False,
new_ver=True,
mo_tree=False
):
super(HeteroDecisionTreeHost, self).init_data_and_variable(flowid, runtime_idx, data_bin, bin_split_points,
bin_sparse_points, valid_features, None)
self.check_max_split_nodes()
self.complete_secure_tree = complete_secure
self.run_goss = goss_subsample
self.bin_num = bin_num
self.run_cipher_compressing = cipher_compressing
self.feature_num = self.bin_split_points.shape[0]
self.new_ver = new_ver
self.mo_tree = mo_tree
self.report_init_status()
def set_host_party_idlist(self, l):
self.host_party_idlist = l
"""
Node encode/decode
"""
def generate_missing_dir(self, dep, left_num=3, right_num=3):
"""
randomly generate missing dir mask
"""
rn = np.random.choice(range(left_num + right_num), left_num + right_num, replace=False)
left_dir = rn[0:left_num]
right_dir = rn[left_num:]
self.missing_dir_mask_left[dep] = left_dir
self.missing_dir_mask_right[dep] = right_dir
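# Illustrative example (not in the original source): with left_num=3, right_num=3 a
# permutation of range(6) such as [4, 0, 5, 2, 1, 3] yields left mask {4, 0, 5} and
# right mask {2, 1, 3}; the real missing direction is hidden among several decoys.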
@staticmethod
def generate_fid_bid_random_mapping(feature_num, bin_num):
total_id_num = feature_num * bin_num
mapping = {}
idx = 0
id_list = np.random.choice(range(total_id_num), total_id_num, replace=False)
for fid in range(feature_num):
for bid in range(bin_num):
mapping[(fid, bid)] = int(id_list[idx])
idx += 1
return mapping
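# Illustrative example (not in the original source): feature_num=2, bin_num=3 gives
# total_id_num=6 and a mapping like {(0, 0): 4, (0, 1): 0, ..., (1, 2): 3}, so the
# mask id sent to guest reveals neither the split feature nor the bin index.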
def save_split_info(self, etype="feature_idx", val=None, nid=None):
if etype == "feature_idx":
self.split_feature_dict[nid] = val
elif etype == "feature_val":
self.split_maskdict[nid] = val
elif etype == "missing_dir":
self.missing_dir_maskdict[nid] = val
else:
raise TypeError("encode type %s is not support!" % (str(etype)))
@staticmethod
def decode(dtype="feature_idx", val=None, nid=None, split_maskdict=None, missing_dir_maskdict=None):
if dtype == "feature_idx":
return val
if dtype == "feature_val":
if nid in split_maskdict:
return split_maskdict[nid]
else:
raise ValueError("decode val %s cause error, can't recognize it!" % (str(val)))
if dtype == "missing_dir":
if nid in missing_dir_maskdict:
return missing_dir_maskdict[nid]
else:
raise ValueError("decode val %s cause error, can't recognize it!" % (str(val)))
return TypeError("decode type %s is not support!" % (str(dtype)))
def generate_split_point_masking_variable(self, dep):
# for split point masking
self.generate_missing_dir(dep, 5, 5)
self.fid_bid_random_mapping = self.generate_fid_bid_random_mapping(self.feature_num, self.bin_num)
self.inverse_fid_bid_random_mapping = {v: k for k, v in self.fid_bid_random_mapping.items()}
def unmask_split_info(self, split_info_list, inverse_mask_id_mapping, left_missing_dir, right_missing_dir):
for split_info in split_info_list:
if split_info.mask_id is not None:
fid, bid = inverse_mask_id_mapping[split_info.mask_id]
split_info.best_fid, split_info.best_bid = fid, bid
masked_missing_dir = split_info.missing_dir
if masked_missing_dir in left_missing_dir:
split_info.missing_dir = -1
elif masked_missing_dir in right_missing_dir:
split_info.missing_dir = 1
return split_info_list
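# Note added for clarity: this undoes the masking above -- mask_id is mapped back to
# the real (fid, bid) pair, and the decoy missing_dir drawn from the random masks is
# collapsed back to the canonical -1 (left) / 1 (right) convention.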
def record_split_info(self, split_info_list):
final_split_info = []
for i, split_info in enumerate(split_info_list):
if split_info.best_fid != -1:
LOGGER.debug('sitename is {}, self.sitename is {}'
.format(split_info.sitename, self.sitename))
assert split_info.sitename == self.sitename
self.save_split_info("feature_idx", split_info.best_fid, self.cur_to_split_nodes[i].id)
assert split_info.best_fid is not None
self.save_split_info("feature_val", split_info.best_bid, self.cur_to_split_nodes[i].id)
self.save_split_info("missing_dir", split_info.missing_dir, self.cur_to_split_nodes[i].id)
split_info.mask_id = None
else:
LOGGER.debug('this node can not be further split by host feature: {}'.format(split_info))
final_split_info.append(split_info)
return final_split_info
"""
Federation Functions
"""
def init_compressor_and_sync_gh(self):
LOGGER.info("get encrypted grad and hess")
if self.run_cipher_compressing:
self.cipher_compressor = PackedGHCompressor(mo_mode=self.mo_tree)
self.grad_and_hess = self.transfer_inst.encrypted_grad_and_hess.get(idx=0)
def sync_node_positions(self, dep=-1):
LOGGER.info("get tree node queue of depth {}".format(dep))
node_positions = self.transfer_inst.node_positions.get(idx=0,
suffix=(dep,))
return node_positions
def sync_tree_node_queue(self, dep=-1):
LOGGER.info("get tree node queue of depth {}".format(dep))
self.cur_layer_nodes = self.transfer_inst.tree_node_queue.get(idx=0,
suffix=(dep,))
def sync_encrypted_splitinfo_host(self, encrypted_splitinfo_host, dep=-1, batch=-1):
LOGGER.info("send encrypted splitinfo of depth {}, batch {}".format(dep, batch))
self.transfer_inst.encrypted_splitinfo_host.remote(encrypted_splitinfo_host,
role=consts.GUEST,
idx=-1,
suffix=(dep, batch,))
def sync_federated_best_splitinfo_host(self, dep=-1, batch=-1):
LOGGER.info("get federated best splitinfo of depth {}, batch {}".format(dep, batch))
federated_best_splitinfo_host = self.transfer_inst.federated_best_splitinfo_host.get(idx=0,
suffix=(dep, batch,))
return federated_best_splitinfo_host
def sync_dispatch_node_host(self, dep):
LOGGER.info("get node from host to dispath, depth is {}".format(dep))
dispatch_node_host = self.transfer_inst.dispatch_node_host.get(idx=0,
suffix=(dep,))
return dispatch_node_host
def sync_dispatch_node_host_result(self, dispatch_node_host_result, dep=-1):
LOGGER.info("send host dispatch result, depth is {}".format(dep))
self.transfer_inst.dispatch_node_host_result.remote(dispatch_node_host_result,
role=consts.GUEST,
idx=-1,
suffix=(dep,))
def sync_tree(self, ):
LOGGER.info("sync tree from guest")
self.tree_node = self.transfer_inst.tree.get(idx=0)
def sync_predict_finish_tag(self, recv_times):
LOGGER.info("get the {}-th predict finish tag from guest".format(recv_times))
finish_tag = self.transfer_inst.predict_finish_tag.get(idx=0,
suffix=(recv_times,))
return finish_tag
def sync_predict_data(self, recv_times):
LOGGER.info("srecv predict data to host, recv times is {}".format(recv_times))
predict_data = self.transfer_inst.predict_data.get(idx=0,
suffix=(recv_times,))
return predict_data
def sync_data_predicted_by_host(self, predict_data, send_times):
LOGGER.info("send predicted data by host, send times is {}".format(send_times))
self.transfer_inst.predict_data_by_host.remote(predict_data,
role=consts.GUEST,
idx=0,
suffix=(send_times,))
"""
Tree Updating
"""
@staticmethod
def assign_an_instance(value1, value2, sitename=None, decoder=None,
split_feature_dict=None,
bin_sparse_points=None,
use_missing=False, zero_as_missing=False,
split_maskdict=None,
missing_dir_maskdict=None):
unleaf_state, fid, bid, node_sitename, nodeid, left_nodeid, right_nodeid = value1
if node_sitename != sitename:
return value1
fid = split_feature_dict[nodeid]
bid = decoder("feature_val", bid, nodeid, split_maskdict=split_maskdict)
missing_dir = decoder("missing_dir", 1, nodeid, missing_dir_maskdict=missing_dir_maskdict)
direction = HeteroDecisionTreeHost.make_decision(value2, fid, bid, missing_dir, use_missing, zero_as_missing,
bin_sparse_points[fid])
return (unleaf_state, left_nodeid) if direction else (unleaf_state, right_nodeid)
def assign_instances_to_new_node(self, dispatch_node_host, dep=-1):
LOGGER.info("start to find host dispath of depth {}".format(dep))
dispatch_node_method = functools.partial(self.assign_an_instance,
sitename=self.sitename,
decoder=self.decode,
split_maskdict=self.split_maskdict,
split_feature_dict=self.split_feature_dict,
bin_sparse_points=self.bin_sparse_points,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict)
dispatch_node_host_result = dispatch_node_host.join(self.data_bin, dispatch_node_method)
self.sync_dispatch_node_host_result(dispatch_node_host_result, dep)
def update_instances_node_positions(self):
# join data and inst2node_idx to update current node positions of samples
self.data_with_node_assignments = self.data_bin.join(self.inst2node_idx, lambda v1, v2: (v1, v2))
"""
Pre-Process / Post-Process
"""
def remove_redundant_splitinfo_in_split_maskdict(self, split_nid_used):
LOGGER.info("remove duplicated nodes from split mask dict")
duplicated_nodes = set(self.split_maskdict.keys()) - set(split_nid_used)
for nid in duplicated_nodes:
del self.split_maskdict[nid]
def convert_bin_to_real(self, split_maskdict):
LOGGER.info("convert tree node bins to real value")
split_nid_used = []
for i in range(len(self.tree_node)):
if self.tree_node[i].is_leaf:
continue
if self.tree_node[i].sitename == self.sitename:
fid = self.split_feature_dict[self.tree_node[i].id]
bid = self.decode("feature_val", self.tree_node[i].bid, self.tree_node[i].id, split_maskdict)
# recover real split value
real_splitval = self.bin_split_points[fid][bid]
self.split_maskdict[self.tree_node[i].id] = real_splitval
self.tree_node[i].fid = fid
split_nid_used.append(self.tree_node[i].id)
self.remove_redundant_splitinfo_in_split_maskdict(split_nid_used)
def collect_host_split_feat_importance(self):
for node in self.tree_node:
if node.is_leaf:
continue
elif node.sitename == self.sitename:
LOGGER.debug('sitename are {} {}'.format(node.sitename, self.sitename))
fid = self.split_feature_dict[node.id]
self.update_feature_importance(SplitInfo(sitename=self.sitename, best_fid=fid), False)
"""
Split finding
"""
def get_computing_inst2node_idx(self):
if self.run_goss:
inst2node_idx = self.inst2node_idx.join(self.grad_and_hess, lambda x1, x2: x1)
else:
inst2node_idx = self.inst2node_idx
return inst2node_idx
def compute_best_splits(self, cur_to_split_nodes: list, node_map, dep, batch):
LOGGER.info('solving node batch {}, node num is {}'.format(batch, len(cur_to_split_nodes)))
if not self.complete_secure_tree:
data = self.data_with_node_assignments
inst2node_idx = self.get_computing_inst2node_idx()
node_sample_count = self.count_node_sample_num(inst2node_idx, node_map)
LOGGER.debug('sample count is {}'.format(node_sample_count))
acc_histograms = self.get_local_histograms(dep, data, self.grad_and_hess, node_sample_count,
cur_to_split_nodes, node_map, ret='tb',
hist_sub=True)
split_info_table = self.splitter.host_prepare_split_points(
histograms=acc_histograms,
use_missing=self.use_missing,
valid_features=self.valid_features,
sitename=self.sitename,
left_missing_dir=self.missing_dir_mask_left[dep],
right_missing_dir=self.missing_dir_mask_right[dep],
mask_id_mapping=self.fid_bid_random_mapping,
batch_size=self.bin_num,
cipher_compressor=self.cipher_compressor,
shuffle_random_seed=np.abs(hash((dep, batch))))
# test split info encryption
self.transfer_inst.encrypted_splitinfo_host.remote(split_info_table,
role=consts.GUEST,
idx=-1,
suffix=(dep, batch))
best_split_info = self.transfer_inst.federated_best_splitinfo_host.get(suffix=(dep, batch), idx=0)
unmasked_split_info = self.unmask_split_info(
best_split_info,
self.inverse_fid_bid_random_mapping,
self.missing_dir_mask_left[dep],
self.missing_dir_mask_right[dep])
self.record_split_info(unmasked_split_info)
else:
LOGGER.debug('skip splits computation')
"""
Fit & Predict
"""
def fit(self):
LOGGER.info("begin to fit host decision tree")
self.init_compressor_and_sync_gh()
LOGGER.debug('grad and hess count {}'.format(self.grad_and_hess.count()))
for dep in range(self.max_depth):
LOGGER.debug('At dep {}'.format(dep))
self.sync_tree_node_queue(dep)
self.generate_split_point_masking_variable(dep)
if len(self.cur_layer_nodes) == 0:
break
self.inst2node_idx = self.sync_node_positions(dep)
self.update_instances_node_positions()
batch = 0
for i in range(0, len(self.cur_layer_nodes), self.max_split_nodes):
self.cur_to_split_nodes = self.cur_layer_nodes[i: i + self.max_split_nodes]
self.compute_best_splits(self.cur_to_split_nodes,
node_map=self.get_node_map(self.cur_to_split_nodes),
dep=dep, batch=batch)
batch += 1
dispatch_node_host = self.sync_dispatch_node_host(dep)
self.assign_instances_to_new_node(dispatch_node_host, dep=dep)
self.sync_tree()
self.print_leafs()
# convert bin index to real split-value, and remove redundant nid in split mask dict
self.convert_bin_to_real(split_maskdict=self.split_maskdict)
self.collect_host_split_feat_importance()
LOGGER.info("fitting host decision tree done")
@staticmethod
def traverse_tree(predict_state, data_inst, tree_=None,
decoder=None, split_maskdict=None, sitename=consts.HOST,
use_missing=False, zero_as_missing=False,
missing_dir_maskdict=None):
nid, _ = predict_state
if tree_[nid].sitename != sitename:
return predict_state
while tree_[nid].sitename == sitename:
nid = HeteroDecisionTreeHost.go_next_layer(tree_[nid], data_inst, use_missing, zero_as_missing,
None, split_maskdict, missing_dir_maskdict, decoder)
return nid, 0
def predict(self, data_inst):
LOGGER.info("start to predict!")
site_guest_send_times = 0
while True:
finish_tag = self.sync_predict_finish_tag(site_guest_send_times)
if finish_tag is True:
break
predict_data = self.sync_predict_data(site_guest_send_times)
traverse_tree = functools.partial(self.traverse_tree,
tree_=self.tree_node,
decoder=self.decode,
split_maskdict=self.split_maskdict,
sitename=self.sitename,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing,
missing_dir_maskdict=self.missing_dir_maskdict)
predict_data = predict_data.join(data_inst, traverse_tree)
self.sync_data_predicted_by_host(predict_data, site_guest_send_times)
site_guest_send_times += 1
LOGGER.info("predict finish!")
"""
Tree Output
"""
def get_model_meta(self):
model_meta = DecisionTreeModelMeta()
model_meta.max_depth = self.max_depth
model_meta.min_sample_split = self.min_sample_split
model_meta.min_impurity_split = self.min_impurity_split
model_meta.min_leaf_node = self.min_leaf_node
model_meta.use_missing = self.use_missing
model_meta.zero_as_missing = self.zero_as_missing
return model_meta
def set_model_meta(self, model_meta):
self.max_depth = model_meta.max_depth
self.min_sample_split = model_meta.min_sample_split
self.min_impurity_split = model_meta.min_impurity_split
self.min_leaf_node = model_meta.min_leaf_node
self.use_missing = model_meta.use_missing
self.zero_as_missing = model_meta.zero_as_missing
def get_model_param(self):
model_param = DecisionTreeModelParam()
for node in self.tree_node:
model_param.tree_.add(id=node.id,
sitename=node.sitename,
fid=node.fid,
bid=node.bid,
weight=node.weight,
is_leaf=node.is_leaf,
left_nodeid=node.left_nodeid,
right_nodeid=node.right_nodeid,
missing_dir=node.missing_dir)
model_param.split_maskdict.update(self.split_maskdict)
model_param.missing_dir_maskdict.update(self.missing_dir_maskdict)
return model_param
def set_model_param(self, model_param):
self.tree_node = []
for node_param in model_param.tree_:
_node = Node(id=node_param.id,
sitename=node_param.sitename,
fid=node_param.fid,
bid=node_param.bid,
weight=node_param.weight,
is_leaf=node_param.is_leaf,
left_nodeid=node_param.left_nodeid,
right_nodeid=node_param.right_nodeid,
missing_dir=node_param.missing_dir)
self.tree_node.append(_node)
self.split_maskdict = dict(model_param.split_maskdict)
self.missing_dir_maskdict = dict(model_param.missing_dir_maskdict)
"""
don't have to be implemented
"""
def initialize_root_node(self, *args):
pass
def update_tree(self, *args):
pass
| 23,965 | 41.796429 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/hetero_secoreboost/hetero_secureboost_guest.py
|
import numpy as np
from operator import itemgetter
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.ensemble.boosting import HeteroBoostingGuest
from federatedml.param.boosting_param import HeteroSecureBoostParam, DecisionTreeParam
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.util.anonymous_generator_util import Anonymous
from federatedml.statistic.data_overview import with_weight, get_max_sample_weight
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_importance import FeatureImportance
from federatedml.transfer_variable.transfer_class.hetero_secure_boosting_predict_transfer_variable import \
HeteroSecureBoostTransferVariable
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core import tree_plan as plan
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from federatedml.ensemble.secureboost.secureboost_util.tree_model_io import load_hetero_tree_learner, \
produce_hetero_tree_learner
from federatedml.ensemble.secureboost.secureboost_util.boosting_tree_predict import sbt_guest_predict, \
mix_sbt_guest_predict, EINI_guest_predict
from federatedml.ensemble.secureboost.secureboost_util.subsample import goss_sampling
class HeteroSecureBoostingTreeGuest(HeteroBoostingGuest):
def __init__(self):
super(HeteroSecureBoostingTreeGuest, self).__init__()
self.tree_param = DecisionTreeParam() # decision tree param
self.use_missing = False
self.zero_as_missing = False
self.cur_epoch_idx = -1
self.grad_and_hess = None
self.feature_importances_ = {}
self.model_param = HeteroSecureBoostParam()
self.complete_secure = 0
self.data_alignment_map = {}
self.hetero_sbt_transfer_variable = HeteroSecureBoostTransferVariable()
self.model_name = 'HeteroSecureBoost'
self.max_sample_weight = 1
self.max_sample_weight_computed = False
self.re_compute_goss_sample_weight = False
self.cipher_compressing = False
self.enable_goss = False # GOSS
self.top_rate = None
self.other_rate = None
self.new_ver = True
self.boosting_strategy = consts.STD_TREE # default work mode is std
# fast sbt param
self.tree_num_per_party = 1
self.guest_depth = 0
self.host_depth = 0
self.init_tree_plan = False
self.tree_plan = []
# multi-classification mode
self.multi_mode = consts.SINGLE_OUTPUT
# EINI predict param
self.EINI_inference = False
self.EINI_random_mask = False
def _init_model(self, param: HeteroSecureBoostParam):
super(HeteroSecureBoostingTreeGuest, self)._init_model(param)
self.tree_param = param.tree_param
self.use_missing = param.use_missing
self.zero_as_missing = param.zero_as_missing
self.complete_secure = param.complete_secure
self.enable_goss = param.run_goss
self.top_rate = param.top_rate
self.other_rate = param.other_rate
self.cipher_compressing = param.cipher_compress
self.new_ver = param.new_ver
self.EINI_inference = param.EINI_inference
self.EINI_random_mask = param.EINI_random_mask
# fast sbt param
self.tree_num_per_party = param.tree_num_per_party
self.boosting_strategy = param.boosting_strategy
self.guest_depth = param.guest_depth
self.host_depth = param.host_depth
if self.boosting_strategy == consts.LAYERED_TREE:
param.tree_param.max_depth = param.guest_depth + param.host_depth
if self.use_missing:
self.tree_param.use_missing = self.use_missing
self.tree_param.zero_as_missing = self.zero_as_missing
self.multi_mode = param.multi_mode
def process_sample_weights(self, grad_and_hess, data_with_sample_weight=None):
# add sample weights to gradient and hessian
if data_with_sample_weight is not None:
if with_weight(data_with_sample_weight):
LOGGER.info('weighted sample detected, multiply g/h by weights')
grad_and_hess = grad_and_hess.join(data_with_sample_weight,
lambda v1, v2: (v1[0] * v2.weight, v1[1] * v2.weight))
if not self.max_sample_weight_computed:
self.max_sample_weight = get_max_sample_weight(data_with_sample_weight)
LOGGER.info('max sample weight is {}'.format(self.max_sample_weight))
self.max_sample_weight_computed = True
return grad_and_hess
def get_tree_plan(self, idx):
if not self.init_tree_plan:
tree_plan = plan.create_tree_plan(self.boosting_strategy, k=self.tree_num_per_party,
tree_num=self.boosting_round,
host_list=self.component_properties.host_party_idlist,
complete_secure=self.complete_secure)
self.tree_plan += tree_plan
self.init_tree_plan = True
LOGGER.info('tree plan is {}'.format(self.tree_plan))
return self.tree_plan[idx]
def check_host_number(self, tree_type):
host_num = len(self.component_properties.host_party_idlist)
LOGGER.info('host number is {}'.format(host_num))
if tree_type == plan.tree_type_dict['layered_tree']:
assert host_num == 1, 'only 1 host party is allowed in layered mode'
def compute_grad_and_hess(self, y_hat, y, data_with_sample_weight=None):
LOGGER.info("compute grad and hess")
loss_method = self.loss
if self.task_type == consts.CLASSIFICATION:
grad_and_hess = y.join(y_hat, lambda y, f_val:
(loss_method.compute_grad(y, loss_method.predict(f_val)),
loss_method.compute_hess(y, loss_method.predict(f_val))))
else:
grad_and_hess = y.join(y_hat, lambda y, f_val:
(loss_method.compute_grad(y, f_val),
loss_method.compute_hess(y, f_val)))
grad_and_hess = self.process_sample_weights(grad_and_hess, data_with_sample_weight)
return grad_and_hess
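# For reference (hedged, the actual formulas live in the loss classes): for binary
# classification with sigmoid cross-entropy, p = sigmoid(f), grad = p - y and
# hess = p * (1 - p); for least-squared-error regression grad = 2 * (f - y) with a
# constant hess of 2, matching the h_max bound used when cipher compressing is on.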
@staticmethod
def get_grad_and_hess(g_h, dim=0):
LOGGER.info("get grad and hess of tree {}".format(dim))
grad_and_hess_subtree = g_h.mapValues(
lambda grad_and_hess: (grad_and_hess[0][dim], grad_and_hess[1][dim]))
return grad_and_hess_subtree
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importances_:
self.feature_importances_[fid] = tree_feature_importance[fid]
else:
self.feature_importances_[fid] += tree_feature_importance[fid]
LOGGER.debug('cur feature importance {}'.format(self.feature_importances_))
def align_feature_importance_guest(self, suffix):
"""
receive feature importance from host to update global feature importance
"""
host_feature_importance_list = self.hetero_sbt_transfer_variable.host_feature_importance.get(
idx=-1, suffix=suffix)
# remove host importance, make sure host importance is latest when host anonymous features are updated
pop_key = []
for key in self.feature_importances_:
sitename, fid = key
if consts.GUEST not in sitename:
pop_key.append(key)
for k in pop_key:
self.feature_importances_.pop(k)
for i in host_feature_importance_list:
self.feature_importances_.update(i)
def goss_sample(self):
sampled_gh = goss_sampling(self.grad_and_hess, self.top_rate, self.other_rate)
return sampled_gh
def on_epoch_prepare(self, epoch_idx):
"""
        Prepare g, h, sample weights and sampling at the beginning of every epoch.

        Parameters
        ----------
        epoch_idx : int
            current epoch index

        Returns
        -------
        None
"""
if self.cur_epoch_idx != epoch_idx:
self.grad_and_hess = self.compute_grad_and_hess(self.y_hat, self.y, self.data_inst)
self.cur_epoch_idx = epoch_idx
# goss sampling
if self.enable_goss:
if not self.re_compute_goss_sample_weight:
self.max_sample_weight = self.max_sample_weight * ((1 - self.top_rate) / self.other_rate)
self.grad_and_hess = self.goss_sample()
def preprocess(self):
if self.multi_mode == consts.MULTI_OUTPUT:
# re-set dimension
self.booster_dim = 1
def postprocess(self):
pass
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
self.on_epoch_prepare(epoch_idx)
if self.multi_mode == consts.MULTI_OUTPUT:
g_h = self.grad_and_hess
else:
g_h = self.get_grad_and_hess(self.grad_and_hess, booster_dim)
flow_id = self.generate_flowid(epoch_idx, booster_dim)
        complete_secure = epoch_idx < self.complete_secure
tree_type, target_host_id = None, None
fast_sbt = (self.boosting_strategy == consts.MIX_TREE or self.boosting_strategy == consts.LAYERED_TREE)
if fast_sbt:
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
self.check_host_number(tree_type)
tree = produce_hetero_tree_learner(role=self.role, tree_param=self.tree_param, flow_id=flow_id,
data_bin=self.data_bin, bin_split_points=self.bin_split_points,
bin_sparse_points=self.bin_sparse_points, task_type=self.task_type,
valid_features=self.sample_valid_features(),
host_party_list=self.component_properties.host_party_idlist,
runtime_idx=self.component_properties.local_partyid,
cipher_compress=self.cipher_compressing,
g_h=g_h, encrypter=self.encrypter,
goss_subsample=self.enable_goss,
objective=self.objective_param.objective,
complete_secure=complete_secure, max_sample_weights=self.max_sample_weight,
fast_sbt=fast_sbt, tree_type=tree_type, target_host_id=target_host_id,
guest_depth=self.guest_depth, host_depth=self.host_depth,
mo_tree=(self.multi_mode == consts.MULTI_OUTPUT),
class_num=len(self.classes_) if len(self.classes_) > 2 else 1 # mo parameter
)
tree.fit()
self.update_feature_importance(tree.get_feature_importance())
self.align_feature_importance_guest(suffix=(epoch_idx, booster_dim))
return tree
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
flow_id = self.generate_flowid(epoch_idx, booster_idx)
runtime_idx = self.component_properties.local_partyid
host_list = self.component_properties.host_party_idlist
fast_sbt = (self.boosting_strategy == consts.MIX_TREE or self.boosting_strategy == consts.LAYERED_TREE)
tree_type, target_host_id = None, None
if fast_sbt:
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
tree = load_hetero_tree_learner(role=self.role, tree_param=self.tree_param, model_meta=model_meta,
model_param=model_param,
flow_id=flow_id, runtime_idx=runtime_idx, host_party_list=host_list,
fast_sbt=fast_sbt, tree_type=tree_type, target_host_id=target_host_id)
return tree
def generate_summary(self) -> dict:
summary = {'loss_history': self.history_loss,
'best_iteration': self.callback_variables.best_iteration,
'feature_importance': self.make_readable_feature_importance(self.feature_name_fid_mapping,
self.feature_importances_),
'validation_metrics': self.callback_variables.validation_summary,
'is_converged': self.is_converged}
return summary
@staticmethod
def make_readable_feature_importance(fid_mapping, feature_importances):
"""
replace feature id by real feature name
"""
new_fi = {}
for id_ in feature_importances:
if isinstance(id_, tuple):
if consts.GUEST in id_[0]:
new_fi[fid_mapping[id_[1]]] = feature_importances[id_].importance
else:
new_fi[id_[0] + '_' + str(id_[1])] = feature_importances[
id_].importance
else:
new_fi[fid_mapping[id_]] = feature_importances[id_].importance
return new_fi
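    # Illustrative usage sketch (hypothetical values, not part of FATE): a
    # guest key such as ('guest', 0) is exported under its real feature name,
    # while host keys keep their anonymous form.
    @staticmethod
    def _readable_importance_demo():
        class _Imp:  # hypothetical stand-in for a FeatureImportance record
            importance = 0.5
        mapping = {0: 'age'}
        return HeteroSecureBoostingTreeGuest.make_readable_feature_importance(
            mapping, {('guest', 0): _Imp()})  # -> {'age': 0.5}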
@assert_io_num_rows_equal
def predict(self, data_inst, ret_format='std'):
# standard format, leaf indices, raw score
assert ret_format in ['std', 'leaf', 'raw'], 'illegal ret format'
LOGGER.info('running prediction')
cache_dataset_key = self.predict_data_cache.get_data_key(data_inst)
processed_data = self.data_and_header_alignment(data_inst)
# sync feature importance if host anonymous change in model migration
if not self.on_training:
self.align_feature_importance_guest('predict')
last_round = self.predict_data_cache.predict_data_last_round(cache_dataset_key)
self.sync_predict_round(last_round)
rounds = len(self.boosting_model_list) // self.booster_dim
trees = []
LOGGER.debug('round involved in prediction {}, last round is {}, data key {}'
.format(list(range(last_round, rounds)), last_round, cache_dataset_key))
for idx in range(last_round, rounds):
for booster_idx in range(self.booster_dim):
tree = self.load_learner(self.booster_meta,
self.boosting_model_list[idx * self.booster_dim + booster_idx],
idx, booster_idx)
trees.append(tree)
predict_cache = None
tree_num = len(trees)
if last_round != 0:
predict_cache = self.predict_data_cache.predict_data_at(cache_dataset_key, min(rounds, last_round))
LOGGER.info('load predict cache of round {}'.format(min(rounds, last_round)))
        if tree_num == 0 and predict_cache is not None and ret_format != 'leaf':
return self.score_to_predict_result(data_inst, predict_cache)
if self.boosting_strategy == consts.MIX_TREE:
predict_rs = mix_sbt_guest_predict(
processed_data,
self.hetero_sbt_transfer_variable,
trees,
self.learning_rate,
self.init_score,
self.booster_dim,
predict_cache,
pred_leaf=(
ret_format == 'leaf'))
else:
if self.EINI_inference and not self.on_training: # EINI is for inference stage
sitename = self.role + ':' + str(self.component_properties.local_partyid)
predict_rs = EINI_guest_predict(
processed_data,
trees,
self.learning_rate,
self.init_score,
self.booster_dim,
self.encrypt_param.key_length,
self.hetero_sbt_transfer_variable,
sitename,
self.component_properties.host_party_idlist,
predict_cache,
False)
else:
predict_rs = sbt_guest_predict(
processed_data,
self.hetero_sbt_transfer_variable,
trees,
self.learning_rate,
self.init_score,
self.booster_dim,
predict_cache,
pred_leaf=(
ret_format == 'leaf'))
if ret_format == 'leaf':
return predict_rs # predict result is leaf position
self.predict_data_cache.add_data(cache_dataset_key, predict_rs, cur_boosting_round=rounds)
LOGGER.debug('adding predict rs {}'.format(predict_rs))
LOGGER.debug('last round is {}'.format(self.predict_data_cache.predict_data_last_round(cache_dataset_key)))
if ret_format == 'raw':
return predict_rs
else:
return self.score_to_predict_result(data_inst, predict_rs)
def load_feature_importance(self, feat_importance_param):
param = list(feat_importance_param)
rs_dict = {}
for fp in param:
if consts.GUEST in fp.sitename:
key = (fp.sitename.replace(':', '_'), fp.fid) # guest format
else:
sitename = fp.sitename.replace(':', '_')
anonymous_feat_name = Anonymous().get_suffix_from_anonymous_column(fp.fullname)
key = (sitename, anonymous_feat_name)
importance = FeatureImportance()
importance.from_protobuf(fp)
rs_dict[key] = importance
self.feature_importances_ = rs_dict
def get_model_meta(self):
model_meta = BoostingTreeModelMeta()
model_meta.tree_meta.CopyFrom(self.booster_meta)
model_meta.learning_rate = self.learning_rate
model_meta.num_trees = self.boosting_round
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,
param=self.objective_param.params))
model_meta.use_missing = self.use_missing
model_meta.zero_as_missing = self.zero_as_missing
model_meta.task_type = self.task_type
model_meta.n_iter_no_change = self.n_iter_no_change
model_meta.tol = self.tol
model_meta.boosting_strategy = self.boosting_strategy
model_meta.module = "HeteroSecureBoost"
meta_name = consts.HETERO_SBT_GUEST_MODEL + "Meta"
return meta_name, model_meta
def get_model_param(self):
model_param = BoostingTreeModelParam()
model_param.tree_num = len(self.boosting_model_list)
model_param.tree_dim = self.booster_dim
model_param.trees_.extend(self.boosting_model_list)
model_param.init_score.extend(self.init_score)
model_param.losses.extend(self.history_loss)
model_param.classes_.extend(map(str, self.classes_))
model_param.num_classes = self.num_classes
if self.boosting_strategy == consts.STD_TREE:
model_param.model_name = consts.HETERO_SBT
elif self.boosting_strategy == consts.LAYERED_TREE:
model_param.model_name = consts.HETERO_FAST_SBT_LAYERED
elif self.boosting_strategy == consts.MIX_TREE:
model_param.model_name = consts.HETERO_FAST_SBT_MIX
model_param.best_iteration = self.callback_variables.best_iteration
feature_importances = list(self.feature_importances_.items())
feature_importances = sorted(feature_importances, key=itemgetter(1), reverse=True)
feature_importance_param = []
for (sitename, fid), importance in feature_importances:
if consts.GUEST in sitename:
fullname = self.feature_name_fid_mapping[fid]
else:
fullname = sitename + '_' + str(fid)
sitename = sitename.replace('_', ':')
fid = None
feature_importance_param.append(FeatureImportanceInfo(sitename=sitename, # sitename to distinguish sites
fid=fid,
importance=importance.importance,
fullname=fullname,
importance2=importance.importance_2,
main=importance.main_type
))
model_param.feature_importances.extend(feature_importance_param)
model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
model_param.tree_plan.extend(plan.encode_plan(self.tree_plan))
param_name = consts.HETERO_SBT_GUEST_MODEL + "Param"
return param_name, model_param
def set_model_meta(self, model_meta):
if not self.is_warm_start:
# these hyper parameters are not needed in warm start setting
self.boosting_round = model_meta.num_trees
self.tol = model_meta.tol
self.n_iter_no_change = model_meta.n_iter_no_change
self.bin_num = model_meta.quantile_meta.bin_num
self.learning_rate = model_meta.learning_rate
self.booster_meta = model_meta.tree_meta
self.objective_param.objective = model_meta.objective_meta.objective
self.objective_param.params = list(model_meta.objective_meta.param)
self.task_type = model_meta.task_type
self.boosting_strategy = model_meta.boosting_strategy
def set_model_param(self, model_param):
self.boosting_model_list = list(model_param.trees_)
self.init_score = np.array(list(model_param.init_score))
self.history_loss = list(model_param.losses)
self.classes_ = list(map(int, model_param.classes_))
self.booster_dim = model_param.tree_dim
self.num_classes = model_param.num_classes
self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
self.load_feature_importance(model_param.feature_importances)
# initialize loss function
self.loss = self.get_loss_function()
# init model tree plan if it exists
self.tree_plan = plan.decode_plan(model_param.tree_plan)
| 23,216 | 45.157058 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/hetero_secoreboost/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/hetero_secoreboost/hetero_secureboost_host.py
|
from operator import itemgetter
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_importance import FeatureImportance
from federatedml.ensemble.boosting import HeteroBoostingHost
from federatedml.param.boosting_param import HeteroSecureBoostParam, DecisionTreeParam
from federatedml.transfer_variable.transfer_class.hetero_secure_boosting_predict_transfer_variable import \
HeteroSecureBoostTransferVariable
from federatedml.ensemble.secureboost.secureboost_util.tree_model_io import produce_hetero_tree_learner, \
load_hetero_tree_learner
from federatedml.ensemble.secureboost.secureboost_util.boosting_tree_predict import sbt_host_predict, \
mix_sbt_host_predict, EINI_host_predict
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core import tree_plan as plan
from federatedml.util.anonymous_generator_util import Anonymous
class HeteroSecureBoostingTreeHost(HeteroBoostingHost):
def __init__(self):
super(HeteroSecureBoostingTreeHost, self).__init__()
self.use_missing = False
self.zero_as_missing = False
self.grad_and_hess = None
self.tree_param = DecisionTreeParam() # decision tree param
self.model_param = HeteroSecureBoostParam()
self.complete_secure = 0
self.model_name = 'HeteroSecureBoost'
self.enable_goss = False
self.cipher_compressing = False
self.max_sample_weight = None
self.round_decimal = None
self.new_ver = True
self.feature_importance_aligned = False
self.boosting_strategy = consts.STD_TREE
# fast sbt param
self.tree_num_per_party = 1
self.guest_depth = 0
self.host_depth = 0
self.init_tree_plan = False
self.tree_plan = []
self.feature_importances_ = {}
# EINI predict param
self.EINI_inference = False
self.EINI_random_mask = False
self.EINI_complexity_check = False
self.multi_mode = consts.SINGLE_OUTPUT
self.hetero_sbt_transfer_variable = HeteroSecureBoostTransferVariable()
def _init_model(self, param: HeteroSecureBoostParam):
super(HeteroSecureBoostingTreeHost, self)._init_model(param)
self.tree_param = param.tree_param
self.use_missing = param.use_missing
self.enable_goss = param.run_goss
self.zero_as_missing = param.zero_as_missing
self.complete_secure = param.complete_secure
self.sparse_opt_para = param.sparse_optimization
self.cipher_compressing = param.cipher_compress
self.new_ver = param.new_ver
self.tree_num_per_party = param.tree_num_per_party
self.boosting_strategy = param.boosting_strategy
self.guest_depth = param.guest_depth
self.host_depth = param.host_depth
self.multi_mode = param.multi_mode
self.EINI_inference = param.EINI_inference
self.EINI_random_mask = param.EINI_random_mask
self.EINI_complexity_check = param.EINI_complexity_check
if self.use_missing:
self.tree_param.use_missing = self.use_missing
self.tree_param.zero_as_missing = self.zero_as_missing
def get_tree_plan(self, idx):
if not self.init_tree_plan:
tree_plan = plan.create_tree_plan(self.boosting_strategy, k=self.tree_num_per_party,
tree_num=self.boosting_round,
host_list=self.component_properties.host_party_idlist,
complete_secure=self.complete_secure)
self.tree_plan += tree_plan
self.init_tree_plan = True
LOGGER.info('tree plan is {}'.format(self.tree_plan))
return self.tree_plan[idx]
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importances_:
self.feature_importances_[fid] = tree_feature_importance[fid]
else:
self.feature_importances_[fid] += tree_feature_importance[fid]
def load_feature_importance(self, feat_importance_param):
param = list(feat_importance_param)
rs_dict = {}
for fp in param:
key = fp.fid
importance = FeatureImportance()
importance.from_protobuf(fp)
rs_dict[key] = importance
self.feature_importances_ = rs_dict
def get_anonymous_importance(self):
new_feat_importance = {}
for key in self.feature_importances_:
anonymous_name = self.anonymous_header[self.feature_name_fid_mapping[key]]
party = Anonymous.get_role_from_anonymous_column(anonymous_name)
party_id = Anonymous.get_party_id_from_anonymous_column(anonymous_name)
anonymous_feat = Anonymous.get_suffix_from_anonymous_column(anonymous_name)
new_feat_importance[(party + '_' + party_id, anonymous_feat)] = self.feature_importances_[key]
return new_feat_importance
def align_feature_importance_host(self, suffix):
"""
send feature importance to guest to update global feature importance
"""
new_feat_importance = self.get_anonymous_importance()
self.hetero_sbt_transfer_variable.host_feature_importance.remote(new_feat_importance, suffix=suffix)
def preprocess(self):
if self.multi_mode == consts.MULTI_OUTPUT:
self.booster_dim = 1
def postprocess(self):
pass
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
flow_id = self.generate_flowid(epoch_idx, booster_dim)
        complete_secure = epoch_idx < self.complete_secure
fast_sbt = (self.boosting_strategy == consts.MIX_TREE or self.boosting_strategy == consts.LAYERED_TREE)
tree_type, target_host_id = None, None
if fast_sbt:
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
tree = produce_hetero_tree_learner(role=self.role, tree_param=self.tree_param, flow_id=flow_id,
data_bin=self.data_bin, bin_split_points=self.bin_split_points,
bin_sparse_points=self.bin_sparse_points, task_type=self.task_type,
valid_features=self.sample_valid_features(),
host_party_list=self.component_properties.host_party_idlist,
runtime_idx=self.component_properties.local_partyid,
cipher_compress=self.cipher_compressing,
complete_secure=complete_secure,
fast_sbt=fast_sbt, tree_type=tree_type, target_host_id=target_host_id,
guest_depth=self.guest_depth, host_depth=self.host_depth,
mo_tree=(self.multi_mode == consts.MULTI_OUTPUT), bin_num=self.bin_num
)
tree.fit()
self.update_feature_importance(tree.get_feature_importance())
self.align_feature_importance_host(suffix=(epoch_idx, booster_dim))
return tree
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
flow_id = self.generate_flowid(epoch_idx, booster_idx)
runtime_idx = self.component_properties.local_partyid
fast_sbt = (self.boosting_strategy == consts.MIX_TREE or self.boosting_strategy == consts.LAYERED_TREE)
tree_type, target_host_id = None, None
if fast_sbt:
tree_type, target_host_id = self.get_tree_plan(epoch_idx)
tree = load_hetero_tree_learner(self.role, self.tree_param, model_meta, model_param, flow_id,
runtime_idx,
fast_sbt=fast_sbt, tree_type=tree_type, target_host_id=target_host_id)
return tree
def generate_summary(self) -> dict:
summary = {'best_iteration': self.callback_variables.best_iteration, 'is_converged': self.is_converged}
LOGGER.debug('summary is {}'.format(summary))
return summary
@assert_io_num_rows_equal
def predict(self, data_inst):
LOGGER.info('running prediction')
processed_data = self.data_and_header_alignment(data_inst)
self.set_anonymous_header(processed_data)
# sync feature importance if host anonymous change in model migration
if not self.on_training:
self.align_feature_importance_host('predict')
predict_start_round = self.sync_predict_start_round()
rounds = len(self.boosting_model_list) // self.booster_dim
trees = []
for idx in range(predict_start_round, rounds):
for booster_idx in range(self.booster_dim):
tree = self.load_learner(self.booster_meta,
self.boosting_model_list[idx * self.booster_dim + booster_idx],
idx, booster_idx)
trees.append(tree)
if len(trees) == 0:
LOGGER.info('no tree for predicting, prediction done')
return
if self.boosting_strategy == consts.MIX_TREE:
mix_sbt_host_predict(processed_data, self.hetero_sbt_transfer_variable, trees)
else:
if self.EINI_inference and not self.on_training:
sitename = self.role + ':' + str(self.component_properties.local_partyid)
EINI_host_predict(processed_data, trees, sitename, self.component_properties.local_partyid,
self.component_properties.host_party_idlist, self.booster_dim,
self.hetero_sbt_transfer_variable, self.EINI_complexity_check, self.EINI_random_mask,)
else:
sbt_host_predict(processed_data, self.hetero_sbt_transfer_variable, trees)
def get_model_meta(self):
model_meta = BoostingTreeModelMeta()
model_meta.tree_meta.CopyFrom(self.booster_meta)
model_meta.num_trees = self.boosting_round
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
model_meta.boosting_strategy = self.boosting_strategy
model_meta.module = "HeteroSecureBoost"
meta_name = "HeteroSecureBoostingTreeHostMeta"
return meta_name, model_meta
def get_model_param(self):
model_param = BoostingTreeModelParam()
model_param.tree_num = len(self.boosting_model_list)
model_param.tree_dim = self.booster_dim
model_param.trees_.extend(self.boosting_model_list)
anonymous_name_mapping = {}
for fid, name in self.feature_name_fid_mapping.items():
anonymous_name_mapping[self.anonymous_header[name]] = name
model_param.anonymous_name_mapping.update(anonymous_name_mapping)
model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
if self.boosting_strategy == consts.STD_TREE:
model_param.model_name = consts.HETERO_SBT
elif self.boosting_strategy == consts.LAYERED_TREE:
model_param.model_name = consts.HETERO_FAST_SBT_LAYERED
elif self.boosting_strategy == consts.MIX_TREE:
model_param.model_name = consts.HETERO_FAST_SBT_MIX
model_param.best_iteration = self.callback_variables.best_iteration
model_param.tree_plan.extend(plan.encode_plan(self.tree_plan))
feature_importances = list(self.feature_importances_.items())
feature_importances = sorted(feature_importances, key=itemgetter(1), reverse=True)
feature_importance_param = []
for fid, importance in feature_importances:
feature_importance_param.append(FeatureImportanceInfo(sitename=consts.HOST_LOCAL,
fid=fid,
importance=importance.importance,
fullname=self.feature_name_fid_mapping[fid],
main=importance.main_type
))
model_param.feature_importances.extend(feature_importance_param)
param_name = "HeteroSecureBoostingTreeHostParam"
return param_name, model_param
def set_model_meta(self, model_meta):
if not self.is_warm_start:
self.boosting_round = model_meta.num_trees
self.booster_meta = model_meta.tree_meta
self.bin_num = model_meta.quantile_meta.bin_num
self.boosting_strategy = model_meta.boosting_strategy
def set_model_param(self, model_param):
self.boosting_model_list = list(model_param.trees_)
self.booster_dim = model_param.tree_dim
self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
self.tree_plan = plan.decode_plan(model_param.tree_plan)
self.load_feature_importance(model_param.feature_importances)
# implement abstract function
def check_label(self, *args):
pass
| 13,864 | 46.320819 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/homo_secureboost/homo_secureboost_client.py
|
import copy
import functools
import numpy as np
from typing import List
from operator import itemgetter
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.statistic.data_overview import with_weight
from federatedml.feature.sparse_vector import SparseVector
from federatedml.feature.fate_element_type import NoneType
from federatedml.ensemble import HeteroSecureBoostingTreeGuest
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.param.boosting_param import HomoSecureBoostParam
from federatedml.ensemble.boosting.homo_boosting import HomoBoostingClient
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_importance import FeatureImportance
from federatedml.ensemble.basic_algorithms.decision_tree.homo.homo_decision_tree_client import HomoDecisionTreeClient
make_readable_feature_importance = HeteroSecureBoostingTreeGuest.make_readable_feature_importance
class HomoSecureBoostingTreeClient(HomoBoostingClient):
def __init__(self):
super(HomoSecureBoostingTreeClient, self).__init__()
self.model_name = 'HomoSecureBoost'
self.tree_param = None # decision tree param
self.use_missing = False
self.zero_as_missing = False
self.cur_epoch_idx = -1
self.grad_and_hess = None
self.feature_importance_ = {}
self.model_param = HomoSecureBoostParam()
# memory back end
self.backend = consts.DISTRIBUTED_BACKEND
self.bin_arr, self.sample_id_arr = None, None
# mo tree
self.multi_mode = consts.SINGLE_OUTPUT
def _init_model(self, boosting_param: HomoSecureBoostParam):
super(HomoSecureBoostingTreeClient, self)._init_model(boosting_param)
self.use_missing = boosting_param.use_missing
self.zero_as_missing = boosting_param.zero_as_missing
self.tree_param = boosting_param.tree_param
self.backend = boosting_param.backend
self.multi_mode = boosting_param.multi_mode
if self.use_missing:
self.tree_param.use_missing = self.use_missing
self.tree_param.zero_as_missing = self.zero_as_missing
def get_valid_features(self, epoch_idx, b_idx):
valid_feature = self.transfer_inst.valid_features.get(idx=0, suffix=('valid_features', epoch_idx, b_idx))
return valid_feature
def process_sample_weights(self, grad_and_hess, data_with_sample_weight=None):
# add sample weights to gradient and hessian
if data_with_sample_weight is not None:
if with_weight(data_with_sample_weight):
LOGGER.info('weighted sample detected, multiply g/h by weights')
grad_and_hess = grad_and_hess.join(data_with_sample_weight,
lambda v1, v2: (v1[0] * v2.weight, v1[1] * v2.weight))
return grad_and_hess
def compute_local_grad_and_hess(self, y_hat, data_with_sample_weight):
loss_method = self.loss
if self.task_type == consts.CLASSIFICATION:
grad_and_hess = self.y.join(y_hat, lambda y, f_val:
(loss_method.compute_grad(y, loss_method.predict(f_val)),
loss_method.compute_hess(y, loss_method.predict(f_val))))
else:
grad_and_hess = self.y.join(y_hat, lambda y, f_val:
(loss_method.compute_grad(y, f_val),
loss_method.compute_hess(y, f_val)))
grad_and_hess = self.process_sample_weights(grad_and_hess, data_with_sample_weight)
return grad_and_hess
@staticmethod
def get_subtree_grad_and_hess(g_h, t_idx: int):
"""
grad and hess of sub tree
"""
LOGGER.info("get grad and hess of tree {}".format(t_idx))
grad_and_hess_subtree = g_h.mapValues(
lambda grad_and_hess: (grad_and_hess[0][t_idx], grad_and_hess[1][t_idx]))
return grad_and_hess_subtree
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importance_:
self.feature_importance_[fid] = tree_feature_importance[fid]
else:
self.feature_importance_[fid] += tree_feature_importance[fid]
"""
Functions for memory backends
"""
@staticmethod
def _handle_zero_as_missing(inst, feat_num, missing_bin_idx):
"""
        This is for the use_missing + zero_as_missing case
"""
sparse_vec = inst.features.sparse_vec
arr = np.zeros(feat_num, dtype=np.uint8) + missing_bin_idx
for k, v in sparse_vec.items():
if v != NoneType():
arr[k] = v
inst.features = arr
return inst
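    # Illustrative sketch (hypothetical values, not part of FATE): with
    # zero_as_missing, every position absent from the sparse vector defaults
    # to the missing bin, and only explicitly stored values are kept.
    @staticmethod
    def _zero_as_missing_demo():
        import numpy as np
        feat_num, missing_bin_idx = 4, 8
        sparse_vec = {1: 3}  # bin ids of explicitly stored feature values
        arr = np.zeros(feat_num, dtype=np.uint8) + missing_bin_idx
        for k, v in sparse_vec.items():
            arr[k] = v
        return arr  # -> array([8, 3, 8, 8], dtype=uint8)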
@staticmethod
def _map_missing_bin(inst, bin_index):
arr_bin = copy.deepcopy(inst.features)
arr_bin[arr_bin == NoneType()] = bin_index
inst.features = arr_bin
return inst
@staticmethod
def _fill_nan(inst):
arr = copy.deepcopy(inst.features)
nan_index = np.isnan(arr)
        arr = arr.astype(object)
arr[nan_index] = NoneType()
inst.features = arr
return inst
@staticmethod
def _sparse_recover(inst, feat_num):
arr = np.zeros(feat_num)
for k, v in inst.features.sparse_vec.items():
arr[k] = v
inst.features = arr
return inst
def data_preporcess(self, data_inst):
"""
override parent function
"""
need_transform_to_sparse = self.backend == consts.DISTRIBUTED_BACKEND or \
(self.backend == consts.MEMORY_BACKEND and self.use_missing and self.zero_as_missing)
backup_schema = copy.deepcopy(data_inst.schema)
if self.backend == consts.MEMORY_BACKEND:
# memory backend only support dense format input
data_example = data_inst.take(1)[0][1]
if isinstance(data_example.features, SparseVector):
recover_func = functools.partial(self._sparse_recover, feat_num=len(data_inst.schema['header']))
data_inst = data_inst.mapValues(recover_func)
data_inst.schema = backup_schema
if need_transform_to_sparse:
data_inst = self.data_alignment(data_inst)
elif self.use_missing:
# fill nan
data_inst = data_inst.mapValues(self._fill_nan)
data_inst.schema = backup_schema
self.data_bin, self.bin_split_points, self.bin_sparse_points = self.federated_binning(data_inst)
if self.backend == consts.MEMORY_BACKEND:
if self.use_missing and self.zero_as_missing:
feat_num = len(self.bin_split_points)
func = functools.partial(self._handle_zero_as_missing, feat_num=feat_num, missing_bin_idx=self.bin_num)
self.data_bin = self.data_bin.mapValues(func)
elif self.use_missing: # use missing only
missing_bin_index = self.bin_num
func = functools.partial(self._map_missing_bin, bin_index=missing_bin_index)
self.data_bin = self.data_bin.mapValues(func)
self._collect_data_arr(self.data_bin)
def _collect_data_arr(self, bin_arr_table):
bin_arr = []
id_list = []
for id_, inst in bin_arr_table.collect():
bin_arr.append(inst.features)
id_list.append(id_)
self.bin_arr = np.asfortranarray(np.stack(bin_arr, axis=0).astype(np.uint8))
self.sample_id_arr = np.array(id_list)
def preprocess(self):
if self.multi_mode == consts.MULTI_OUTPUT:
self.booster_dim = 1
LOGGER.debug('multi mode tree dim reset to 1')
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
valid_features = self.get_valid_features(epoch_idx, booster_dim)
LOGGER.debug('valid features are {}'.format(valid_features))
if self.cur_epoch_idx != epoch_idx:
# update g/h every epoch
self.grad_and_hess = self.compute_local_grad_and_hess(self.y_hat, self.data_inst)
self.cur_epoch_idx = epoch_idx
if self.multi_mode == consts.MULTI_OUTPUT:
g_h = self.grad_and_hess
else:
g_h = self.get_subtree_grad_and_hess(self.grad_and_hess, booster_dim)
flow_id = self.generate_flowid(epoch_idx, booster_dim)
new_tree = HomoDecisionTreeClient(
self.tree_param,
self.data_bin,
self.bin_split_points,
self.bin_sparse_points,
g_h,
valid_feature=valid_features,
epoch_idx=epoch_idx,
role=self.role,
flow_id=flow_id,
tree_idx=booster_dim,
mode='train')
if self.backend == consts.DISTRIBUTED_BACKEND:
new_tree.fit()
elif self.backend == consts.MEMORY_BACKEND:
# memory backend needed variable
LOGGER.debug('running memory fit')
new_tree.arr_bin_data = self.bin_arr
new_tree.bin_num = self.bin_num
new_tree.sample_id_arr = self.sample_id_arr
new_tree.memory_fit()
self.update_feature_importance(new_tree.get_feature_importance())
return new_tree
@staticmethod
def predict_helper(data, tree_list: List[HomoDecisionTreeClient], init_score, zero_as_missing, use_missing,
learning_rate, class_num=1):
weight_list = []
for tree in tree_list:
weight = tree.traverse_tree(data, tree.tree_node, use_missing=use_missing, zero_as_missing=zero_as_missing)
weight_list.append(weight)
weights = np.array(weight_list)
        if class_num > 2:
            weights = weights.reshape((-1, class_num))
        return np.sum(weights * learning_rate, axis=0) + init_score
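    # Illustrative sketch (hypothetical numbers, not part of FATE): the final
    # boosting score is init_score + learning_rate * (sum of per-tree leaf
    # weights), which is what predict_helper computes for each sample.
    @staticmethod
    def _score_aggregation_demo():
        import numpy as np
        leaf_weights = np.array([0.3, -0.1, 0.2])  # one leaf weight per tree
        init_score, learning_rate = 0.0, 0.3
        return np.sum(leaf_weights * learning_rate, axis=0) + init_score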
def fast_homo_tree_predict(self, data_inst, ret_format='std'):
assert ret_format in ['std', 'raw'], 'illegal ret format'
LOGGER.info('running fast homo tree predict')
to_predict_data = self.data_and_header_alignment(data_inst)
tree_list = []
rounds = len(self.boosting_model_list) // self.booster_dim
for idx in range(0, rounds):
for booster_idx in range(self.booster_dim):
model = self.load_learner(self.booster_meta,
self.boosting_model_list[idx * self.booster_dim + booster_idx],
idx, booster_idx)
tree_list.append(model)
func = functools.partial(self.predict_helper, tree_list=tree_list, init_score=self.init_score,
zero_as_missing=self.zero_as_missing, use_missing=self.use_missing,
learning_rate=self.learning_rate, class_num=self.booster_dim)
predict_rs = to_predict_data.mapValues(func)
if ret_format == 'std':
return self.score_to_predict_result(data_inst, predict_rs)
elif ret_format == 'raw':
return predict_rs
else:
raise ValueError('illegal ret format')
@assert_io_num_rows_equal
def predict(self, data_inst, ret_format='std'):
return self.fast_homo_tree_predict(data_inst, ret_format=ret_format)
def generate_summary(self) -> dict:
summary = {'feature_importance': make_readable_feature_importance(self.feature_name_fid_mapping,
self.feature_importance_),
'validation_metrics': self.callback_variables.validation_summary}
return summary
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
tree_inst = HomoDecisionTreeClient(tree_param=self.tree_param, mode='predict')
tree_inst.load_model(model_meta=model_meta, model_param=model_param)
return tree_inst
def load_feature_importance(self, feat_importance_param):
param = list(feat_importance_param)
rs_dict = {}
for fp in param:
key = fp.fid
importance = FeatureImportance()
importance.from_protobuf(fp)
rs_dict[key] = importance
self.feature_importance_ = rs_dict
        LOGGER.debug('load feature importance: {}'.format(self.feature_importance_))
def set_model_param(self, model_param):
self.boosting_model_list = list(model_param.trees_)
self.init_score = np.array(list(model_param.init_score))
self.classes_ = list(map(int, model_param.classes_))
self.booster_dim = model_param.tree_dim
self.num_classes = model_param.num_classes
self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
self.load_feature_importance(model_param.feature_importances)
# initialize loss function
self.loss = self.get_loss_function()
def set_model_meta(self, model_meta):
if not self.is_warm_start:
self.boosting_round = model_meta.num_trees
self.n_iter_no_change = model_meta.n_iter_no_change
self.tol = model_meta.tol
self.bin_num = model_meta.quantile_meta.bin_num
self.learning_rate = model_meta.learning_rate
self.booster_meta = model_meta.tree_meta
self.objective_param.objective = model_meta.objective_meta.objective
self.objective_param.params = list(model_meta.objective_meta.param)
self.task_type = model_meta.task_type
def get_model_param(self):
model_param = BoostingTreeModelParam()
model_param.tree_num = len(list(self.boosting_model_list))
model_param.tree_dim = self.booster_dim
model_param.trees_.extend(self.boosting_model_list)
model_param.init_score.extend(self.init_score)
model_param.classes_.extend(map(str, self.classes_))
model_param.num_classes = self.num_classes
model_param.best_iteration = -1
model_param.model_name = consts.HOMO_SBT
feature_importance = list(self.feature_importance_.items())
feature_importance = sorted(feature_importance, key=itemgetter(1), reverse=True)
feature_importance_param = []
for fid, importance in feature_importance:
feature_importance_param.append(FeatureImportanceInfo(fid=fid,
fullname=self.feature_name_fid_mapping[fid],
sitename=self.role,
importance=importance.importance,
importance2=importance.importance_2,
main=importance.main_type
))
model_param.feature_importances.extend(feature_importance_param)
model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
param_name = "HomoSecureBoostingTreeGuestParam"
return param_name, model_param
def get_model_meta(self):
model_meta = BoostingTreeModelMeta()
model_meta.tree_meta.CopyFrom(self.booster_meta)
model_meta.learning_rate = self.learning_rate
model_meta.num_trees = self.boosting_round
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,
param=self.objective_param.params))
model_meta.task_type = self.task_type
model_meta.n_iter_no_change = self.n_iter_no_change
model_meta.tol = self.tol
model_meta.use_missing = self.use_missing
model_meta.zero_as_missing = self.zero_as_missing
model_meta.module = 'HomoSecureBoost'
meta_name = "HomoSecureBoostingTreeGuestMeta"
return meta_name, model_meta
| 16,769 | 41.348485 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/homo_secureboost/homo_secureboost_arbiter.py
|
import numpy as np
from numpy import random
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.ensemble.boosting.homo_boosting import HomoBoostingArbiter
from federatedml.param.boosting_param import HomoSecureBoostParam
from federatedml.ensemble.basic_algorithms.decision_tree.homo.homo_decision_tree_arbiter import HomoDecisionTreeArbiter
class HomoSecureBoostingTreeArbiter(HomoBoostingArbiter):
def __init__(self):
super(HomoSecureBoostingTreeArbiter, self).__init__()
self.model_name = 'HomoSecureBoost'
self.tree_param = None # decision tree param
self.use_missing = False
self.zero_as_missing = False
self.cur_epoch_idx = -1
self.grad_and_hess = None
self.feature_importances_ = {}
self.model_param = HomoSecureBoostParam()
self.multi_mode = consts.SINGLE_OUTPUT
def _init_model(self, boosting_param: HomoSecureBoostParam):
super(HomoSecureBoostingTreeArbiter, self)._init_model(boosting_param)
self.use_missing = boosting_param.use_missing
self.zero_as_missing = boosting_param.zero_as_missing
self.tree_param = boosting_param.tree_param
self.multi_mode = boosting_param.multi_mode
if self.use_missing:
self.tree_param.use_missing = self.use_missing
self.tree_param.zero_as_missing = self.zero_as_missing
def send_valid_features(self, valid_features, epoch_idx, b_idx):
self.transfer_inst.valid_features.remote(valid_features, idx=-1, suffix=('valid_features', epoch_idx, b_idx))
def sample_valid_features(self):
LOGGER.info("sample valid features")
chosen_feature = random.choice(range(0, self.feature_num),
max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
valid_features = [False for i in range(self.feature_num)]
for fid in chosen_feature:
valid_features[fid] = True
return valid_features
def preprocess(self):
if self.multi_mode == consts.MULTI_OUTPUT:
self.booster_dim = 1
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
valid_feature = self.sample_valid_features()
self.send_valid_features(valid_feature, epoch_idx, booster_dim)
flow_id = self.generate_flowid(epoch_idx, booster_dim)
new_tree = HomoDecisionTreeArbiter(self.tree_param, valid_feature=valid_feature, epoch_idx=epoch_idx,
flow_id=flow_id, tree_idx=booster_dim)
new_tree.fit()
return new_tree
def generate_summary(self) -> dict:
summary = {'loss_history': self.history_loss}
return summary
    # homo tree arbiter does not save any model
def get_cur_model(self):
return None
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
pass
def set_model_param(self, model_param):
pass
def set_model_meta(self, model_meta):
pass
def get_model_param(self):
pass
def get_model_meta(self):
pass
| 3,150 | 35.218391 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/homo_secureboost/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/secureboost_util/tree_model_io.py
|
from federatedml.param.boosting_param import DecisionTreeParam
from federatedml.ensemble.basic_algorithms import HeteroFastDecisionTreeGuest, HeteroFastDecisionTreeHost, \
HeteroDecisionTreeGuest, HeteroDecisionTreeHost
from federatedml.util import consts
def produce_hetero_tree_learner(role, tree_param: DecisionTreeParam, flow_id, data_bin, bin_split_points,
bin_sparse_points, task_type, valid_features, host_party_list,
runtime_idx,
cipher_compress=True,
mo_tree=False,
class_num=1,
g_h=None, encrypter=None, # guest only
goss_subsample=False, complete_secure=False,
max_sample_weights=1.0,
objective=None,
bin_num=None, # host only
fast_sbt=False,
tree_type=None, target_host_id=None, # fast sbt only
guest_depth=2, host_depth=3 # fast sbt only
):
if role == consts.GUEST:
if not fast_sbt:
tree = HeteroDecisionTreeGuest(tree_param)
else:
tree = HeteroFastDecisionTreeGuest(tree_param)
tree.set_tree_work_mode(tree_type, target_host_id)
tree.set_layered_depth(guest_depth, host_depth)
tree.init(flowid=flow_id,
data_bin=data_bin,
bin_split_points=bin_split_points,
bin_sparse_points=bin_sparse_points,
grad_and_hess=g_h,
encrypter=encrypter,
task_type=task_type,
valid_features=valid_features,
host_party_list=host_party_list,
runtime_idx=runtime_idx,
goss_subsample=goss_subsample,
complete_secure=complete_secure,
cipher_compressing=cipher_compress,
max_sample_weight=max_sample_weights,
mo_tree=mo_tree,
class_num=class_num,
objective=objective
)
elif role == consts.HOST:
if not fast_sbt:
tree = HeteroDecisionTreeHost(tree_param)
else:
tree = HeteroFastDecisionTreeHost(tree_param)
tree.set_tree_work_mode(tree_type, target_host_id)
tree.set_layered_depth(guest_depth, host_depth)
tree.set_self_host_id(runtime_idx)
tree.set_host_party_idlist(host_party_list)
tree.init(flowid=flow_id,
valid_features=valid_features,
data_bin=data_bin,
bin_split_points=bin_split_points,
bin_sparse_points=bin_sparse_points,
runtime_idx=runtime_idx,
goss_subsample=goss_subsample,
complete_secure=complete_secure,
cipher_compressing=cipher_compress,
bin_num=bin_num,
mo_tree=mo_tree
)
else:
raise ValueError('unknown role: {}'.format(role))
return tree
def load_hetero_tree_learner(role, tree_param, model_meta, model_param, flow_id, runtime_idx, host_party_list=None,
fast_sbt=False, tree_type=None, target_host_id=None):
if role == consts.HOST:
if fast_sbt:
tree = HeteroFastDecisionTreeHost(tree_param)
else:
tree = HeteroDecisionTreeHost(tree_param)
tree.load_model(model_meta, model_param)
tree.set_flowid(flow_id)
tree.set_runtime_idx(runtime_idx)
if fast_sbt:
tree.set_tree_work_mode(tree_type, target_host_id)
tree.set_self_host_id(runtime_idx)
elif role == consts.GUEST:
if fast_sbt:
tree = HeteroFastDecisionTreeGuest(tree_param)
else:
tree = HeteroDecisionTreeGuest(tree_param)
tree.load_model(model_meta, model_param)
tree.set_flowid(flow_id)
tree.set_runtime_idx(runtime_idx)
tree.set_host_party_idlist(host_party_list)
if fast_sbt:
tree.set_tree_work_mode(tree_type, target_host_id)
else:
raise ValueError('unknown role: {}'.format(role))
return tree
| 4,470 | 38.219298 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/secureboost_util/boosting_tree_predict.py
|
import functools
import numpy as np
import random
from typing import List
from federatedml.util import consts
from federatedml.secureprotol import PaillierEncrypt
from federatedml.ensemble.basic_algorithms import HeteroDecisionTreeGuest, HeteroDecisionTreeHost, \
HeteroFastDecisionTreeGuest, HeteroFastDecisionTreeHost
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.decision_tree import DecisionTree, Node
from federatedml.util import LOGGER
from federatedml.transfer_variable.transfer_class.hetero_secure_boosting_predict_transfer_variable import \
HeteroSecureBoostTransferVariable
"""
Hetero guest predict utils
"""
def generate_leaf_pos_dict(x, tree_num, np_int_type=np.int8):
"""
    x: placeholder, only occupies the first parameter position
    return: a dict with a numpy array recording each sample's current node position in every tree,
        and a boolean array marking which trees have reached a leaf node
"""
node_pos = np.zeros(tree_num, dtype=np_int_type)
    reach_leaf_node = np.zeros(tree_num, dtype=bool)
return {'node_pos': node_pos, 'reach_leaf_node': reach_leaf_node}
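# Illustrative usage sketch (not part of the original module): each sample
# starts at node 0 of every tree, and the boolean mask flips to True once
# that tree's traversal reaches a leaf.
def _leaf_pos_dict_demo():
    pos = generate_leaf_pos_dict(None, tree_num=3)
    # pos['node_pos'] -> array([0, 0, 0], dtype=int8)
    # pos['reach_leaf_node'] -> array([False, False, False])
    return pos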
def guest_traverse_a_tree(tree: HeteroDecisionTreeGuest, sample, cur_node_idx):
reach_leaf = False
# only need nid here, predict state is not needed
rs = tree.traverse_tree(tree_=tree.tree_node, data_inst=sample, predict_state=(cur_node_idx, -1),
decoder=tree.decode, sitename=tree.sitename, use_missing=tree.use_missing,
split_maskdict=tree.split_maskdict, missing_dir_maskdict=tree.missing_dir_maskdict,
zero_as_missing=tree.zero_as_missing, return_leaf_id=True)
if not isinstance(rs, tuple):
reach_leaf = True
leaf_id = rs
return leaf_id, reach_leaf
else:
cur_node_idx = rs[0]
return cur_node_idx, reach_leaf
def guest_traverse_trees(node_pos, sample, trees: List[HeteroDecisionTreeGuest]):
if node_pos['reach_leaf_node'].all():
return node_pos
for t_idx, tree in enumerate(trees):
cur_node_idx = node_pos['node_pos'][t_idx]
# reach leaf
if cur_node_idx == -1:
continue
rs, reach_leaf = guest_traverse_a_tree(tree, sample, cur_node_idx)
if reach_leaf:
node_pos['reach_leaf_node'][t_idx] = True
node_pos['node_pos'][t_idx] = rs
return node_pos
def merge_predict_pos(node_pos1, node_pos2):
pos_arr1 = node_pos1['node_pos']
pos_arr2 = node_pos2['node_pos']
stack_arr = np.stack([pos_arr1, pos_arr2])
node_pos1['node_pos'] = np.max(stack_arr, axis=0)
return node_pos1
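# Illustrative sketch (hypothetical values, not part of FATE): guest and host
# each advance a sample only through the split nodes they own, and child node
# ids are always larger than their parent's, so the element-wise max recovers
# the furthest position either party reached.
def _merge_predict_pos_demo():
    import numpy as np
    guest = {'node_pos': np.array([3, 0, -1])}
    host = {'node_pos': np.array([0, 5, -1])}
    return merge_predict_pos(guest, host)  # node_pos -> array([ 3,  5, -1])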
def add_y_hat(leaf_pos, init_score, learning_rate, trees: List[HeteroDecisionTreeGuest], multi_class_num=None):
# finally node pos will hold weights
weights = []
for leaf_idx, tree in zip(leaf_pos, trees):
weights.append(tree.tree_node[int(leaf_idx)].weight)
weights = np.array(weights)
if multi_class_num > 2:
weights = weights.reshape((-1, multi_class_num))
return np.sum(weights * learning_rate, axis=0) + init_score
def get_predict_scores(
leaf_pos,
learning_rate,
init_score,
trees: List[HeteroDecisionTreeGuest],
multi_class_num=-1,
predict_cache=None):
if predict_cache:
init_score = 0 # prevent init_score re-add
predict_func = functools.partial(add_y_hat,
learning_rate=learning_rate, init_score=init_score, trees=trees,
multi_class_num=multi_class_num)
predict_result = leaf_pos.mapValues(predict_func)
if predict_cache:
predict_result = predict_result.join(predict_cache, lambda v1, v2: v1 + v2)
return predict_result
def save_leaf_pos_helper(v1, v2):
reach_leaf_idx = v2['reach_leaf_node']
    select_idx = reach_leaf_idx & (v2['node_pos'] != -1)  # reach leaf and not yet recorded (recorded positions are masked as -1)
v1[select_idx] = v2['node_pos'][select_idx]
return v1
def mask_leaf_pos(v):
reach_leaf_idx = v['reach_leaf_node']
v['node_pos'][reach_leaf_idx] = -1
return v
def save_leaf_pos_and_mask_leaf_pos(node_pos_tb, final_leaf_pos):
# save leaf pos
saved_leaf_pos = final_leaf_pos.join(node_pos_tb, save_leaf_pos_helper)
rest_part = final_leaf_pos.subtractByKey(saved_leaf_pos)
final_leaf_pos = saved_leaf_pos.union(rest_part)
# mask leaf pos
node_pos_tb = node_pos_tb.mapValues(mask_leaf_pos)
return node_pos_tb, final_leaf_pos
def merge_leaf_pos(pos1, pos2):
return pos1 + pos2
def traverse_guest_local_trees(node_pos, sample, trees: List[HeteroFastDecisionTreeGuest]):
"""
    in mix mode, a sample can reach a leaf directly
"""
for t_idx, tree in enumerate(trees):
cur_node_idx = node_pos[t_idx]
if not tree.use_guest_feat_only_predict_mode:
continue
rs, reach_leaf = guest_traverse_a_tree(tree, sample, cur_node_idx)
node_pos[t_idx] = rs
return node_pos
"""
Hetero guest predict function
"""
def get_dtype(max_int):
if max_int < (2**8) / 2:
return np.int8
elif max_int < (2**16) / 2:
return np.int16
else:
return np.int64
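# Illustrative usage sketch (not part of the original module): node indices
# are bounded by 2 ** max_depth, so shallow trees fit in a single byte.
def _get_dtype_demo():
    # -> (numpy.int8, numpy.int16, numpy.int64)
    return get_dtype(2 ** 4), get_dtype(2 ** 10), get_dtype(2 ** 20)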
def sbt_guest_predict(data_inst, transfer_var: HeteroSecureBoostTransferVariable,
trees: List[HeteroDecisionTreeGuest], learning_rate, init_score, booster_dim,
predict_cache=None, pred_leaf=False):
tree_num = len(trees)
max_depth = trees[0].max_depth
max_int = 2 ** max_depth
dtype = get_dtype(max_int)
LOGGER.debug('chosen np dtype is {}'.format(dtype))
generate_func = functools.partial(generate_leaf_pos_dict, tree_num=tree_num, np_int_type=dtype)
node_pos_tb = data_inst.mapValues(generate_func) # record node pos
final_leaf_pos = data_inst.mapValues(lambda x: np.zeros(tree_num, dtype=dtype) + np.nan) # record final leaf pos
traverse_func = functools.partial(guest_traverse_trees, trees=trees)
comm_round = 0
while True:
# LOGGER.info('cur predict round is {}'.format(comm_round))
node_pos_tb = node_pos_tb.join(data_inst, traverse_func)
node_pos_tb, final_leaf_pos = save_leaf_pos_and_mask_leaf_pos(node_pos_tb, final_leaf_pos)
# remove sample that reaches leaves of all trees
reach_leaf_samples = node_pos_tb.filter(lambda key, value: value['reach_leaf_node'].all())
node_pos_tb = node_pos_tb.subtractByKey(reach_leaf_samples)
if node_pos_tb.count() == 0:
transfer_var.predict_stop_flag.remote(True, idx=-1, suffix=(comm_round,))
break
transfer_var.predict_stop_flag.remote(False, idx=-1, suffix=(comm_round,))
transfer_var.guest_predict_data.remote(node_pos_tb, idx=-1, suffix=(comm_round,))
host_pos_tbs = transfer_var.host_predict_data.get(idx=-1, suffix=(comm_round,))
for host_pos_tb in host_pos_tbs:
node_pos_tb = node_pos_tb.join(host_pos_tb, merge_predict_pos)
comm_round += 1
if pred_leaf: # return leaf position only
return final_leaf_pos
else: # get final predict scores from leaf pos
predict_result = get_predict_scores(leaf_pos=final_leaf_pos, learning_rate=learning_rate,
init_score=init_score, trees=trees,
multi_class_num=booster_dim, predict_cache=predict_cache)
return predict_result
def mix_sbt_guest_predict(data_inst, transfer_var: HeteroSecureBoostTransferVariable,
trees: List[HeteroDecisionTreeGuest], learning_rate, init_score, booster_dim,
predict_cache=None, pred_leaf=False):
LOGGER.info('running mix mode predict')
tree_num = len(trees)
node_pos = data_inst.mapValues(lambda x: np.zeros(tree_num, dtype=np.int64))
# traverse local trees
traverse_func = functools.partial(traverse_guest_local_trees, trees=trees)
guest_leaf_pos = node_pos.join(data_inst, traverse_func)
# get leaf node from other host parties
host_leaf_pos_list = transfer_var.host_predict_data.get(idx=-1)
for host_leaf_pos in host_leaf_pos_list:
guest_leaf_pos = guest_leaf_pos.join(host_leaf_pos, merge_leaf_pos)
if pred_leaf: # predict leaf, return leaf position only
return guest_leaf_pos
else:
predict_result = get_predict_scores(leaf_pos=guest_leaf_pos, learning_rate=learning_rate,
init_score=init_score, trees=trees,
multi_class_num=booster_dim, predict_cache=predict_cache)
return predict_result
"""
Hetero host predict utils
"""
def host_traverse_a_tree(tree: HeteroDecisionTreeHost, sample, cur_node_idx):
nid, _ = tree.traverse_tree(predict_state=(cur_node_idx, -1), data_inst=sample,
decoder=tree.decode, split_maskdict=tree.split_maskdict,
missing_dir_maskdict=tree.missing_dir_maskdict, sitename=tree.sitename,
tree_=tree.tree_node, zero_as_missing=tree.zero_as_missing,
use_missing=tree.use_missing)
return nid, _
def host_traverse_trees(sample, leaf_pos, trees: List[HeteroDecisionTreeHost]):
for t_idx, tree in enumerate(trees):
cur_node_idx = leaf_pos['node_pos'][t_idx]
# idx is set as -1 when a sample reaches leaf
if cur_node_idx == -1:
continue
nid, _ = host_traverse_a_tree(tree, sample, cur_node_idx)
leaf_pos['node_pos'][t_idx] = nid
return leaf_pos
def traverse_host_local_trees(node_pos, sample, trees: List[HeteroFastDecisionTreeHost]):
"""
    in mix mode, a sample can reach a leaf directly
"""
for i in range(len(trees)):
tree = trees[i]
if len(tree.tree_node) == 0: # this tree belongs to other party because it has no tree node
continue
leaf_id = tree.host_local_traverse_tree(sample, tree.tree_node, use_missing=tree.use_missing,
zero_as_missing=tree.zero_as_missing)
node_pos[i] = leaf_id
return node_pos
"""
Hetero host predict function
"""
def sbt_host_predict(data_inst, transfer_var: HeteroSecureBoostTransferVariable, trees: List[HeteroDecisionTreeHost]):
comm_round = 0
traverse_func = functools.partial(host_traverse_trees, trees=trees)
while True:
LOGGER.debug('cur predict round is {}'.format(comm_round))
stop_flag = transfer_var.predict_stop_flag.get(idx=0, suffix=(comm_round,))
if stop_flag:
break
guest_node_pos = transfer_var.guest_predict_data.get(idx=0, suffix=(comm_round,))
host_node_pos = data_inst.join(guest_node_pos, traverse_func)
if guest_node_pos.count() != host_node_pos.count():
raise ValueError('sample count mismatch: guest table {}, host table {}'.format(guest_node_pos.count(),
host_node_pos.count()))
transfer_var.host_predict_data.remote(host_node_pos, idx=-1, suffix=(comm_round,))
comm_round += 1
def mix_sbt_host_predict(data_inst, transfer_var: HeteroSecureBoostTransferVariable,
trees: List[HeteroDecisionTreeHost]):
LOGGER.info('running mix mode predict')
tree_num = len(trees)
node_pos = data_inst.mapValues(lambda x: np.zeros(tree_num, dtype=np.int64))
local_traverse_func = functools.partial(traverse_host_local_trees, trees=trees)
leaf_pos = node_pos.join(data_inst, local_traverse_func)
transfer_var.host_predict_data.remote(leaf_pos, idx=0, role=consts.GUEST)
"""
Fed-EINI predict func
"""
def get_leaf_idx_map(trees):
id_pos_map_list = []
for tree in trees:
array_idx = 0
id_pos_map = {}
for node in tree.tree_node:
if node.is_leaf:
id_pos_map[node.id] = array_idx
array_idx += 1
id_pos_map_list.append(id_pos_map)
return id_pos_map_list
def go_to_children_branches(data_inst, tree_node, tree, sitename: str, candidate_list: List):
if tree_node.is_leaf:
candidate_list.append(tree_node)
else:
tree_node_list = tree.tree_node
if tree_node.sitename != sitename:
go_to_children_branches(data_inst, tree_node_list[tree_node.left_nodeid],
tree, sitename, candidate_list)
go_to_children_branches(data_inst, tree_node_list[tree_node.right_nodeid],
tree, sitename, candidate_list)
else:
next_layer_node_id = tree.go_next_layer(tree_node, data_inst, use_missing=tree.use_missing,
zero_as_missing=tree.zero_as_missing, decoder=tree.decode,
split_maskdict=tree.split_maskdict,
missing_dir_maskdict=tree.missing_dir_maskdict,
bin_sparse_point=None
)
go_to_children_branches(data_inst, tree_node_list[next_layer_node_id], tree, sitename, candidate_list)
def generate_leaf_candidates_guest(data_inst, sitename, trees, node_pos_map_list,
init_score, learning_rate, booster_dim):
candidate_nodes_of_all_tree = []
if booster_dim > 2:
epoch_num = len(trees) // booster_dim
else:
epoch_num = len(trees)
init_score = init_score / epoch_num
score_idx = 0
for tree, node_pos_map in zip(trees, node_pos_map_list):
if booster_dim > 2:
tree_init_score = init_score[score_idx]
score_idx += 1
if score_idx == booster_dim:
score_idx = 0
else:
tree_init_score = init_score
candidate_list = []
go_to_children_branches(data_inst, tree.tree_node[0], tree, sitename, candidate_list)
# check if it is mo tree:
if len(candidate_list) < 1:
            raise ValueError('incorrect candidate list length: {}'.format(len(candidate_list)))
node = candidate_list[0]
result_vec = np.zeros(len(node_pos_map))
if isinstance(node.weight, np.ndarray):
if len(node.weight) > 1:
result_vec = [np.array([0 for i in range(len(node.weight))]) for i in range(len(node_pos_map))]
for node in candidate_list:
result_vec[node_pos_map[node.id]] = node.weight * learning_rate + tree_init_score
candidate_nodes_of_all_tree.extend(result_vec)
return np.array(candidate_nodes_of_all_tree)
def EINI_guest_predict(data_inst, trees: List[HeteroDecisionTreeGuest], learning_rate, init_score, booster_dim,
encrypt_key_length, transfer_var: HeteroSecureBoostTransferVariable,
sitename=None, party_list=None, predict_cache=None, pred_leaf=False):
if sitename is None:
raise ValueError('input sitename is None, not able to run EINI predict algorithm')
if pred_leaf:
raise ValueError('EINI predict mode does not support leaf idx prediction')
# EINI algorithms
id_pos_map_list = get_leaf_idx_map(trees)
map_func = functools.partial(generate_leaf_candidates_guest, sitename=sitename, trees=trees,
node_pos_map_list=id_pos_map_list, init_score=init_score,
learning_rate=learning_rate, booster_dim=booster_dim)
position_vec = data_inst.mapValues(map_func)
# encryption
encrypter = PaillierEncrypt()
encrypter.generate_key(encrypt_key_length)
encrypter_vec_table = position_vec.mapValues(encrypter.recursive_encrypt)
# federation part
# send to first host party
transfer_var.guest_predict_data.remote(encrypter_vec_table, idx=0, suffix='position_vec', role=consts.HOST)
# get from last host party
result_table = transfer_var.host_predict_data.get(idx=len(party_list) - 1, suffix='merge_result', role=consts.HOST)
# decode result
result = result_table.mapValues(encrypter.recursive_decrypt)
# reformat
result = result.mapValues(lambda x: np.array(x))
if predict_cache:
result = result.join(predict_cache, lambda v1, v2: v1 + v2)
return result
def generate_leaf_candidates_host(data_inst, sitename, trees, node_pos_map_list):
candidate_nodes_of_all_tree = []
for tree, node_pos_map in zip(trees, node_pos_map_list):
result_vec = [0 for i in range(len(node_pos_map))]
candidate_list = []
go_to_children_branches(data_inst, tree.tree_node[0], tree, sitename, candidate_list)
for node in candidate_list:
result_vec[node_pos_map[node.id]] = 1 # create 0-1 vector
candidate_nodes_of_all_tree.extend(result_vec)
return np.array(candidate_nodes_of_all_tree)
def generate_leaf_idx_dimension_map(trees, booster_dim):
cur_dim = 0
leaf_dim_map = {}
leaf_idx = 0
for tree in trees:
for node in tree.tree_node:
if node.is_leaf:
leaf_dim_map[leaf_idx] = cur_dim
leaf_idx += 1
cur_dim += 1
if cur_dim == booster_dim:
cur_dim = 0
return leaf_dim_map
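# Illustrative sketch (hypothetical stand-in nodes, not part of FATE): with
# booster_dim == 2, successive trees serve alternating class dimensions, so
# leaves of tree 0 map to dim 0 and leaves of tree 1 map to dim 1.
def _leaf_dim_map_demo():
    from types import SimpleNamespace
    leaf = SimpleNamespace(is_leaf=True)
    trees = [SimpleNamespace(tree_node=[leaf, leaf]),
             SimpleNamespace(tree_node=[leaf, leaf])]
    return generate_leaf_idx_dimension_map(trees, booster_dim=2)
    # -> {0: 0, 1: 0, 2: 1, 3: 1}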
def merge_position_vec(host_vec, guest_encrypt_vec, booster_dim=1, leaf_idx_dim_map=None, random_mask=None):
leaf_idx = -1
rs = [0 for i in range(booster_dim)]
for en_num, vec_value in zip(guest_encrypt_vec, host_vec):
leaf_idx += 1
if vec_value == 0:
continue
else:
dim = leaf_idx_dim_map[leaf_idx]
rs[dim] += en_num
if random_mask:
for i in range(len(rs)):
            rs[i] = rs[i] * random_mask  # a positive random mask between 1 and 2
return rs
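# Illustrative plaintext sketch (not part of FATE): in EINI the guest vector
# holds (normally Paillier-encrypted) leaf weights and the host vector is a
# 0-1 mask of leaves reachable under host features, so merging is an inner
# product per class dimension. Plain floats stand in for ciphertexts here.
def _merge_position_vec_demo():
    guest_weights = [0.3, -0.1, 0.2, 0.4]  # leaf weights, two per tree
    host_mask = [1, 0, 0, 1]  # leaves reachable under the host's features
    dim_map = {0: 0, 1: 0, 2: 0, 3: 0}  # all leaves feed dimension 0
    return merge_position_vec(host_mask, guest_weights,
                              booster_dim=1, leaf_idx_dim_map=dim_map)  # -> [0.7]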
def position_vec_element_wise_mul(guest_encrypt_vec, host_vec):
new_vec = []
for en_num, vec_value in zip(guest_encrypt_vec, host_vec):
new_vec.append(en_num * vec_value)
return new_vec
def count_complexity_helper(node, node_list, host_sitename, meet_host_node):
if node.is_leaf:
return 1 if meet_host_node else 0
if node.sitename == host_sitename:
meet_host_node = True
return count_complexity_helper(node_list[node.left_nodeid], node_list, host_sitename, meet_host_node) + \
count_complexity_helper(node_list[node.right_nodeid], node_list, host_sitename, meet_host_node)
def count_complexity(trees, sitename):
tree_valid_leaves_num = []
for tree in trees:
valid_leaf_num = count_complexity_helper(tree.tree_node[0], tree.tree_node, sitename, False)
if valid_leaf_num != 0:
tree_valid_leaves_num.append(valid_leaf_num)
complexity = 1
for num in tree_valid_leaves_num:
complexity *= num
return complexity
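# Illustrative example (added note, not part of the original source): if the host's features
# influence 4, 2 and 8 leaves in three trees respectively, count_complexity returns
# 4 * 2 * 8 = 64 candidate outcomes; trees the host never splits on contribute no factor.
# EINI_host_predict below uses this product to refuse inference when it is too small to
# hide the guest's leaf scores.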
def EINI_host_predict(data_inst, trees: List[HeteroDecisionTreeHost], sitename, self_party_id, party_list,
booster_dim, transfer_var: HeteroSecureBoostTransferVariable,
complexity_check=False, random_mask=False):
if complexity_check:
complexity = count_complexity(trees, sitename)
LOGGER.debug('checking EINI complexity: {}'.format(complexity))
if complexity < consts.EINI_TREE_COMPLEXITY:
            raise ValueError('tree complexity {} is lower than the safe '
                             'threshold, inference is not allowed.'.format(complexity))
id_pos_map_list = get_leaf_idx_map(trees)
map_func = functools.partial(generate_leaf_candidates_host, sitename=sitename, trees=trees,
node_pos_map_list=id_pos_map_list)
position_vec = data_inst.mapValues(map_func)
    random_mask = random.SystemRandom().random() + 1 if random_mask else 0  # mask in (1, 2); 0 disables masking
self_idx = party_list.index(self_party_id)
if len(party_list) == 1:
guest_position_vec = transfer_var.guest_predict_data.get(idx=0, suffix='position_vec')
leaf_idx_dim_map = generate_leaf_idx_dimension_map(trees, booster_dim)
merge_func = functools.partial(merge_position_vec, booster_dim=booster_dim,
leaf_idx_dim_map=leaf_idx_dim_map, random_mask=random_mask)
result_table = position_vec.join(guest_position_vec, merge_func)
transfer_var.host_predict_data.remote(result_table, suffix='merge_result')
else:
# multi host case
# if is first host party, get encrypt vec from guest, else from previous host party
if self_party_id == party_list[0]:
guest_position_vec = transfer_var.guest_predict_data.get(idx=0, suffix='position_vec')
else:
guest_position_vec = transfer_var.inter_host_data.get(idx=self_idx - 1, suffix='position_vec')
if self_party_id == party_list[-1]:
leaf_idx_dim_map = generate_leaf_idx_dimension_map(trees, booster_dim)
func = functools.partial(merge_position_vec, booster_dim=booster_dim,
leaf_idx_dim_map=leaf_idx_dim_map, random_mask=random_mask)
result_table = position_vec.join(guest_position_vec, func)
transfer_var.host_predict_data.remote(result_table, suffix='merge_result')
else:
result_table = position_vec.join(guest_position_vec, position_vec_element_wise_mul)
transfer_var.inter_host_data.remote(result_table, idx=self_idx + 1, suffix='position_vec', role=consts.HOST)
| 21,769 | 37.059441 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/secureboost/secureboost_util/subsample.py
|
import numpy as np
from fate_arch.session import computing_session
# TODO
def random_sampling():
"""
Normal random row subsample
"""
pass
def goss_sampling(grad_and_hess, top_rate, other_rate):
"""
    GOSS (Gradient-based One-Side Sampling) introduced in LightGBM: keep the top_rate
    fraction of samples with the largest |gradient|, randomly sample an other_rate
    fraction of the rest, and amplify the sampled rest to keep gradients unbiased
"""
sample_num = grad_and_hess.count()
g_h_generator = grad_and_hess.collect()
id_list, g_list, h_list = [], [], []
for id_, g_h in g_h_generator:
id_list.append(id_)
g_list.append(g_h[0])
h_list.append(g_h[1])
id_type = type(id_list[0])
id_list = np.array(id_list)
g_arr = np.array(g_list).astype(np.float64)
h_arr = np.array(h_list).astype(np.float64)
    g_sum_arr = np.abs(g_arr).sum(axis=1)  # in the multi-classification case g is a vector, so sum |g| over dims
    abs_g_list_arr = g_sum_arr
    sorted_idx = np.argsort(-abs_g_list_arr, kind='stable')  # stable sort for a reproducible sampling result
a_part_num = int(sample_num * top_rate)
b_part_num = int(sample_num * other_rate)
if a_part_num == 0 or b_part_num == 0:
raise ValueError('subsampled result is 0: top sample {}, other sample {}'.format(a_part_num, b_part_num))
# index of a part
a_sample_idx = sorted_idx[:a_part_num]
# index of b part
rest_sample_idx = sorted_idx[a_part_num:]
b_sample_idx = np.random.choice(rest_sample_idx, size=b_part_num, replace=False)
    # amplify small-gradient samples by (1 - top_rate) / other_rate to keep gradients unbiased
    amplify_weights = (1 - top_rate) / other_rate
g_arr[b_sample_idx] *= amplify_weights
h_arr[b_sample_idx] *= amplify_weights
# get selected sample
a_idx_set, b_idx_set = set(list(a_sample_idx)), set(list(b_sample_idx))
idx_set = a_idx_set.union(b_idx_set)
selected_idx = np.array(list(idx_set))
selected_g, selected_h = g_arr[selected_idx], h_arr[selected_idx]
selected_id = id_list[selected_idx]
data = [(id_type(id_), (g, h)) for id_, g, h in zip(selected_id, selected_g, selected_h)]
new_g_h_table = computing_session.parallelize(data, include_key=True, partition=grad_and_hess.partitions)
return new_g_h_table
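def _goss_demo():
    # Illustrative sketch (added for clarity, not part of FATE): the core GOSS selection on
    # plain numpy arrays, mirroring goss_sampling above without the distributed table.
    g = np.array([0.9, -0.8, 0.1, -0.05, 0.4, 0.02])
    top_rate, other_rate = 1 / 3, 1 / 3
    sample_num = len(g)
    sorted_idx = np.argsort(-np.abs(g), kind='stable')
    a_part_num = int(sample_num * top_rate)      # the 2 largest-|g| samples are always kept
    b_part_num = int(sample_num * other_rate)    # 2 more are sampled from the remainder
    a_idx = sorted_idx[:a_part_num]
    b_idx = np.random.choice(sorted_idx[a_part_num:], size=b_part_num, replace=False)
    g_new = g.copy()
    g_new[b_idx] *= (1 - top_rate) / other_rate  # re-weight small-gradient samples
    return np.concatenate([a_idx, b_idx]), g_new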
| 2,065 | 31.28125 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/test/hack_encrypter.py
|
class HackDecrypter():
def encrypt(self, val):
return val
def decrypt(self, val):
return val
| 119 | 14 | 27 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/test/gh_packing_compressing_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import functools
import math
from fate_arch.session import computing_session as session
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.g_h_optim import PackedGHCompressor, GHPacker, fix_point_precision
from federatedml.secureprotol.encrypt import PaillierEncrypt
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
from federatedml.util import consts
import numpy as np
np.random.seed(114514)
def generate_bin_gh(num):
    # g in (-1, 1), h in (0, 1)
g = np.random.random(num)
h = np.random.random(num)
g = g * 2 - 1
return g, h
def generate_reg_gh(num, lower, upper):
g = np.random.random(num)
h = np.zeros(num) + 2
g = g * (upper - lower) + lower
return g, h
def cmp(a, b):
if a[0] > b[0]:
return 1
else:
return -1
def en_gh_list(g, h, en):
en_g = [en.encrypt(i) for i in g]
en_h = [en.encrypt(i) for i in h]
return en_g, en_h
def truncate(f, n=consts.TREE_DECIMAL_ROUND):
return math.floor(f * 10 ** n) / 10 ** n
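# Illustrative example (added note): truncate floors to n decimal places, e.g.
# truncate(3.14159, n=2) == math.floor(314.159) / 100 == 3.14.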
def make_random_sum(collected_gh, g, h, en_g_l, en_h_l, max_sample_num):
selected_sample_num = np.random.randint(max_sample_num) + 1 # at least 1 sample
idx = np.random.random(selected_sample_num)
idx = np.unique((idx * max_sample_num).astype(int))
print('randomly select {} samples'.format(len(idx)))
selected_g = g[idx]
selected_h = h[idx]
g_sum = selected_g.sum()
h_sum = selected_h.sum()
g_h_list = sorted(collected_gh, key=functools.cmp_to_key(cmp))
sum_gh = 0
en_g_sum = 0
en_h_sum = 0
for i in idx:
gh = g_h_list[i][1][0]
sum_gh += gh
en_g_sum += en_g_l[i]
en_h_sum += en_h_l[i]
return g_sum, h_sum, sum_gh, en_g_sum, en_h_sum, len(idx)
class TestFeatureHistogram(unittest.TestCase):
@staticmethod
def prepare_testing_data(g, h, en, max_sample_num, sample_id, task_type, g_min=None, g_max=None):
packer = GHPacker(max_sample_num, encrypter=en, sync_para=False, task_type=task_type,
g_min=g_min, g_max=g_max)
en_g_l, en_h_l = en_gh_list(g, h, en)
data_list = [(id_, (g_, h_)) for id_, g_, h_ in zip(sample_id, g, h)]
data_table = session.parallelize(data_list, 4, include_key=True)
en_table = packer.pack_and_encrypt(data_table)
collected_gh = list(en_table.collect())
return packer, en_g_l, en_h_l, en_table, collected_gh
@classmethod
def setUpClass(cls):
session.init("test_gh_packing")
cls.max_sample_num = 1000
cls.test_num = 10
cls.split_info_test_num = 200
key_length = 1024
sample_id = [i for i in range(cls.max_sample_num)]
# classification data
cls.g, cls.h = generate_bin_gh(cls.max_sample_num)
cls.p_en = PaillierEncrypt()
cls.p_en.generate_key(key_length)
cls.p_packer, cls.p_en_g_l, cls.p_en_h_l, cls.p_en_table, cls.p_collected_gh = \
cls.prepare_testing_data(cls.g, cls.h, cls.p_en, cls.max_sample_num, sample_id, consts.CLASSIFICATION)
cls.compressor = PackedGHCompressor(sync_para=False)
cls.compressor.compressor._padding_length, cls.compressor.compressor._capacity = \
cls.p_packer.packer.cipher_compress_suggest()
print('paillier compress para {}'.format(cls.p_packer.packer.cipher_compress_suggest()))
# regression data
cls.g_reg, cls.h_reg = generate_reg_gh(cls.max_sample_num, -1000, 1000)
cls.reg_p_packer, cls.reg_p_en_g_l, cls.reg_p_en_h_l, cls.reg_p_en_table, cls.reg_p_collected_gh = \
cls.prepare_testing_data(cls.g_reg, cls.h_reg, cls.p_en, cls.max_sample_num, sample_id, consts.REGRESSION,
g_min=-1000, g_max=1000)
cls.reg_compressor = PackedGHCompressor(sync_para=False)
cls.reg_compressor.compressor._padding_length, cls.reg_compressor.compressor._capacity = \
cls.reg_p_packer.packer.cipher_compress_suggest()
        print('paillier compress para {}'.format(cls.reg_p_packer.packer.cipher_compress_suggest()))
print('initialization done')
def run_gh_accumulate_test(self, test_num, collected_gh, en_g_l, en_h_l, packer, en, g, h, check=True):
print('{} test to run'.format(test_num))
for i in range(test_num):
print('executing test {}'.format(i))
g_sum, h_sum, en_sum, en_g_sum, en_h_sum, sample_num = make_random_sum(collected_gh, g, h,
en_g_l,
en_h_l,
self.max_sample_num)
de_num = en.raw_decrypt(en_sum)
unpack_num = packer.packer.unpack_an_int(de_num, packer.packer.bit_assignment[0])
g_sum_ = unpack_num[0] / fix_point_precision - sample_num * packer.g_offset
h_sum_ = unpack_num[1] / fix_point_precision
g_sum_2 = en.decrypt(en_g_sum)
h_sum_2 = en.decrypt(en_h_sum)
print(g_sum, h_sum)
print(g_sum_2, h_sum_2)
print(g_sum_, h_sum_)
g_sum, h_sum = truncate(g_sum), truncate(h_sum)
g_sum_, h_sum_ = truncate(g_sum_), truncate(h_sum_)
g_sum_2, h_sum_2 = truncate(g_sum_2), truncate(h_sum_2)
print(g_sum, h_sum)
print(g_sum_2, h_sum_2)
print(g_sum_, h_sum_)
if check:
# make sure packing result close to plaintext sum
self.assertTrue(g_sum_ == g_sum)
self.assertTrue(h_sum_ == h_sum)
print('passed')
def test_pack_gh_accumulate(self):
        # test the correctness of gh packing (in comparison to plaintext)
# Paillier
self.run_gh_accumulate_test(self.test_num, self.p_collected_gh, self.p_en_g_l, self.p_en_h_l, self.p_packer,
self.p_en, self.g, self.h)
print('*' * 30)
print('test paillier done')
print('*' * 30)
def test_split_info_cipher_compress(self):
# test the correctness of cipher compressing
print('testing binary')
collected_gh = self.p_collected_gh
en_g_l = self.p_en_g_l
en_h_l = self.p_en_h_l
packer = self.p_packer
en = self.p_en
sp_list = []
g_sum_list, h_sum_list = [], []
pack_en_list = []
for i in range(self.split_info_test_num):
g_sum, h_sum, en_sum, en_g_sum, en_h_sum, sample_num = make_random_sum(collected_gh, self.g, self.h,
en_g_l,
en_h_l,
self.max_sample_num)
sp = SplitInfo(sum_grad=en_sum, sum_hess=0, sample_count=sample_num)
sp_list.append(sp)
g_sum_list.append(g_sum)
h_sum_list.append(h_sum)
pack_en_list.append(en_sum)
print('generating split-info done')
packages = self.compressor.compress_split_info(sp_list[:-1], sp_list[-1])
print('package length is {}'.format(len(packages)))
unpack_rs = packer.decompress_and_unpack(packages)
case_id = 0
for s, g, h, en_gh in zip(unpack_rs, g_sum_list, h_sum_list, pack_en_list):
print('*' * 10)
print(case_id)
case_id += 1
de_num = en.raw_decrypt(en_gh)
unpack_num = packer.packer.unpack_an_int(de_num, packer.packer.bit_assignment[0])
g_sum_ = unpack_num[0] / fix_point_precision - s.sample_count * packer.g_offset
h_sum_ = unpack_num[1] / fix_point_precision
print(s.sample_count)
print(s.sum_grad, g_sum_, g)
print(s.sum_hess, h_sum_, h)
# make sure cipher compress is correct
self.assertTrue(truncate(s.sum_grad) == truncate(g_sum_))
self.assertTrue(truncate(s.sum_hess) == truncate(h_sum_))
print('check passed')
def test_regression_cipher_compress(self):
# test the correctness of cipher compressing
print('testing regression')
collected_gh = self.reg_p_collected_gh
en_g_l = self.reg_p_en_g_l
en_h_l = self.reg_p_en_h_l
packer = self.reg_p_packer
en = self.p_en
sp_list = []
g_sum_list, h_sum_list = [], []
pack_en_list = []
for i in range(self.split_info_test_num):
g_sum, h_sum, en_sum, en_g_sum, en_h_sum, sample_num = make_random_sum(collected_gh, self.g_reg, self.h_reg,
en_g_l,
en_h_l,
self.max_sample_num)
sp = SplitInfo(sum_grad=en_sum, sum_hess=0, sample_count=sample_num)
sp_list.append(sp)
g_sum_list.append(g_sum)
h_sum_list.append(h_sum)
pack_en_list.append(en_sum)
print('generating split-info done')
packages = self.reg_compressor.compress_split_info(sp_list[:-1], sp_list[-1])
print('package length is {}'.format(len(packages)))
unpack_rs = packer.decompress_and_unpack(packages)
case_id = 0
for s, g, h, en_gh in zip(unpack_rs, g_sum_list, h_sum_list, pack_en_list):
print('*' * 10)
print(case_id)
case_id += 1
            de_num = en.raw_decrypt(en_gh)
unpack_num = packer.packer.unpack_an_int(de_num, packer.packer.bit_assignment[0])
g_sum_ = unpack_num[0] / fix_point_precision - s.sample_count * packer.g_offset
h_sum_ = unpack_num[1] / fix_point_precision
print(s.sample_count)
print(s.sum_grad, g_sum_, g)
print(s.sum_hess, h_sum_, h)
# make sure cipher compress is correct
self.assertTrue(truncate(s.sum_grad) == truncate(g_sum_))
self.assertTrue(truncate(s.sum_hess) == truncate(h_sum_))
print('check passed')
def test_regression_gh_packing(self):
# Paillier
self.run_gh_accumulate_test(
self.test_num,
self.reg_p_collected_gh,
self.reg_p_en_g_l,
self.reg_p_en_h_l,
self.reg_p_packer,
self.p_en,
self.g_reg,
self.h_reg,
check=False) # float error in regression is not controllable
@classmethod
def tearDownClass(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| 11,648 | 37.193443 | 133 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/test/criterion_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from federatedml.ensemble import XgboostCriterion
from federatedml.util import consts
class TestXgboostCriterion(unittest.TestCase):
def setUp(self):
self.reg_lambda = 0.3
self.criterion = XgboostCriterion(reg_lambda=self.reg_lambda)
def test_init(self):
self.assertTrue(np.fabs(self.criterion.reg_lambda - self.reg_lambda) < consts.FLOAT_ZERO)
def test_split_gain(self):
node = [0.5, 0.6]
left = [0.1, 0.2]
right = [0.4, 0.4]
gain_all = node[0] * node[0] / (node[1] + self.reg_lambda)
gain_left = left[0] * left[0] / (left[1] + self.reg_lambda)
gain_right = right[0] * right[0] / (right[1] + self.reg_lambda)
split_gain = gain_left + gain_right - gain_all
self.assertTrue(np.fabs(self.criterion.split_gain(node, left, right) - split_gain) < consts.FLOAT_ZERO)
def test_node_gain(self):
grad = 0.5
hess = 6
gain = grad * grad / (hess + self.reg_lambda)
self.assertTrue(np.fabs(self.criterion.node_gain(grad, hess) - gain) < consts.FLOAT_ZERO)
def test_node_weight(self):
grad = 0.5
hess = 6
weight = -grad / (hess + self.reg_lambda)
self.assertTrue(np.fabs(self.criterion.node_weight(grad, hess) - weight) < consts.FLOAT_ZERO)
if __name__ == '__main__':
unittest.main()
| 2,008 | 33.637931 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/test/node_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from federatedml.ensemble import Node, SplitInfo
class TestNode(unittest.TestCase):
def setUp(self):
pass
def test_node(self):
param_dict = {"id": 5, "sitename": "test", "fid": 55, "bid": 555,
"weight": -1, "is_leaf": True, "sum_grad": 2, "sum_hess": 3,
"left_nodeid": 6, "right_nodeid": 7}
node = Node(id=5, sitename="test", fid=55, bid=555, weight=-1, is_leaf=True,
sum_grad=2, sum_hess=3, left_nodeid=6, right_nodeid=7)
for key in param_dict:
self.assertTrue(param_dict[key] == getattr(node, key))
class TestSplitInfo(unittest.TestCase):
def setUp(self):
pass
def test_splitinfo(self):
param_dict = {"sitename": "testsplitinfo",
"best_fid": 23, "best_bid": 233,
"sum_grad": 2333, "sum_hess": 23333, "gain": 233333}
splitinfo = SplitInfo(sitename="testsplitinfo", best_fid=23, best_bid=233,
sum_grad=2333, sum_hess=23333, gain=233333)
for key in param_dict:
self.assertTrue(param_dict[key] == getattr(splitinfo, key))
if __name__ == '__main__':
unittest.main()
| 1,866 | 32.945455 | 84 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/test/feature_histogram_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from fate_arch.session import computing_session as session
from federatedml.ensemble import FeatureHistogram
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.util import consts
import copy
import numpy as np
import random
class TestFeatureHistogram(unittest.TestCase):
def setUp(self):
self.feature_histogram = FeatureHistogram()
session.init("test_feature_histogram")
data_insts = []
for i in range(1000):
indices = []
data = []
for j in range(10):
x = random.randint(0, 5)
if x != 0:
data.append(x)
indices.append(j)
sparse_vec = SparseVector(indices, data, shape=10)
data_insts.append((Instance(features=sparse_vec), (1, random.randint(0, 3))))
self.node_map = {0: 0, 1: 1, 2: 2, 3: 3}
self.data_insts = data_insts
self.data_bin = session.parallelize(data_insts, include_key=False, partition=16)
self.grad_and_hess_list = [(random.random(), random.random()) for i in range(1000)]
self.grad_and_hess = session.parallelize(self.grad_and_hess_list, include_key=False, partition=16)
bin_split_points = []
for i in range(10):
bin_split_points.append(np.array([i for i in range(6)]))
self.bin_split_points = np.array(bin_split_points)
self.bin_sparse = [0 for i in range(10)]
def test_accumulate_histogram(self):
data = [[[[random.randint(0, 10) for i in range(2)]
for j in range(3)]
for k in range(4)]
for r in range(5)]
histograms = copy.deepcopy(data)
for i in range(len(data)):
for j in range(len(data[i])):
histograms[i][j] = self.feature_histogram._tensor_histogram_cumsum(histograms[i][j])
for k in range(1, len(data[i][j])):
for r in range(len(data[i][j][k])):
data[i][j][k][r] += data[i][j][k - 1][r]
self.assertTrue(data[i][j][k][r] == histograms[i][j][k][r])
def test_calculate_histogram(self):
histograms = self.feature_histogram.calculate_histogram(
self.data_bin, self.grad_and_hess,
self.bin_split_points, self.bin_sparse,
node_map=self.node_map)
his2 = [[[[0 for i in range(3)]
for j in range(6)]
for k in range(10)]
for r in range(4)]
for i in range(1000):
grad, hess = self.grad_and_hess_list[i]
id = self.node_map[self.data_insts[i][1][1]]
for fid, bid in self.data_insts[i][0].features.get_all_data():
his2[id][fid][bid][0] += grad
his2[id][fid][bid][1] += hess
his2[id][fid][bid][2] += 1
for i in range(len(his2)):
for j in range(len(his2[i])):
his2[i][j] = self.feature_histogram._tensor_histogram_cumsum(his2[i][j])
for k in range(len(his2[i][j])):
for r in range(len(his2[i][j][k])):
self.assertTrue(np.fabs(his2[i][j][k][r] - histograms[i][j][k][r]) < consts.FLOAT_ZERO)
def test_aggregate_histogram(self):
fake_fid = 114
data1 = [[random.randint(0, 10) for i in range(2)] for j in range(3)]
data2 = [[random.randint(0, 10) for i in range(2)] for j in range(3)]
fid, agg_histograms = self.feature_histogram._hist_aggregate((fake_fid, data1), (fake_fid, data2))
for i in range(len(data1)):
for j in range(len(data1[i])):
data1[i][j] += data2[i][j]
self.assertTrue(data1[i][j] == agg_histograms[i][j])
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| 4,570 | 38.068376 | 111 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/boosting/hetero_boosting.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import abc
from federatedml.ensemble.boosting import Boosting
from federatedml.param.boosting_param import HeteroBoostingParam
from federatedml.secureprotol import PaillierEncrypt, IpclPaillierEncrypt
from federatedml.util import consts
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.util.classify_label_checker import ClassifyLabelChecker
from federatedml.util.classify_label_checker import RegressionLabelChecker
from federatedml.util import LOGGER
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.transfer_variable.transfer_class.hetero_boosting_transfer_variable import \
HeteroBoostingTransferVariable
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.statistic.data_overview import get_anonymous_header
class HeteroBoosting(Boosting, ABC):
def __init__(self):
super(HeteroBoosting, self).__init__()
self.encrypter = None
self.early_stopping_rounds = None
self.binning_class = QuantileBinning
self.model_param = HeteroBoostingParam()
self.transfer_variable = HeteroBoostingTransferVariable()
self.mode = consts.HETERO
def _init_model(self, param: HeteroBoostingParam):
LOGGER.debug('in hetero boosting, objective param is {}'.format(param.objective_param.objective))
super(HeteroBoosting, self)._init_model(param)
self.encrypt_param = param.encrypt_param
self.early_stopping_rounds = param.early_stopping_rounds
self.use_first_metric_only = param.use_first_metric_only
class HeteroBoostingGuest(HeteroBoosting, ABC):
def __init__(self):
super(HeteroBoostingGuest, self).__init__()
def _init_model(self, param):
super(HeteroBoostingGuest, self)._init_model(param)
def generate_encrypter(self):
LOGGER.info("generate encrypter")
if self.encrypt_param.method.lower() == consts.PAILLIER.lower():
self.encrypter = PaillierEncrypt()
self.encrypter.generate_key(self.encrypt_param.key_length)
elif self.encrypt_param.method.lower() == consts.PAILLIER_IPCL.lower():
self.encrypter = IpclPaillierEncrypt()
self.encrypter.generate_key(self.encrypt_param.key_length)
else:
raise NotImplementedError("unknown encrypt type {}".format(self.encrypt_param.method.lower()))
def check_label(self):
LOGGER.info("check label")
classes_ = []
num_classes, booster_dim = 1, 1
if self.task_type == consts.CLASSIFICATION:
num_classes, classes_ = ClassifyLabelChecker.validate_label(self.data_bin)
if num_classes > 2:
booster_dim = num_classes
range_from_zero = True
for _class in classes_:
try:
if 0 <= _class < len(classes_) and isinstance(_class, int):
continue
else:
range_from_zero = False
break
except BaseException:
range_from_zero = False
classes_ = sorted(classes_)
if not range_from_zero:
class_mapping = dict(zip(classes_, range(num_classes)))
self.y = self.y.mapValues(lambda _class: class_mapping[_class])
else:
RegressionLabelChecker.validate_label(self.data_bin)
return classes_, num_classes, booster_dim
def sync_booster_dim(self):
LOGGER.info("sync booster_dim to host")
self.transfer_variable.booster_dim.remote(self.booster_dim,
role=consts.HOST,
idx=-1)
def sync_stop_flag(self, stop_flag, num_round):
LOGGER.info("sync stop flag to host, boosting_core round is {}".format(num_round))
self.transfer_variable.stop_flag.remote(stop_flag,
role=consts.HOST,
idx=-1,
suffix=(num_round,))
def sync_predict_round(self, predict_round, ):
LOGGER.info("sync predict start round {}".format(predict_round))
self.transfer_variable.predict_start_round.remote(predict_round, role=consts.HOST, idx=-1, )
def prepare_warm_start(self, data_inst, classes):
# adjust parameter for warm start
warm_start_y_hat = self.predict(data_inst, ret_format='raw')
self.y_hat = warm_start_y_hat
self.start_round = len(self.boosting_model_list) // self.booster_dim
self.boosting_round += self.start_round
# check classes
assert set(classes).issubset(set(self.classes_)), 'warm start label alignment failed: cur labels {},' \
'previous model labels {}'.format(classes, self.classes_)
# check fid
self.feat_name_check(data_inst, self.feature_name_fid_mapping)
self.callback_warm_start_init_iter(self.start_round)
def fit(self, data_inst, validate_data=None):
LOGGER.info('begin to fit a hetero boosting model, model is {}'.format(self.model_name))
self.start_round = 0
self.on_training = True
self.data_inst = data_inst
to_process_data_inst = self.data_and_header_alignment(data_inst) if self.is_warm_start else data_inst
self.data_bin, self.bin_split_points, self.bin_sparse_points = self.prepare_data(to_process_data_inst)
self.y = self.get_label(self.data_bin)
if not self.is_warm_start:
self.feature_name_fid_mapping = self.gen_feature_fid_mapping(data_inst.schema)
self.classes_, self.num_classes, self.booster_dim = self.check_label()
self.loss = self.get_loss_function()
self.y_hat, self.init_score = self.get_init_score(self.y, self.num_classes)
else:
classes_, num_classes, booster_dim = self.check_label()
self.prepare_warm_start(data_inst, classes_)
LOGGER.info('class index is {}'.format(self.classes_))
self.sync_booster_dim()
self.generate_encrypter()
self.callback_list.on_train_begin(data_inst, validate_data)
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"unit_name": "iters"}))
self.preprocess()
for epoch_idx in range(self.start_round, self.boosting_round):
LOGGER.info('cur epoch idx is {}'.format(epoch_idx))
self.callback_list.on_epoch_begin(epoch_idx)
for class_idx in range(self.booster_dim):
# fit a booster
model = self.fit_a_learner(epoch_idx, class_idx)
booster_meta, booster_param = model.get_model()
if booster_meta is not None and booster_param is not None:
self.booster_meta = booster_meta
self.boosting_model_list.append(booster_param)
# update predict score
cur_sample_weights = model.get_sample_weights()
self.y_hat = self.get_new_predict_score(self.y_hat, cur_sample_weights, dim=class_idx)
# compute loss
loss = self.compute_loss(self.y_hat, self.y, sample_weights=self.data_inst)
self.history_loss.append(loss)
LOGGER.info("round {} loss is {}".format(epoch_idx, loss))
self.callback_metric("loss",
"train",
[Metric(epoch_idx, loss)])
# check validation
validation_strategy = self.callback_list.get_validation_strategy()
if validation_strategy:
validation_strategy.set_precomputed_train_scores(self.score_to_predict_result(data_inst, self.y_hat))
self.callback_list.on_epoch_end(epoch_idx)
should_stop = False
if self.n_iter_no_change and self.check_convergence(loss):
should_stop = True
self.is_converged = True
self.sync_stop_flag(self.is_converged, epoch_idx)
if self.stop_training or should_stop:
break
self.postprocess()
self.callback_list.on_train_end()
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"Best": min(self.history_loss)}))
# get summary
self.set_summary(self.generate_summary())
@assert_io_num_rows_equal
def predict(self, data_inst):
# predict is implemented in hetero_secureboost
raise NotImplementedError('predict func is not implemented')
@abc.abstractmethod
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
raise NotImplementedError()
@abc.abstractmethod
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
raise NotImplementedError()
@abc.abstractmethod
def get_model_meta(self):
raise NotImplementedError()
@abc.abstractmethod
def get_model_param(self):
raise NotImplementedError()
@abc.abstractmethod
def set_model_meta(self, model_meta):
raise NotImplementedError()
@abc.abstractmethod
def set_model_param(self, model_param):
raise NotImplementedError()
class HeteroBoostingHost(HeteroBoosting, ABC):
def __init__(self):
super(HeteroBoostingHost, self).__init__()
def _init_model(self, param):
super(HeteroBoostingHost, self)._init_model(param)
def sync_booster_dim(self):
LOGGER.info("sync booster dim from guest")
self.booster_dim = self.transfer_variable.booster_dim.get(idx=0)
LOGGER.info("booster dim is %d" % self.booster_dim)
def sync_stop_flag(self, num_round):
LOGGER.info("sync stop flag from guest, boosting_core round is {}".format(num_round))
stop_flag = self.transfer_variable.stop_flag.get(idx=0,
suffix=(num_round,))
return stop_flag
def sync_predict_start_round(self, ):
return self.transfer_variable.predict_start_round.get(idx=0, )
def prepare_warm_start(self, data_inst):
self.predict(data_inst)
self.callback_warm_start_init_iter(self.start_round)
self.feat_name_check(data_inst, self.feature_name_fid_mapping)
self.start_round = len(self.boosting_model_list) // self.booster_dim
self.boosting_round += self.start_round
def set_anonymous_header(self, data_inst):
if not self.anonymous_header:
self.anonymous_header = {v: k for k, v in zip(get_anonymous_header(data_inst), data_inst.schema['header'])}
def fit(self, data_inst, validate_data=None):
LOGGER.info('begin to fit a hetero boosting model, model is {}'.format(self.model_name))
self.start_round = 0
self.on_training = True
to_process_data_inst = self.data_and_header_alignment(data_inst) if self.is_warm_start else data_inst
self.data_bin, self.bin_split_points, self.bin_sparse_points = self.prepare_data(to_process_data_inst)
self.set_anonymous_header(to_process_data_inst)
if self.is_warm_start:
self.prepare_warm_start(data_inst)
else:
self.feature_name_fid_mapping = self.gen_feature_fid_mapping(data_inst.schema)
self.sync_booster_dim()
self.callback_list.on_train_begin(data_inst, validate_data)
self.preprocess()
for epoch_idx in range(self.start_round, self.boosting_round):
LOGGER.info('cur epoch idx is {}'.format(epoch_idx))
self.callback_list.on_epoch_begin(epoch_idx)
for class_idx in range(self.booster_dim):
# fit a booster
model = self.fit_a_learner(epoch_idx, class_idx) # need to implement
booster_meta, booster_param = model.get_model()
if booster_meta is not None and booster_param is not None:
self.booster_meta = booster_meta
self.boosting_model_list.append(booster_param)
validation_strategy = self.callback_list.get_validation_strategy()
if validation_strategy:
validation_strategy.set_precomputed_train_scores(None)
self.callback_list.on_epoch_end(epoch_idx)
should_stop = self.sync_stop_flag(epoch_idx)
self.is_converged = should_stop
if should_stop or self.stop_training:
break
self.postprocess()
self.callback_list.on_train_end()
self.set_summary(self.generate_summary())
def lazy_predict(self, data_inst):
        LOGGER.info('running host lazy prediction')
data_inst = self.data_alignment(data_inst)
init_score = self.init_score
self.predict_y_hat = data_inst.mapValues(lambda v: init_score)
rounds = len(self.boosting_model_list) // self.booster_dim
predict_start_round = self.sync_predict_start_round()
for idx in range(predict_start_round, rounds):
for booster_idx in range(self.booster_dim):
model = self.load_learner(self.booster_meta,
self.boosting_model_list[idx * self.booster_dim + booster_idx],
idx, booster_idx)
model.predict(data_inst)
LOGGER.debug('lazy prediction finished')
def predict(self, data_inst):
LOGGER.info('using default lazy prediction')
self.lazy_predict(data_inst)
@abc.abstractmethod
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
raise NotImplementedError()
@abc.abstractmethod
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
raise NotImplementedError()
@abc.abstractmethod
def get_model_meta(self):
raise NotImplementedError()
@abc.abstractmethod
def get_model_param(self):
raise NotImplementedError()
@abc.abstractmethod
def set_model_meta(self, model_meta):
raise NotImplementedError()
@abc.abstractmethod
def set_model_param(self, model_param):
raise NotImplementedError()
| 15,350 | 37.961929 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/boosting/predict_cache.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class PredictDataCache(object):
def __init__(self):
self._data_map = {}
def predict_data_at(self, dataset_key, round):
if dataset_key not in self._data_map:
return None
return self._data_map[dataset_key].data_at(round)
def predict_data_last_round(self, dataset_key):
if dataset_key not in self._data_map:
return 0 # start from 0
return self._data_map[dataset_key].get_last_round()
@staticmethod
def get_data_key(data):
return id(data)
def add_data(self, dataset_key, f, cur_boosting_round):
if dataset_key not in self._data_map:
self._data_map[dataset_key] = DataNode()
self._data_map[dataset_key].add_data(f, cur_boosting_round)
class DataNode(object):
def __init__(self):
self._boost_round = None
self._f = None
self._round_idx_map = {}
self._idx = 0
def get_last_round(self):
return self._boost_round
def data_at(self, round):
if round not in self._round_idx_map:
return None
return self._f.mapValues(lambda f_list: f_list[self._round_idx_map[round]])
def add_data(self, f, cur_round_num):
if self._boost_round is None:
self._boost_round = cur_round_num
self._idx = 0
self._f = f.mapValues(lambda pred: [pred])
else:
self._boost_round = cur_round_num
self._idx += 1
self._f = self._f.join(f, lambda pre_scores, score: pre_scores + [score])
self._round_idx_map[self._boost_round] = self._idx
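# Illustrative sketch (added for clarity, not part of FATE): a minimal stand-in table with a
# mapValues/join interface is enough to exercise DataNode's per-round bookkeeping.
class _ToyTable:
    def __init__(self, kv):
        self.kv = dict(kv)
    def mapValues(self, f):
        return _ToyTable({k: f(v) for k, v in self.kv.items()})
    def join(self, other, f):
        return _ToyTable({k: f(v, other.kv[k]) for k, v in self.kv.items()})
def _demo_data_node():
    node = DataNode()
    node.add_data(_ToyTable({'id0': 0.1}), cur_round_num=0)   # accumulated score after round 0
    node.add_data(_ToyTable({'id0': 0.25}), cur_round_num=1)  # accumulated score after round 1
    assert node.get_last_round() == 1
    assert node.data_at(0).kv['id0'] == 0.1                   # recover the cached round-0 score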
| 2,276 | 30.191781 | 85 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/boosting/homo_boosting.py
|
from abc import ABC
import abc
import numpy as np
from federatedml.ensemble.boosting.boosting import Boosting
from federatedml.feature.homo_feature_binning.homo_split_points import HomoFeatureBinningClient, \
HomoFeatureBinningServer
from federatedml.util.classify_label_checker import ClassifyLabelChecker, RegressionLabelChecker
from federatedml.util import consts
from federatedml.util.homo_label_encoder import HomoLabelEncoderClient, HomoLabelEncoderArbiter
from federatedml.transfer_variable.transfer_class.homo_boosting_transfer_variable import HomoBoostingTransferVariable
from typing import List
from federatedml.feature.fate_element_type import NoneType
from federatedml.util import LOGGER
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.boosting_param import HomoSecureBoostParam
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.feature.homo_feature_binning import recursive_query_binning
from federatedml.param.feature_binning_param import HomoFeatureBinningParam
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient, SecureAggregatorServer
class HomoBoostArbiterAggregator(object):
def __init__(self, ):
self.aggregator = SecureAggregatorServer(communicate_match_suffix='homo_sbt')
def aggregate_loss(self, suffix):
global_loss = self.aggregator.aggregate_loss(suffix)
return global_loss
def broadcast_converge_status(self, func, loss, suffix):
is_converged = func(*loss)
self.aggregator.broadcast_converge_status(is_converged, suffix=suffix)
return is_converged
class HomoBoostClientAggregator(object):
def __init__(self, sample_num):
self.aggregator = SecureAggregatorClient(
communicate_match_suffix='homo_sbt', aggregate_weight=sample_num)
def send_local_loss(self, loss, suffix):
self.aggregator.send_loss(loss, suffix)
def get_converge_status(self, suffix):
return self.aggregator.get_converge_status(suffix)
class HomoBoostingClient(Boosting, ABC):
def __init__(self):
super(HomoBoostingClient, self).__init__()
self.transfer_inst = HomoBoostingTransferVariable()
self.model_param = HomoSecureBoostParam()
self.aggregator = None
self.binning_obj = None
self.mode = consts.HOMO
def federated_binning(self, data_instance):
binning_param = HomoFeatureBinningParam(method=consts.RECURSIVE_QUERY, bin_num=self.bin_num,
error=self.binning_error)
if self.use_missing:
self.binning_obj = recursive_query_binning.Client(params=binning_param, abnormal_list=[NoneType()],
role=self.role)
LOGGER.debug('use missing')
else:
self.binning_obj = recursive_query_binning.Client(params=binning_param, role=self.role)
self.binning_obj.fit_split_points(data_instance)
return self.binning_obj.convert_feature_to_bin(data_instance)
def check_label(self, data_inst, ) -> List[int]:
LOGGER.debug('checking labels')
classes_ = None
if self.task_type == consts.CLASSIFICATION:
num_classes, classes_ = ClassifyLabelChecker.validate_label(data_inst)
else:
RegressionLabelChecker.validate_label(data_inst)
return classes_
@staticmethod
def check_label_starts_from_zero(aligned_labels):
"""
        in the current version, labels should start from 0 and
        be consecutive integers
"""
if aligned_labels[0] != 0:
            raise ValueError('labels should start from 0')
for prev, aft in zip(aligned_labels[:-1], aligned_labels[1:]):
if prev + 1 != aft:
raise ValueError('labels should be a sequence of consecutive integers, '
'but got {} and {}'.format(prev, aft))
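    # Illustrative examples (added note): [0, 1, 2] passes; [1, 2, 3] (does not start
    # from 0) and [0, 2, 3] (gap between 0 and 2) both raise ValueError.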
def sync_feature_num(self):
self.transfer_inst.feature_number.remote(self.feature_num, role=consts.ARBITER, idx=-1, suffix=('feat_num',))
def sync_start_round_and_end_round(self):
self.transfer_inst.start_and_end_round.remote((self.start_round, self.boosting_round),
role=consts.ARBITER, idx=-1)
def data_preporcess(self, data_inst):
# transform to sparse and binning
data_inst = self.data_alignment(data_inst)
self.data_bin, self.bin_split_points, self.bin_sparse_points = self.federated_binning(data_inst)
def fit(self, data_inst, validate_data=None):
# init federation obj
self.aggregator = HomoBoostClientAggregator(sample_num=data_inst.count())
# binning
self.data_preporcess(data_inst)
self.data_inst = data_inst
# fid mapping and warm start check
if not self.is_warm_start:
self.feature_name_fid_mapping = self.gen_feature_fid_mapping(data_inst.schema)
else:
self.feat_name_check(data_inst, self.feature_name_fid_mapping)
# set feature_num
self.feature_num = self.bin_split_points.shape[0]
# sync feature num
self.sync_feature_num()
# initialize validation strategy
self.callback_list.on_train_begin(data_inst, validate_data)
# check labels
local_classes = self.check_label(self.data_bin)
# set start round
self.start_round = len(self.boosting_model_list) // self.booster_dim
# sync label class and set y
if self.task_type == consts.CLASSIFICATION:
aligned_label, new_label_mapping = HomoLabelEncoderClient().label_alignment(local_classes)
if self.is_warm_start:
assert set(aligned_label) == set(self.classes_), 'warm start label alignment failed, differences: {}'. \
format(set(aligned_label).symmetric_difference(set(self.classes_)))
self.classes_ = aligned_label
self.check_label_starts_from_zero(self.classes_)
# set labels
self.num_classes = len(new_label_mapping)
LOGGER.info('aligned labels are {}, num_classes is {}'.format(aligned_label, self.num_classes))
self.y = self.data_bin.mapValues(lambda instance: new_label_mapping[instance.label])
# set tree dimension
self.booster_dim = self.num_classes if self.num_classes > 2 else 1
else:
self.y = self.data_bin.mapValues(lambda instance: instance.label)
# set loss function
self.loss = self.get_loss_function()
# set y_hat_val, if warm start predict cur samples
if self.is_warm_start:
self.y_hat = self.predict(data_inst, ret_format='raw')
self.boosting_round += self.start_round
self.callback_warm_start_init_iter(self.start_round)
else:
if self.task_type == consts.REGRESSION:
self.init_score = np.array([0]) # make sure that every local model has same init scores
self.y_hat = self.y.mapValues(lambda x: np.array([0]))
else:
self.y_hat, self.init_score = self.get_init_score(self.y, self.num_classes)
# sync start round and end round
self.sync_start_round_and_end_round()
self.preprocess()
LOGGER.info('begin to fit a boosting tree')
for epoch_idx in range(self.start_round, self.boosting_round):
LOGGER.info('cur epoch idx is {}'.format(epoch_idx))
self.callback_list.on_epoch_begin(epoch_idx)
for class_idx in range(self.booster_dim):
# fit a booster
model = self.fit_a_learner(epoch_idx, class_idx)
booster_meta, booster_param = model.get_model()
if booster_meta is not None and booster_param is not None:
self.booster_meta = booster_meta
self.boosting_model_list.append(booster_param)
# update predict score
cur_sample_weights = model.get_sample_weights()
self.y_hat = self.get_new_predict_score(self.y_hat, cur_sample_weights, dim=class_idx)
local_loss = self.compute_loss(self.y_hat, self.y, self.data_inst)
self.aggregator.send_local_loss(local_loss, suffix=(epoch_idx,))
validation_strategy = self.callback_list.get_validation_strategy()
if validation_strategy:
validation_strategy.set_precomputed_train_scores(self.score_to_predict_result(data_inst, self.y_hat))
self.callback_list.on_epoch_end(epoch_idx)
# check stop flag if n_iter_no_change is True
if self.n_iter_no_change:
should_stop = self.aggregator.get_converge_status(suffix=(str(epoch_idx),))
if should_stop:
LOGGER.info('n_iter_no_change stop triggered')
break
self.postprocess()
self.callback_list.on_train_end()
self.set_summary(self.generate_summary())
@assert_io_num_rows_equal
def predict(self, data_inst):
# predict is implemented in homo_secureboost
raise NotImplementedError('predict func is not implemented')
@abc.abstractmethod
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
raise NotImplementedError()
@abc.abstractmethod
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
raise NotImplementedError()
class HomoBoostingArbiter(Boosting, ABC):
def __init__(self):
super(HomoBoostingArbiter, self).__init__()
self.transfer_inst = HomoBoostingTransferVariable()
self.check_convergence_func = None
self.aggregator = None
self.binning_obj = None
def federated_binning(self, ):
binning_param = HomoFeatureBinningParam(method=consts.RECURSIVE_QUERY, bin_num=self.bin_num,
error=self.binning_error)
if self.use_missing:
self.binning_obj = recursive_query_binning.Server(binning_param, abnormal_list=[NoneType()])
else:
self.binning_obj = recursive_query_binning.Server(binning_param, abnormal_list=[])
self.binning_obj.fit_split_points(None)
def sync_feature_num(self):
feature_num_list = self.transfer_inst.feature_number.get(idx=-1, suffix=('feat_num',))
for num in feature_num_list[1:]:
assert feature_num_list[0] == num
return feature_num_list[0]
def sync_start_round_and_end_round(self):
r_list = self.transfer_inst.start_and_end_round.get(-1)
LOGGER.info('get start/end round from clients: {}'.format(r_list))
self.start_round, self.boosting_round = r_list[0]
def check_label(self):
pass
def fit(self, data_inst, validate_data=None):
# init binning obj
self.aggregator = HomoBoostArbiterAggregator()
self.federated_binning()
# initializing
self.feature_num = self.sync_feature_num()
if self.task_type == consts.CLASSIFICATION:
label_mapping = HomoLabelEncoderArbiter().label_alignment()
LOGGER.info('label mapping is {}'.format(label_mapping))
self.booster_dim = len(label_mapping) if len(label_mapping) > 2 else 1
if self.n_iter_no_change:
self.check_convergence_func = converge_func_factory("diff", self.tol)
# sync start round and end round
self.sync_start_round_and_end_round()
LOGGER.info('begin to fit a boosting tree')
self.preprocess()
for epoch_idx in range(self.start_round, self.boosting_round):
LOGGER.info('cur epoch idx is {}'.format(epoch_idx))
for class_idx in range(self.booster_dim):
model = self.fit_a_learner(epoch_idx, class_idx)
global_loss = self.aggregator.aggregate_loss(suffix=(epoch_idx,))
self.history_loss.append(global_loss)
LOGGER.debug('cur epoch global loss is {}'.format(global_loss))
self.callback_metric("loss",
"train",
[Metric(epoch_idx, global_loss)])
if self.n_iter_no_change:
should_stop = self.aggregator.broadcast_converge_status(self.check_convergence, (global_loss,),
suffix=(epoch_idx,))
LOGGER.debug('stop flag sent')
if should_stop:
break
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"Best": min(self.history_loss)}))
self.postprocess()
self.callback_list.on_train_end()
self.set_summary(self.generate_summary())
def predict(self, data_inst=None):
LOGGER.debug('arbiter skip prediction')
@abc.abstractmethod
def fit_a_learner(self, epoch_idx: int, booster_dim: int):
raise NotImplementedError()
@abc.abstractmethod
def load_learner(self, model_meta, model_param, epoch_idx, booster_idx):
raise NotImplementedError()
| 13,545 | 39.195846 | 120 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/boosting/boosting.py
|
import copy
import functools
import typing
from abc import ABC
import abc
from numpy import random
import numpy as np
from federatedml.param.boosting_param import BoostingParam, ObjectiveParam
from federatedml.param.predict_param import PredictParam
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.model_selection import start_cross_validation
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.feature.fate_element_type import NoneType
from federatedml.ensemble.basic_algorithms import BasicAlgorithms
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import FairLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import HuberLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import LeastAbsoluteErrorLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import LeastSquaredErrorLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import LogCoshLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import TweedieLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import SigmoidBinaryCrossEntropyLoss
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.loss import SoftmaxCrossEntropyLoss
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.ensemble.boosting.predict_cache import PredictDataCache
from federatedml.statistic import data_overview
from federatedml.optim.convergence import converge_func_factory
from federatedml.statistic.data_overview import get_anonymous_header
from federatedml.util import LOGGER
class Boosting(ModelBase, ABC):
def __init__(self):
super(Boosting, self).__init__()
# input hyper parameter
self.task_type = None
self.learning_rate = None
self.start_round = None
self.boosting_round = None
self.n_iter_no_change = None
self.tol = 0.0
self.bin_num = None
self.calculated_mode = None
self.cv_param = None
self.validation_freqs = None
self.feature_name_fid_mapping = {}
self.mode = None
self.predict_param = PredictParam()
self.objective_param = ObjectiveParam()
self.model_param = BoostingParam()
self.subsample_feature_rate = 1.0
self.subsample_random_seed = None
self.model_name = 'default' # model name
self.early_stopping_rounds = None
self.use_first_metric_only = False
self.binning_error = consts.DEFAULT_RELATIVE_ERROR
# running variable
# random seed
self.random_seed = 100
# feat anonymous header
self.anonymous_header = None
# data
self.data_inst = None # original input data
self.binning_class = None # class used for data binning
self.binning_obj = None # instance of self.binning_class
self.data_bin = None # data with transformed features
self.bin_split_points = None # feature split points
self.bin_sparse_points = None # feature sparse points
self.use_missing = False # should handle missing value or not
self.zero_as_missing = False # set missing value as value or not
# booster
self.booster_dim = 1 # booster dimension
self.booster_meta = None # booster's hyper parameters
        self.boosting_model_list = []  # list holds boosters
# training
self.feature_num = None # feature number
self.init_score = None # init score
self.num_classes = 1 # number of classes
self.convergence = None # function to check loss convergence
self.classes_ = [] # list of class indices
self.y = None # label
self.y_hat = None # accumulated predict value
self.loss = None # loss func
self.predict_y_hat = None # accumulated predict value for predicting mode
self.history_loss = [] # list holds loss history
self.metrics = None
self.is_converged = False
self.is_warm_start = False # warm start parameter
self.on_training = False
# cache and header alignment
self.predict_data_cache = PredictDataCache()
self.data_alignment_map = {}
# federation
self.transfer_variable = None
def _init_model(self, boosting_param: BoostingParam):
self.task_type = boosting_param.task_type
self.objective_param = boosting_param.objective_param
self.learning_rate = boosting_param.learning_rate
self.boosting_round = boosting_param.num_trees
self.n_iter_no_change = boosting_param.n_iter_no_change
self.tol = boosting_param.tol
self.bin_num = boosting_param.bin_num
self.predict_param = boosting_param.predict_param
self.cv_param = boosting_param.cv_param
self.validation_freqs = boosting_param.validation_freqs
self.metrics = boosting_param.metrics
self.subsample_feature_rate = boosting_param.subsample_feature_rate
self.binning_error = boosting_param.binning_error
self.is_warm_start = self.component_properties.is_warm_start
LOGGER.debug('warm start is {}'.format(self.is_warm_start))
if boosting_param.random_seed is not None:
self.random_seed = boosting_param.random_seed
# initialize random seed here
LOGGER.debug('setting random seed done, random seed is {}'.format(self.random_seed))
np.random.seed(self.random_seed)
"""
Data Processing
"""
@staticmethod
def data_format_transform(row):
"""
transform data into sparse format
"""
if type(row.features).__name__ != consts.SPARSE_VECTOR:
feature_shape = row.features.shape[0]
indices = []
data = []
for i in range(feature_shape):
if np.isnan(row.features[i]):
indices.append(i)
data.append(NoneType())
elif np.abs(row.features[i]) < consts.FLOAT_ZERO:
continue
else:
indices.append(i)
data.append(row.features[i])
new_row = copy.deepcopy(row)
new_row.features = SparseVector(indices, data, feature_shape)
return new_row
else:
sparse_vec = row.features.get_sparse_vector()
replace_key = []
for key in sparse_vec:
if sparse_vec.get(key) == NoneType() or np.isnan(sparse_vec.get(key)):
replace_key.append(key)
if len(replace_key) == 0:
return row
else:
new_row = copy.deepcopy(row)
new_sparse_vec = new_row.features.get_sparse_vector()
for key in replace_key:
new_sparse_vec[key] = NoneType()
return new_row
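    # Illustrative example (added note, not part of the original source): a dense row with
    # features [0.0, 1.5, np.nan] becomes SparseVector(indices=[1, 2], data=[1.5, NoneType()],
    # shape=3) -- zeros are dropped and NaNs become NoneType placeholders, so missing values
    # survive the sparse encoding.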
def convert_feature_to_bin(self, data_instance, handle_missing_value=False):
"""
        fit binning split points and convert feature values to bin indices
"""
LOGGER.info("convert feature to bins")
param_obj = FeatureBinningParam(bin_num=self.bin_num, error=self.binning_error)
if handle_missing_value:
self.binning_obj = self.binning_class(param_obj, abnormal_list=[NoneType()], )
else:
self.binning_obj = self.binning_class(param_obj)
self.binning_obj.fit_split_points(data_instance)
rs = self.binning_obj.convert_feature_to_bin(data_instance)
LOGGER.info("convert feature to bins over")
return rs
def sample_valid_features(self):
LOGGER.info("sample valid features")
self.feature_num = self.bin_split_points.shape[0]
choose_feature = random.choice(range(0, self.feature_num),
max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
valid_features = [False for i in range(self.feature_num)]
for fid in choose_feature:
valid_features[fid] = True
return valid_features
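    # Illustrative example (added note): with 10 binned features and
    # subsample_feature_rate=0.5, max(1, int(0.5 * 10)) = 5 distinct feature ids are
    # drawn without replacement, so exactly 5 flags in valid_features come back True.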
@staticmethod
def data_alignment(data_inst):
"""
align data: abnormal detection and transform data to sparse format
"""
abnormal_detection.empty_table_detection(data_inst)
abnormal_detection.empty_feature_detection(data_inst)
schema = data_inst.schema
new_data_inst = data_inst.mapValues(lambda row: Boosting.data_format_transform(row))
new_data_inst.schema = schema
return new_data_inst
def data_and_header_alignment(self, data_inst):
"""
turn data into sparse and align header/ align data table header
"""
cache_dataset_key = self.predict_data_cache.get_data_key(data_inst)
if cache_dataset_key in self.data_alignment_map:
processed_data = self.data_alignment_map[cache_dataset_key]
else:
data_inst_tmp = self.data_alignment(data_inst)
header = [None] * len(self.feature_name_fid_mapping)
for idx, col in self.feature_name_fid_mapping.items():
header[idx] = col
processed_data = data_overview.header_alignment(data_inst_tmp, header,
pre_anonymous_header=get_anonymous_header(data_inst))
self.data_alignment_map[cache_dataset_key] = processed_data
return processed_data
@staticmethod
def gen_feature_fid_mapping(schema):
"""
generate {idx: feature_name} mapping
"""
header = schema.get("header")
feature_name_fid_mapping = dict(zip(range(len(header)), header))
LOGGER.debug("fid_mapping is {}".format(feature_name_fid_mapping))
return feature_name_fid_mapping
def prepare_data(self, data_inst):
"""
prepare data: data alignment, and transform feature to bin id
Args:
data_inst: training data
Returns: data_bin, data_split_points, data_sparse_point
"""
        # to sparse vec
data_inst = self.data_alignment(data_inst)
# binning
return self.convert_feature_to_bin(data_inst, self.use_missing)
@abc.abstractmethod
def check_label(self, *args) -> typing.Tuple[typing.List[int], int, int]:
"""
        Returns: class indices, number of classes, and booster dimension
"""
raise NotImplementedError()
@staticmethod
def get_label(data_bin):
"""
extract y label from Table
"""
y = data_bin.mapValues(lambda instance: instance.label)
return y
"""
Functions
"""
def cross_validation(self, data_instances):
return start_cross_validation.run(self, data_instances)
def feat_name_check(self, data_inst, feat_name_fid_mapping):
previous_model_feat_name = set(feat_name_fid_mapping.values())
cur_data_feat_name = set(data_inst.schema['header'])
assert previous_model_feat_name == cur_data_feat_name, 'feature alignment failed, diff: {}' \
.format(previous_model_feat_name.symmetric_difference(cur_data_feat_name))
LOGGER.debug('warm start feat name {}, {}'.format(previous_model_feat_name, cur_data_feat_name))
def get_loss_function(self):
loss_type = self.objective_param.objective
params = self.objective_param.params
LOGGER.info("set objective, objective is {}".format(loss_type))
if self.task_type == consts.CLASSIFICATION:
if loss_type == "cross_entropy":
if self.num_classes == 2:
loss_func = SigmoidBinaryCrossEntropyLoss()
else:
loss_func = SoftmaxCrossEntropyLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
elif self.task_type == consts.REGRESSION:
if loss_type == "lse":
loss_func = LeastSquaredErrorLoss()
elif loss_type == "lae":
loss_func = LeastAbsoluteErrorLoss()
elif loss_type == "huber":
loss_func = HuberLoss(params[0])
elif loss_type == "fair":
loss_func = FairLoss(params[0])
elif loss_type == "tweedie":
loss_func = TweedieLoss(params[0])
elif loss_type == "log_cosh":
loss_func = LogCoshLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
return loss_func
def get_metrics_param(self):
"""
        this interface gives the evaluation type; it will be called by the validation strategy
"""
if self.task_type == consts.CLASSIFICATION:
if self.num_classes == 2:
return EvaluateParam(eval_type="binary",
pos_label=self.classes_[1], metrics=self.metrics)
else:
return EvaluateParam(eval_type="multi", metrics=self.metrics)
else:
return EvaluateParam(eval_type="regression", metrics=self.metrics)
def compute_loss(self, y_hat, y, sample_weights=None):
"""
compute loss given predicted y and real y
"""
if self.task_type == consts.CLASSIFICATION:
loss_method = self.loss
y_predict = y_hat.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(y, y_predict, sample_weights)
elif self.task_type == consts.REGRESSION:
if self.objective_param.objective in ["lse", "lae", "logcosh", "log_cosh", "huber"]:
loss_method = self.loss
loss = loss_method.compute_loss(y, y_hat, sample_weights)
elif self.objective_param.objective in ['tweedie']:
loss_method = self.loss
y_predict = y_hat.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(y, y_predict, sample_weights)
return float(loss)
def check_convergence(self, loss):
"""
check if the loss converges
"""
LOGGER.info("check convergence")
if self.convergence is None:
self.convergence = converge_func_factory("diff", self.tol)
return self.convergence.is_converge(loss)
@staticmethod
def accumulate_y_hat(val, new_val, lr=0.1, idx=0):
# vector sum
if isinstance(new_val, np.ndarray) and len(new_val) == len(val):
return val + new_val * lr
# accumulate by dimension
z_vec = np.zeros(len(val))
z_vec[idx] = lr * new_val
return z_vec + val
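    # A minimal worked sketch of accumulate_y_hat (illustrative values, not from the source):
    #   accumulate_y_hat(np.array([1.0, 2.0]), np.array([0.5, 0.5]), lr=0.1)
    #     -> array([1.05, 2.05])   # vector-sum branch
    #   accumulate_y_hat(np.array([1.0, 2.0]), 0.5, lr=0.1, idx=1)
    #     -> array([1.0, 2.05])    # per-dimension accumulation branch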
def generate_flowid(self, round_num, dim):
LOGGER.info("generate flowid, flowid {}".format(self.flowid))
return ".".join(map(str, [self.flowid, round_num, dim]))
def get_new_predict_score(self, y_hat, cur_sample_weights, dim=0):
func = functools.partial(self.accumulate_y_hat, lr=self.learning_rate, idx=dim)
return y_hat.join(cur_sample_weights, func)
def _get_cv_param(self):
self.model_param.cv_param.role = self.role
self.model_param.cv_param.mode = self.mode
return self.model_param.cv_param
"""
fit and predict
"""
@abc.abstractmethod
def fit(self, data_inst, validate_data=None):
raise NotImplementedError()
@abc.abstractmethod
def predict(self, data_inst):
raise NotImplementedError()
@abc.abstractmethod
def generate_summary(self) -> dict:
"""
return model summary
"""
raise NotImplementedError()
"""
Training Procedure
"""
def get_init_score(self, y, num_classes: int):
if num_classes > 2:
y_hat, init_score = self.loss.initialize(y, num_classes)
else:
y_hat, init_score = self.loss.initialize(y)
return y_hat, init_score
@abc.abstractmethod
def fit_a_learner(self, *args) -> BasicAlgorithms:
"""
fit a booster and return it
"""
raise NotImplementedError()
"""
Prediction Procedure
"""
@abc.abstractmethod
def load_learner(self, *args):
"""
load a booster
"""
raise NotImplementedError()
def score_to_predict_result(self, data_inst, y_hat):
"""
given binary/multi-class/regression prediction scores, outputs result in standard format
"""
predicts = None
loss_method = self.loss
if self.task_type == consts.CLASSIFICATION:
if self.num_classes == 2:
predicts = y_hat.mapValues(lambda f: float(loss_method.predict(f)))
else:
predicts = y_hat.mapValues(lambda f: loss_method.predict(f).tolist())
elif self.task_type == consts.REGRESSION:
if self.objective_param.objective in ["tweedie"]:
predicts = y_hat.mapValues(lambda f: [float(loss_method.predict(f))])
elif self.objective_param.objective in ["lse", "lae", "huber", "log_cosh", "fair"]:
predicts = y_hat
else:
raise NotImplementedError("objective {} not supprted yet".format(self.objective_param.objective))
if self.task_type == consts.CLASSIFICATION:
predict_result = self.predict_score_to_output(data_inst, predict_score=predicts, classes=self.classes_,
threshold=self.predict_param.threshold)
elif self.task_type == consts.REGRESSION:
predicts = predicts.mapValues(lambda x: x[0])
predict_result = self.predict_score_to_output(data_inst, predict_score=predicts, classes=None)
else:
raise NotImplementedError("task type {} not supported yet".format(self.task_type))
return predict_result
"""
Model IO
"""
@abc.abstractmethod
def get_model_meta(self):
raise NotImplementedError()
@abc.abstractmethod
def get_model_param(self):
raise NotImplementedError()
@abc.abstractmethod
def set_model_meta(self, model_meta):
raise NotImplementedError()
@abc.abstractmethod
def set_model_param(self, model_param):
raise NotImplementedError()
def preprocess(self):
pass
def postprocess(self):
pass
def get_cur_model(self):
meta_name, meta_protobuf = self.get_model_meta()
param_name, param_protobuf = self.get_model_param()
return {meta_name: meta_protobuf,
param_name: param_protobuf
}
def export_model(self):
if self.need_cv:
return None
return self.get_cur_model()
def load_model(self, model_dict, model_key="model"):
model_param = None
model_meta = None
for _, value in model_dict[model_key].items():
for model in value:
if model.endswith("Meta"):
model_meta = value[model]
if model.endswith("Param"):
model_param = value[model]
LOGGER.info("load model")
self.set_model_meta(model_meta)
self.set_model_param(model_param)
def predict_proba(self, data_inst):
pass
def save_data(self):
return self.data_output
def save_model(self):
pass
| 19,801 | 36.291902 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/ensemble/boosting/__init__.py
|
from federatedml.ensemble.boosting.boosting import Boosting
from federatedml.ensemble.boosting.hetero_boosting import HeteroBoostingGuest, HeteroBoostingHost
__all__ = ["Boosting", "HeteroBoostingGuest", "HeteroBoostingHost"]
| 227 | 44.6 | 97 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
from pathlib import Path
import inspect
def get_proto_buffer_class(buffer_name):
package_base_path = Path(__file__).absolute().parent.parent.parent
package_path = Path(__file__).absolute().parent.joinpath("generated")
for f in package_path.glob("*.py"):
module_rel_path = package_path.joinpath(f.stem).relative_to(package_base_path)
module_path = f"{module_rel_path}".replace("/", ".")
proto_module = importlib.import_module(module_path)
for name, obj in inspect.getmembers(proto_module):
if inspect.isclass(obj) and name == buffer_name:
return obj
raise ModuleNotFoundError(buffer_name)
def parse_pb_buffer(pb_name, pb_buffer):
pb_object = get_proto_buffer_class(pb_name)()
pb_object.ParseFromString(pb_buffer)
return pb_object
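# Usage sketch (hedged; "BoostingTreeModelMeta" and serialized_bytes are
# illustrative placeholders):
#   pb_object = parse_pb_buffer("BoostingTreeModelMeta", serialized_bytes)
#   assert type(pb_object).__name__ == "BoostingTreeModelMeta"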
def deserialize_models(model_input):
for model_type, models in model_input.items():
for cpn_name, cpn_models in models.items():
for model_name, (pb_name, pb_buffer) in cpn_models.items():
model_input[model_type][cpn_name][model_name] = parse_pb_buffer(pb_name, pb_buffer)
| 1,769 | 37.478261 | 99 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/component_converter.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ComponentConverterBase(object):
"""Base class representing a component converter
A component converter expects a model dict format that
contains "XXXMeta" and "XXXParam" as keys.
"""
@staticmethod
def get_target_modules():
"""Returns the component model type that this converter supports.
"""
return []
def convert(self, model_dict):
return model_dict
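# A minimal subclass sketch (hypothetical "HomoFoo" module and key names, for
# illustration only):
#   class FooConverter(ComponentConverterBase):
#       @staticmethod
#       def get_target_modules():
#           return ["HomoFoo"]
#       def convert(self, model_dict):
#           return model_dict["HomoFooParam"]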
| 1,039 | 30.515152 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/homo_model_convert.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import inspect
import os
from federatedml.util import LOGGER
from .component_converter import ComponentConverterBase
SKLEARN_FILENAME = "sklearn.joblib"
PYTORCH_FILENAME = "pytorch.pth"
TF_DIRNAME = "tensorflow_saved_model"
LGB_FILENAME = "lgb.txt"
def _get_component_converter(module_name: str,
framework_name: str):
if framework_name in ["tensorflow", "tf", "tf_keras"]:
framework_name = "tf_keras"
elif framework_name in ["pytorch", "torch"]:
framework_name = "pytorch"
elif framework_name in ["sklearn", "scikit-learn"]:
framework_name = "sklearn"
elif framework_name in ['lightgbm']:
framework_name = 'lightgbm'
package_name = "." + framework_name
parent_package = importlib.import_module(package_name, __package__)
parent_package_path = os.path.dirname(os.path.realpath(parent_package.__file__))
for f in os.listdir(parent_package_path):
if f.startswith('.') or f.startswith('_'):
continue
if not f.endswith('.py'):
continue
        # note: rstrip('.py') strips any trailing '.', 'p' or 'y' characters rather than
        # the suffix, so slice off the extension instead
        proto_module = importlib.import_module("." + f[:-3], parent_package.__name__)
for name, obj in inspect.getmembers(proto_module):
if inspect.isclass(obj) and issubclass(obj, ComponentConverterBase):
for module in obj.get_target_modules():
if module.lower() == module_name.lower():
return framework_name, obj()
return None, None
def get_default_target_framework(model_contents: dict,
module_name: str):
"""
Returns the name of a supported ML framework based on the
original FATE model module name and model contents.
:param model_contents: the model content of the FATE model
:param module_name: The module name, typically as HomoXXXX.
:return: the corresponding framework name that this model can be converted to.
"""
framework_name = None
if module_name == "HomoLR":
framework_name = "sklearn"
elif module_name == 'HomoNN':
        # as of FATE 1.10, only pytorch is supported
framework_name = "pytorch"
# if model_contents['HomoNNModelMeta'].params.config_type == "pytorch":
# framework_name = "pytorch"
# else:
# framework_name = "tf_keras"
elif module_name.lower() == 'homosecureboost':
framework_name = 'lightgbm'
else:
LOGGER.debug(
f"Module {module_name} is not a supported homogeneous model")
return framework_name
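# Default mapping implemented above: HomoLR -> sklearn, HomoNN -> pytorch,
# HomoSecureboost -> lightgbm; any other module yields None.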
def model_convert(model_contents: dict,
module_name: str,
framework_name=None):
"""Convert a Homo model component into format of a common ML framework
:param model_contents: The model dict un-serialized from the model protobuf.
:param module_name: The module name, typically as HomoXXXX.
:param framework_name: The wanted framework, e.g. "sklearn", "pytorch", etc.
If not specified, the target framework will be chosen
automatically.
    :return: the converted framework name and an instance of the model object from
the specified framework.
"""
if not framework_name:
framework_name = get_default_target_framework(
model_contents, module_name)
if not framework_name:
return None, None
target_framework, component_converter = _get_component_converter(
module_name, framework_name)
if not component_converter:
        LOGGER.warning(
            f"Module {module_name} cannot be converted to framework {framework_name}")
return None, None
LOGGER.info(
f"Converting {module_name} module to a model of framework {target_framework}")
return target_framework, component_converter.convert(model_contents)
def _get_model_saver_loader(framework_name: str):
if framework_name in ["sklearn", "scikit-learn"]:
import joblib
return joblib.dump, joblib.load, SKLEARN_FILENAME
elif framework_name in ["pytorch", "torch"]:
import torch
return torch.save, torch.load, PYTORCH_FILENAME
elif framework_name in ["tensorflow", "tf", "tf_keras"]:
import tensorflow
return tensorflow.saved_model.save, tensorflow.saved_model.load, TF_DIRNAME
elif framework_name in ['lightgbm']:
from federatedml.protobuf.homo_model_convert.lightgbm.gbdt import save_lgb, load_lgb
return save_lgb, load_lgb, LGB_FILENAME
else:
raise NotImplementedError("save method for framework: {} is not implemented"
.format(framework_name))
def save_converted_model(model_object,
framework_name: str,
base_dir: str):
"""Save the model into target destination
:param model_object: the model object
:param framework_name: name of the framework of the model
:param base_dir: the base directory to save the model file
:return: local file/folder path
"""
save, _, dest_filename = _get_model_saver_loader(framework_name)
dest = os.path.join(base_dir, dest_filename)
save(model_object, dest)
LOGGER.info(f"Saved {framework_name} model to {dest}")
return dest
def load_converted_model(framework_name: str,
base_dir: str):
"""Load a model from the specified directory previously used to save the converted model
:param framework_name: name of the framework of the model
:param base_dir: the base directory to save the model file
:return: model object of the specified framework
"""
_, load, src_filename = _get_model_saver_loader(framework_name)
src = os.path.join(base_dir, src_filename)
if not os.path.exists(src):
raise FileNotFoundError(
"expected file or folder {} doesn't exist".format(src))
return load(src)
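# End-to-end usage sketch (hedged; "model_contents" is assumed to be a model
# dict deserialized from protobuf, and "/tmp/converted" an existing directory):
#   framework, model = model_convert(model_contents, "HomoLR")
#   dest = save_converted_model(model, framework, "/tmp/converted")
#   restored = load_converted_model(framework, "/tmp/converted")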
| 6,574 | 38.136905 | 94 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/__init__.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/lightgbm/gbdt.py
|
import numpy as np
import lightgbm as lgb
from ..component_converter import ComponentConverterBase
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam, \
DecisionTreeModelParam, NodeParam
from federatedml.util import consts
from federatedml.util import LOGGER
"""
We only keep the necessary variable to make sure that lightgbm can run predict function on the converted model
"""
FAKE_FEATURE_INFO_STR = '[0:1] '
END_OF_TREE = 'end of trees'
END_OF_PARA = 'end of parameters'
SPLIT = '\n\n'
HEADER_TEMPLATE = """tree
version=v3
num_class={}
num_tree_per_iteration={}
label_index={}
max_feature_idx={}
objective={}
feature_names={}
feature_infos={}
"""
TREE_TEMPLATE = """Tree={}
num_leaves={}
num_cat={}
split_feature={}
threshold={}
decision_type={}
left_child={}
right_child={}
leaf_value={}
internal_value={}
shrinkage={}
"""
PARA_TEMPLATE = """parameters:
[boosting: gbdt]
[objective: {}]
[num_iterations: {}]
[learning_rate: {}]
[max_depth: {}]
[max_bin: {}]
[use_missing: {}]
[zero_as_missing: {}]
[num_class: {}]
[lambda_l1: {}]
[lambda_l2: {}]
[min_data_in_leaf: {}]
[min_gain_to_split: {}]
"""
LGB_OBJECTIVE = {
consts.BINARY: "binary sigmoid:1",
consts.REGRESSION: "regression",
consts.MULTY: 'multiclass num_class:{}'
}
PARA_OBJECTIVE = {
consts.BINARY: "binary",
consts.REGRESSION: "regression",
consts.MULTY: 'multiclass'
}
def get_decision_type(node: NodeParam, use_missing, zero_as_missing):
    # decision_type bit layout: [missing value type: NaN / 0 / None] [default left or right] [categorical feature or not]
default_type = 0 # 0000 None, default right, not cat feat
if not use_missing:
return default_type
if node.missing_dir == -1:
default_type = default_type | 2 # 0010
if zero_as_missing:
default_type = default_type | 4 # 0100 0
else:
default_type = default_type | 8 # 1000 np.Nan
return default_type
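# Worked example of the bit flags (illustrative: use_missing=True,
# zero_as_missing=False, node.missing_dir == -1):
#   0 | 2 (default left) | 8 (treat np.nan as missing) == 10, i.e. 0b1010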
def get_lgb_objective(task_type, num_classes, ret_dict, need_multi_format=True):
if task_type == consts.CLASSIFICATION:
if num_classes == 1:
objective = ret_dict[consts.BINARY]
else:
objective = ret_dict[consts.MULTY].format(num_classes) if need_multi_format else ret_dict[consts.MULTY]
else:
objective = ret_dict[consts.REGRESSION]
return objective
def list_to_str(l_):
return str(l_).replace('[', '').replace(']', '').replace(',', '')
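# e.g. list_to_str([1, 2, 3]) returns '1 2 3' (brackets and commas stripped)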
def parse_header(param: BoostingTreeModelParam, meta: BoostingTreeModelMeta):
    # generate the header of the lgb string model file
    # binary/regression num_class is 1 in lgb
num_classes = len(param.classes_) if len(param.classes_) > 2 else 1
objective = get_lgb_objective(meta.task_type, num_classes, LGB_OBJECTIVE, need_multi_format=True)
num_tree_per_iteration = param.tree_dim
label_index = 0 # by default
max_feature_idx = len(param.feature_name_fid_mapping) - 1
feature_names = ''
for name in [param.feature_name_fid_mapping[i] for i in range(max_feature_idx + 1)]:
if ' ' in name: # space is not allowed
name = name.replace(' ', '-')
feature_names += name + ' '
feature_names = feature_names[:-1]
feature_info = FAKE_FEATURE_INFO_STR * (max_feature_idx + 1) # need to make fake feature info
feature_info = feature_info[:-1]
result_str = HEADER_TEMPLATE.format(num_classes, num_tree_per_iteration, label_index, max_feature_idx,
objective, feature_names, feature_info)
return result_str
def internal_count_computer(cur_id, tree_node, leaf_count, internal_count):
if cur_id in leaf_count:
return leaf_count[cur_id]
left_count = internal_count_computer(tree_node[cur_id].left_nodeid, tree_node, leaf_count, internal_count)
right_count = internal_count_computer(tree_node[cur_id].right_nodeid, tree_node, leaf_count, internal_count)
internal_count[cur_id] = left_count + right_count
return internal_count[cur_id]
def compute_internal_count(tree_param: DecisionTreeModelParam):
root = tree_param.tree_[0]
internal_count = {}
leaf_count = tree_param.leaf_count
root_count = internal_count_computer(root.id, tree_param.tree_, leaf_count, internal_count)
if root.id not in internal_count:
        internal_count[root.id] = root_count
return internal_count
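# Sketch (illustrative counts): for a root whose two children are leaves with
# leaf_count {left: 30, right: 70}, internal_count[root.id] == 100.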
def update_leaf_count(param):
    # in homo sbt, a leaf sometimes covers no sample, so add 1 to its leaf count
tmp = {}
for i in param.leaf_count:
tmp[i] = param.leaf_count[i]
for i in tmp:
if tmp[i] == 0:
param.leaf_count[i] += 1
def parse_a_tree(
param: DecisionTreeModelParam,
tree_idx: int,
use_missing=False,
zero_as_missing=False,
learning_rate=0.1,
init_score=None):
split_feature = []
split_threshold = []
decision_type = []
internal_weight = []
leaf_weight = []
left, right = [], []
leaf_idx = -1
lgb_node_idx = 0
sbt_lgb_node_map = {}
is_leaf = []
leaf_count = []
internal_count, internal_count_dict = [], {}
has_count_info = len(param.leaf_count) != 0
# compute internal count
if has_count_info:
update_leaf_count(param)
internal_count_dict = compute_internal_count(param) # get internal count from leaf count
# mark leaf nodes and get sbt-lgb node mapping
for node in param.tree_:
is_leaf.append(node.is_leaf)
if not node.is_leaf:
sbt_lgb_node_map[node.id] = lgb_node_idx
lgb_node_idx += 1
for cur_idx, node in enumerate(param.tree_):
if not node.is_leaf:
split_feature.append(node.fid)
            # if this is a hetero model, decode the split point and missing dir
if param.split_maskdict and param.missing_dir_maskdict is not None:
node.bid = param.split_maskdict[node.id]
node.missing_dir = param.missing_dir_maskdict[node.id]
# extract split point and weight
split_threshold.append(node.bid)
internal_weight.append(node.weight)
# add internal count
if has_count_info:
internal_count.append(internal_count_dict[node.id])
if is_leaf[node.left_nodeid]: # generate lgb leaf idx
left.append(leaf_idx)
if has_count_info:
leaf_count.append(param.leaf_count[node.left_nodeid])
leaf_idx -= 1
else:
left.append(sbt_lgb_node_map[node.left_nodeid])
if is_leaf[node.right_nodeid]: # generate lgb leaf idx
right.append(leaf_idx)
if has_count_info:
leaf_count.append(param.leaf_count[node.right_nodeid])
leaf_idx -= 1
else:
right.append(sbt_lgb_node_map[node.right_nodeid])
# get lgb decision type
decision_type.append(get_decision_type(node, use_missing, zero_as_missing))
else:
# regression model need to add init score
if init_score is not None:
score = node.weight * learning_rate + init_score
else:
# leaf value is node.weight * learning_rate in lgb
score = node.weight * learning_rate
leaf_weight.append(score)
leaves_num = len(leaf_weight)
num_cat = 0
# to string
result_str = TREE_TEMPLATE.format(tree_idx, leaves_num, num_cat, list_to_str(split_feature),
list_to_str(split_threshold), list_to_str(decision_type),
list_to_str(left), list_to_str(right), list_to_str(leaf_weight),
list_to_str(internal_weight), learning_rate)
if len(internal_count) != 0:
result_str += 'internal_count={}\n'.format(list_to_str(internal_count))
if len(leaf_count) != 0:
result_str += 'leaf_count={}\n'.format(list_to_str(leaf_count))
return result_str
def parse_feature_importance(param):
feat_importance_str = "feature_importances:\n"
mapping = param.feature_name_fid_mapping
for impt in param.feature_importances:
impt_val = impt.importance
try:
if impt.main == 'split':
impt_val = int(impt_val)
except BaseException:
LOGGER.warning("old version protobuf contains no filed 'main'")
feat_importance_str += '{}={}\n'.format(mapping[impt.fid], impt_val)
return feat_importance_str
def parse_parameter(param, meta):
"""
we only keep parameters offered by SBT
"""
tree_meta = meta.tree_meta
num_classes = 1 if meta.task_type == consts.CLASSIFICATION and param.num_classes < 3 else param.num_classes
objective = get_lgb_objective(meta.task_type, num_classes, PARA_OBJECTIVE, need_multi_format=False)
rs = PARA_TEMPLATE.format(objective, meta.num_trees, meta.learning_rate, tree_meta.max_depth,
meta.quantile_meta.bin_num, meta.tree_meta.use_missing + 0,
meta.tree_meta.zero_as_missing + 0,
num_classes, tree_meta.criterion_meta.criterion_param[0],
tree_meta.criterion_meta.criterion_param[1],
tree_meta.min_leaf_node,
tree_meta.min_impurity_split
)
return rs
def sbt_to_lgb(model_param: BoostingTreeModelParam,
model_meta: BoostingTreeModelMeta,
load_feature_importance=True):
"""
Transform sbt model to lgb model
"""
result = ''
# parse header
header_str = parse_header(model_param, model_meta)
use_missing = model_meta.tree_meta.use_missing
zero_as_missing = model_meta.tree_meta.zero_as_missing
learning_rate = model_meta.learning_rate
tree_str_list = []
# parse tree
for idx, param in enumerate(model_param.trees_):
if idx == 0 and model_meta.task_type == consts.REGRESSION: # regression task has init score
init_score = model_param.init_score[0]
else:
init_score = 0
tree_str_list.append(parse_a_tree(param, idx, use_missing, zero_as_missing, learning_rate, init_score))
# add header and tree str to result
result += header_str + '\n'
for s in tree_str_list:
result += s
result += SPLIT
result += END_OF_TREE
# handle feature importance
if load_feature_importance:
feat_importance_str = parse_feature_importance(model_param)
result += SPLIT + feat_importance_str
# parameters
para_str = parse_parameter(model_param, model_meta)
result += '\n' + para_str + '\n' + END_OF_PARA + '\n'
result += '\npandas_categorical:[]\n'
return result
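# Usage sketch (hedged; param_obj/meta_obj are assumed to be deserialized SBT
# protobuf objects):
#   lgb_model = lgb.Booster(model_str=sbt_to_lgb(param_obj, meta_obj))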
def save_lgb(model: lgb.Booster, path):
    model_str = model.model_to_string()
    with open(path, 'w') as f:
        f.write(model_str)
def load_lgb(path):
    with open(path, 'r') as f:
        model_str = f.read()
    lgb_model = lgb.Booster(model_str=model_str)
    return lgb_model
class HomoSBTComponentConverter(ComponentConverterBase):
@staticmethod
def get_target_modules():
return ['HomoSecureboost']
def convert(self, model_dict):
param_obj = model_dict["HomoSecureBoostingTreeGuestParam"]
meta_obj = model_dict["HomoSecureBoostingTreeGuestMeta"]
lgb_model_str = sbt_to_lgb(param_obj, meta_obj)
lgb_model = lgb.Booster(model_str=lgb_model_str)
return lgb_model
| 11,820 | 31.03523 | 115 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/lightgbm/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/test/homo_nn_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import tempfile
import torch as t
from collections import OrderedDict
from federatedml.nn.backend.utils.common import get_torch_model_bytes
from federatedml.protobuf.homo_model_convert.homo_model_convert import model_convert, save_converted_model
from federatedml.protobuf.generated.homo_nn_model_meta_pb2 import HomoNNMeta
from federatedml.protobuf.generated.homo_nn_model_param_pb2 import HomoNNParam
class FakeModule(t.nn.Module):
def __init__(self):
super(FakeModule, self).__init__()
self.fc = t.nn.Linear(100, 10)
self.transformer = t.nn.Transformer()
def forward(self, x):
print(self.fc)
return x
class TestHomoNNConverter(unittest.TestCase):
def _get_param_meta(self, torch_model):
param = HomoNNParam()
meta = HomoNNMeta()
# save param
param.model_bytes = get_torch_model_bytes({'model': torch_model.state_dict()})
return param, meta
def setUp(self):
self.param_list = []
self.meta_list = []
self.model_list = []
# generate some pytorch model
model = t.nn.Sequential(
t.nn.Linear(10, 10),
t.nn.ReLU(),
t.nn.LSTM(input_size=10, hidden_size=10),
t.nn.Sigmoid()
)
self.model_list.append(model)
param, meta = self._get_param_meta(model)
self.param_list.append(param)
self.meta_list.append(meta)
model = t.nn.Sequential(t.nn.ReLU())
self.model_list.append(model)
param, meta = self._get_param_meta(model)
self.param_list.append(param)
self.meta_list.append(meta)
fake_model = FakeModule()
self.model_list.append(fake_model)
param, meta = self._get_param_meta(fake_model)
self.param_list.append(param)
self.meta_list.append(meta)
def test_pytorch_converter(self):
for param, meta, origin_model in zip(self.param_list, self.meta_list, self.model_list):
target_framework, model = self._do_convert(param, meta)
self.assertTrue(target_framework == "pytorch")
self.assertTrue(isinstance(model['model'], OrderedDict)) # state dict
origin_model.load_state_dict(model['model']) # can load state dict
with tempfile.TemporaryDirectory() as d:
dest = save_converted_model(model, target_framework, d)
self.assertTrue(os.path.isfile(dest))
self.assertTrue(dest.endswith(".pth"))
@staticmethod
def _do_convert(model_param, model_meta):
return model_convert(model_contents={
'HomoNNParam': model_param,
'HomoNNMeta': model_meta
},
module_name='HomoNN')
if __name__ == '__main__':
unittest.main()
| 3,476 | 33.425743 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/test/homo_lr_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import tempfile
from sklearn.linear_model import LogisticRegression
from federatedml.protobuf.homo_model_convert.homo_model_convert import model_convert, save_converted_model
from federatedml.protobuf.generated.lr_model_param_pb2 import LRModelParam
from federatedml.protobuf.generated.lr_model_meta_pb2 import LRModelMeta
class TestHomoLRConverter(unittest.TestCase):
def setUp(self):
param_dict = {
'iters': 5,
'loss_history': [],
'is_converged': True,
'weight': {
'x3': 0.3,
'x2': 0.2,
'x1': 0.1
},
'intercept': 0.5,
'header': ['x1', 'x2', 'x3'],
'best_iteration': -1
}
self.model_param = LRModelParam(**param_dict)
meta_dict = {
'penalty': 'l2',
'tol': 1e-05,
'fit_intercept': True,
'optimizer': 'sgd',
'max_iter': 5,
'alpha': 0.01
}
self.model_meta = LRModelMeta(**meta_dict)
def test_sklearn_converter(self):
target_framework, model = model_convert(model_contents={
'HomoLogisticRegressionParam': self.model_param,
'HomoLogisticRegressionMeta': self.model_meta
},
module_name='HomoLR',
framework_name='sklearn')
self.assertTrue(target_framework == 'sklearn')
self.assertTrue(isinstance(model, LogisticRegression))
self.assertTrue(model.intercept_[0] == self.model_param.intercept)
self.assertTrue(model.coef_.shape == (1, len(self.model_param.header)))
self.assertTrue(model.tol == self.model_meta.tol)
with tempfile.TemporaryDirectory() as d:
dest = save_converted_model(model, target_framework, d)
self.assertTrue(os.path.isfile(dest))
self.assertTrue(dest.endswith(".joblib"))
if __name__ == '__main__':
unittest.main()
| 2,649 | 33.415584 | 106 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/tf_keras/nn.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import tempfile
import tensorflow
import zipfile
from ..component_converter import ComponentConverterBase
class NNComponentConverter(ComponentConverterBase):
@staticmethod
def get_target_modules():
return ['HomoNN']
def convert(self, model_dict):
param_obj = model_dict["HomoNNModelParam"]
meta_obj = model_dict["HomoNNModelMeta"]
if meta_obj.params.config_type != "nn" and meta_obj.params.config_type != "keras":
raise ValueError("Invalid config type: {}".format(meta_obj.config_type))
with tempfile.TemporaryDirectory() as tmp_path:
with io.BytesIO(param_obj.saved_model_bytes) as bytes_io:
with zipfile.ZipFile(bytes_io, 'r', zipfile.ZIP_DEFLATED) as f:
f.extractall(tmp_path)
try:
model = tensorflow.keras.models.load_model(tmp_path)
            except Exception:
model = tensorflow.compat.v1.keras.experimental.load_from_saved_model(tmp_path)
return model
| 1,695 | 35.869565 | 103 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/tf_keras/__init__.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/sklearn/logistic_regression.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from sklearn.linear_model import LogisticRegression
from ..component_converter import ComponentConverterBase
class LRComponentConverter(ComponentConverterBase):
@staticmethod
def get_target_modules():
return ['HomoLR']
def convert(self, model_dict):
param_obj = model_dict["HomoLogisticRegressionParam"]
meta_obj = model_dict["HomoLogisticRegressionMeta"]
sk_lr_model = LogisticRegression(penalty=meta_obj.penalty.lower(),
tol=meta_obj.tol,
fit_intercept=meta_obj.fit_intercept,
max_iter=meta_obj.max_iter)
coefficient = np.empty((1, len(param_obj.header)))
for index in range(len(param_obj.header)):
coefficient[0][index] = param_obj.weight[param_obj.header[index]]
sk_lr_model.coef_ = coefficient
sk_lr_model.intercept_ = np.array([param_obj.intercept])
# hard-coded 0-1 classification as HomoLR only supports this for now
sk_lr_model.classes_ = np.array([0., 1.])
sk_lr_model.n_iter_ = [param_obj.iters]
return sk_lr_model
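# Usage sketch (hedged; model_dict is assumed to hold the two protobufs named
# above):
#   sk_model = LRComponentConverter().convert(model_dict)
#   sk_model.predict_proba(np.zeros((1, len(sk_model.coef_[0]))))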
| 1,812 | 37.574468 | 78 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/sklearn/__init__.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/protobuf/homo_model_convert/pytorch/nn.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import torch as t
import tempfile
from ..component_converter import ComponentConverterBase
class NNComponentConverter(ComponentConverterBase):
@staticmethod
def get_target_modules():
return ['HomoNN']
def convert(self, model_dict):
param_obj = model_dict["HomoNNParam"]
meta_obj = model_dict["HomoNNMeta"]
if not hasattr(param_obj, 'model_bytes'):
raise ValueError("Did not find model_bytes in model param protobuf")
with tempfile.TemporaryFile() as f:
f.write(param_obj.model_bytes)
f.seek(0)
model_dict = t.load(f)
return model_dict
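# Usage sketch (hedged; mirrors the round trip in the unit test): the returned
# dict maps names to state dicts, e.g.
#   net.load_state_dict(NNComponentConverter().convert(model_dict)['model'])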
| 1,278 | 28.744186 | 80 |
py
|