Schema (one record per source file):

  repo             string (length 2–99)
  file             string (length 13–225)
  code             string (length 0–18.3M)
  file_length      int64 (0–18.3M)
  avg_line_length  float64 (0–1.36M)
  max_line_length  int64 (0–4.26M)
  extension_type   string (1 distinct value)

Each record below lists the repo, the file path, the per-file statistics, and the file's code.
repo: FATE
file: FATE-master/python/federatedml/feature/feature_selection/model_adapter/pearson_adapter.py
file_length: 2,559 | avg_line_length: 34.068493 | max_line_length: 88 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from federatedml.feature.feature_selection.model_adapter import isometric_model
from federatedml.feature.feature_selection.model_adapter.adapter_base import BaseAdapter
from federatedml.util import consts


class PearsonMetricInfo(object):
    def __init__(self, local_corr, col_names, corr=None, host_col_names=None, parties=None):
        self.local_corr = local_corr
        self.col_names = col_names
        self.corr = corr
        self.host_col_names = host_col_names
        self.parties = parties

    @property
    def host_party_id(self):
        assert isinstance(self.parties, list) and len(self.parties) == 2
        return self.parties[1][1]


class PearsonAdapter(BaseAdapter):
    def convert(self, model_meta, model_param):
        col_names = list(model_param.names)
        result = isometric_model.IsometricModel()

        # corr
        local_corr = np.array(model_param.local_corr).reshape(
            model_param.shape, model_param.shape
        )
        if model_param.corr:
            corr = np.array(model_param.corr).reshape(*model_param.shapes)
            host_names = list(list(model_param.all_names)[1].names)
            parties = list(model_param.parties)
        else:
            corr = None
            host_names = None
            parties = None
        pearson_metric = PearsonMetricInfo(
            local_corr=local_corr,
            col_names=col_names,
            corr=corr,
            host_col_names=host_names,
            parties=parties,
        )
        result.add_metric_value(metric_name=consts.PEARSON, metric_info=pearson_metric)

        # local vif
        local_vif = model_param.local_vif
        if local_vif:
            single_info = isometric_model.SingleMetricInfo(
                values=local_vif, col_names=col_names
            )
            result.add_metric_value(metric_name=consts.VIF, metric_info=single_info)
        return result
```
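For orientation, a minimal sketch of the converted Pearson metric object, assuming FATE is installed. The values are made up, and the `parties` layout (a two-entry list of `(role, party_id)` pairs, inferred from the indexing in `host_party_id`) is an assumption; in FATE these fields are populated by `PearsonAdapter.convert` from a Pearson model protobuf.

```python
import numpy as np

from federatedml.feature.feature_selection.model_adapter.pearson_adapter import PearsonMetricInfo

# Hypothetical stand-in values for illustration only.
info = PearsonMetricInfo(
    local_corr=np.eye(2),                        # guest-local correlation matrix
    col_names=["x0", "x1"],
    corr=np.ones((2, 1)),                        # cross-party correlation
    host_col_names=[["y0"]],
    parties=[("guest", 9999), ("host", 10000)],  # assumed (role, party_id) layout
)
print(info.host_party_id)  # -> 10000, i.e. parties[1][1]
```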
repo: FATE
file: FATE-master/python/federatedml/feature/feature_selection/model_adapter/isometric_model.py
file_length: 5,845 | avg_line_length: 32.405714 | max_line_length: 95 | extension_type: py

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import numpy as np

from federatedml.util import LOGGER


class SingleMetricInfo(object):
    """
    Used to store metric values.

    Parameters
    ----------
    values: ndarray or list
        Metric value of each column. Missing values are not accepted.

    col_names: list
        Column names corresponding to the values above; its length should
        match the length of values.

    host_party_ids: list of int (party_id, such as 9999)
        If it is a federated metric, list of host party ids.

    host_values: list of ndarray
        The outer list has one entry per host; each inner array holds that
        party's values.

    host_col_names: list of list
        Same layout as host_values, holding column names instead of values.
    """

    def __init__(self, values, col_names, host_party_ids=None,
                 host_values=None, host_col_names=None):
        if host_party_ids is None:
            host_party_ids = []
        if host_values is None:
            host_values = []
        if host_col_names is None:
            host_col_names = []
        self.values = values
        self.col_names = col_names
        self.host_party_ids = host_party_ids
        self.host_values = host_values
        self.host_col_names = host_col_names
        self.check()

    def check(self):
        if len(self.values) != len(self.col_names):
            raise ValueError("When creating SingleMetricValue, length of values "
                             "and length of col_names should be equal")
        if not (len(self.host_party_ids) == len(self.host_values) == len(self.host_col_names)):
            raise ValueError("When creating SingleMetricValue, host_party_ids, "
                             "host_values and host_col_names should have equal length")

    def union_result(self):
        values = list(self.values)
        col_names = [("guest", x) for x in self.col_names]
        for idx, host_id in enumerate(self.host_party_ids):
            values.extend(self.host_values[idx])
            col_names.extend([(host_id, x) for x in self.host_col_names[idx]])
        if len(values) != len(col_names):
            raise AssertionError("union values and col_names should have same length")
        values = np.array(values)
        return values, col_names

    def get_values(self):
        return copy.deepcopy(self.values)

    def get_col_names(self):
        return copy.deepcopy(self.col_names)

    def get_partial_values(self, select_col_names, party_id=None):
        """
        Return values selected by the provided col_names. Use party_id to
        indicate which party to read from: if None, read from values;
        otherwise, read from host_values.
        """
        if party_id is None:
            col_name_map = {name: idx for idx, name in enumerate(self.col_names)}
            col_indices = [col_name_map[x] for x in select_col_names]
            values = np.array(self.values)[col_indices]
        else:
            if party_id not in self.host_party_ids:
                raise ValueError(f"party_id: {party_id} is not in host_party_ids:"
                                 f" {self.host_party_ids}")
            party_idx = self.host_party_ids.index(party_id)
            col_name_map = {name: idx for idx, name in
                            enumerate(self.host_col_names[party_idx])}
            values = []
            host_values = np.array(self.host_values[party_idx])
            for host_col_name in select_col_names:
                if host_col_name in col_name_map:
                    values.append(host_values[col_name_map[host_col_name]])
                else:
                    values.append(0)
        return list(values)


class IsometricModel(object):
    """
    Used to store metric values.

    Parameters
    ----------
    metric_name: str or list of str
        The metric name, e.g. iv. A single string is wrapped into a list.

    metric_info: SingleMetricInfo or list of SingleMetricInfo
    """

    def __init__(self, metric_name=None, metric_info=None):
        if metric_name is None:
            metric_name = []
        if not isinstance(metric_name, list):
            metric_name = [metric_name]
        if metric_info is None:
            metric_info = []
        if not isinstance(metric_info, list):
            metric_info = [metric_info]
        self._metric_names = metric_name
        self._metric_info = metric_info

    def add_metric_value(self, metric_name, metric_info):
        self._metric_names.append(metric_name)
        self._metric_info.append(metric_info)

    @property
    def valid_value_name(self):
        return self._metric_names

    def get_metric_info(self, metric_name):
        LOGGER.debug(f"valid_value_name: {self.valid_value_name}, "
                     f"metric_name: {metric_name}")
        if metric_name not in self.valid_value_name:
            return None
        return self._metric_info[self._metric_names.index(metric_name)]

    def get_all_metric_info(self):
        return self._metric_info
```
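A short usage sketch of the two classes above, with made-up metric values; everything here follows directly from the source (FATE must be importable):

```python
import numpy as np

from federatedml.feature.feature_selection.model_adapter.isometric_model import (
    IsometricModel, SingleMetricInfo,
)

info = SingleMetricInfo(
    values=[0.52, 0.31],
    col_names=["x0", "x1"],
    host_party_ids=[10000],
    host_values=[np.array([0.44])],
    host_col_names=[["y0"]],
)
values, names = info.union_result()
# values -> [0.52, 0.31, 0.44]
# names  -> [("guest", "x0"), ("guest", "x1"), (10000, "y0")]
print(info.get_partial_values(["y0"], party_id=10000))  # -> [0.44]

model = IsometricModel()
model.add_metric_value(metric_name="iv", metric_info=info)
assert model.get_metric_info("iv") is info
```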
repo: FATE
file: FATE-master/python/federatedml/feature/feature_selection/model_adapter/adapter_base.py
file_length: 812 | avg_line_length: 32.875 | max_line_length: 75 | extension_type: py

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC


class BaseAdapter(ABC):
    def convert(self, model_meta, model_param):
        raise NotImplementedError("Should not run here")
```
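The adapter contract is small: a concrete subclass overrides `convert` to turn a pair of model protobufs into an `IsometricModel`, as `PearsonAdapter` above does. A hypothetical minimal subclass, for illustration only:

```python
from federatedml.feature.feature_selection.model_adapter import isometric_model
from federatedml.feature.feature_selection.model_adapter.adapter_base import BaseAdapter


class ConstantAdapter(BaseAdapter):
    """Hypothetical adapter that reports a fixed metric for a single column."""

    def convert(self, model_meta, model_param):
        info = isometric_model.SingleMetricInfo(values=[0.0], col_names=["x0"])
        result = isometric_model.IsometricModel()
        result.add_metric_value(metric_name="constant", metric_info=info)
        return result
```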
repo: FATE
file: FATE-master/python/federatedml/feature/homo_feature_binning/homo_binning_cpn.py
file_length: 3,808 | avg_line_length: 43.811765 | max_line_length: 115 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from federatedml.model_base import ModelBase
from federatedml.param.feature_binning_param import HomoFeatureBinningParam
from federatedml.feature.homo_feature_binning import virtual_summary_binning, recursive_query_binning
from federatedml.util import consts
from federatedml.feature.hetero_feature_binning.base_feature_binning import BaseFeatureBinning
from federatedml.transfer_variable.transfer_class.homo_binning_transfer_variable import HomoBinningTransferVariable


class HomoBinningArbiter(BaseFeatureBinning):
    def __init__(self):
        super().__init__()
        self.binning_obj = None
        self.transfer_variable = HomoBinningTransferVariable()
        self.model_param = HomoFeatureBinningParam()

    def _init_model(self, model_param):
        self.model_param = model_param
        if self.model_param.method == consts.VIRTUAL_SUMMARY:
            self.binning_obj = virtual_summary_binning.Server(self.model_param)
        elif self.model_param.method == consts.RECURSIVE_QUERY:
            self.binning_obj = recursive_query_binning.Server(self.model_param)
        else:
            raise ValueError(f"Method: {self.model_param.method} cannot be recognized")

    def fit(self, *args):
        self.binning_obj.set_transfer_variable(self.transfer_variable)
        self.binning_obj.fit_split_points()

    def transform(self, data_instances):
        pass


class HomoBinningClient(BaseFeatureBinning):
    def __init__(self):
        super().__init__()
        self.binning_obj = None
        self.transfer_variable = HomoBinningTransferVariable()
        self.model_param = HomoFeatureBinningParam()

    def _init_model(self, model_param: HomoFeatureBinningParam):
        # Assign the incoming params before reading transform_type, so the
        # configured value is used rather than the default from __init__.
        self.model_param = model_param
        self.transform_type = self.model_param.transform_param.transform_type
        if self.model_param.method == consts.VIRTUAL_SUMMARY:
            self.binning_obj = virtual_summary_binning.Client(self.model_param)
        elif self.model_param.method == consts.RECURSIVE_QUERY:
            self.binning_obj = recursive_query_binning.Client(role=self.component_properties.role,
                                                              params=self.model_param)
        else:
            raise ValueError(f"Method: {self.model_param.method} cannot be recognized")

    def fit(self, data_instances):
        self._abnormal_detection(data_instances)
        self._setup_bin_inner_param(data_instances, self.model_param)
        transformed_instances = data_instances.mapValues(self.data_format_transform)
        transformed_instances.schema = self.schema
        self.binning_obj.set_bin_inner_param(self.bin_inner_param)
        self.binning_obj.set_transfer_variable(self.transfer_variable)
        split_points = self.binning_obj.fit_split_points(transformed_instances)
        data_out = self.transform(data_instances)
        summary = {k: list(v) for k, v in split_points.items()}
        self.set_summary({"split_points": summary})
        return data_out

    def transform(self, data_instances):
        return self.transform_data(data_instances)
```
repo: FATE
file: FATE-master/python/federatedml/feature/homo_feature_binning/homo_binning_base.py
file_length: 10,494 | avg_line_length: 43.096639 | max_line_length: 115 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import functools

import numpy as np

from federatedml.feature.binning.base_binning import BaseBinning
from federatedml.framework import weights
from fate_arch.session import computing_session as session
from federatedml.param.feature_binning_param import HomoFeatureBinningParam
from federatedml.statistic.data_statistics import MultivariateStatisticalSummary
from federatedml.transfer_variable.transfer_class.homo_binning_transfer_variable import HomoBinningTransferVariable
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient, SecureAggregatorServer
from federatedml.util import consts


class SplitPointNode(object):
    def __init__(self, value, min_value, max_value, aim_rank=None, allow_error_rank=0, last_rank=-1):
        self.value = value
        self.min_value = min_value
        self.max_value = max_value
        self.aim_rank = aim_rank
        self.allow_error_rank = allow_error_rank
        self.last_rank = last_rank
        self.fixed = False

    def set_aim_rank(self, rank):
        self.aim_rank = rank

    def create_right_new(self):
        value = (self.value + self.max_value) / 2
        if np.fabs(value - self.value) <= consts.FLOAT_ZERO * 0.9:
            self.value += consts.FLOAT_ZERO * 0.9
            self.fixed = True
            return self
        min_value = self.value
        return SplitPointNode(value, min_value, self.max_value, self.aim_rank, self.allow_error_rank)

    def create_left_new(self):
        value = (self.value + self.min_value) / 2
        if np.fabs(value - self.value) <= consts.FLOAT_ZERO * 0.9:
            self.value += consts.FLOAT_ZERO * 0.9
            self.fixed = True
            return self
        max_value = self.value
        return SplitPointNode(value, self.min_value, max_value, self.aim_rank, self.allow_error_rank)


class RankArray(object):
    def __init__(self, rank_array, error_rank, last_rank_array=None):
        self.rank_array = rank_array
        self.last_rank_array = last_rank_array
        self.error_rank = error_rank
        self.all_fix = False
        self.fixed_array = np.zeros(len(self.rank_array), dtype=bool)
        self._compare()

    def _compare(self):
        if self.last_rank_array is None:
            return
        self.fixed_array = abs(self.rank_array - self.last_rank_array) < self.error_rank
        assert isinstance(self.fixed_array, np.ndarray)
        if self.fixed_array.all():
            self.all_fix = True

    def __iadd__(self, other: 'RankArray'):
        for idx, is_fixed in enumerate(self.fixed_array):
            if not is_fixed:
                self.rank_array[idx] += other.rank_array[idx]
        self._compare()
        return self

    def __add__(self, other: 'RankArray'):
        res_array = []
        for idx, is_fixed in enumerate(self.fixed_array):
            if not is_fixed:
                res_array.append(self.rank_array[idx] + other.rank_array[idx])
            else:
                res_array.append(self.rank_array[idx])
        return RankArray(np.array(res_array), self.error_rank, self.last_rank_array)


class Server(BaseBinning):
    def __init__(self, params=None, abnormal_list=None):
        super().__init__(params, abnormal_list)
        self.aggregator: SecureAggregatorServer = None
        self.transfer_variable = HomoBinningTransferVariable()
        self.suffix = None

    def set_suffix(self, suffix):
        self.suffix = suffix

    def set_transfer_variable(self, variable):
        self.transfer_variable = variable

    def set_aggregator(self, aggregator):
        self.aggregator = aggregator

    def get_total_count(self):
        total_count = self.aggregator.aggregate_model(suffix=(self.suffix, 'total_count'))
        self.aggregator.broadcast_model(total_count, suffix=(self.suffix, 'total_count'))
        return total_count

    def get_missing_count(self):
        missing_count = self.aggregator.aggregate_model(suffix=(self.suffix, 'missing_count'))
        self.aggregator.broadcast_model(missing_count, suffix=(self.suffix, 'missing_count'))
        return missing_count

    def get_min_max(self):
        local_values = self.transfer_variable.local_static_values.get(suffix=(self.suffix, "min-max"))
        max_array, min_array = [], []
        for local_max, local_min in local_values:
            max_array.append(local_max)
            min_array.append(local_min)
        max_values = np.max(max_array, axis=0)
        min_values = np.min(min_array, axis=0)
        self.transfer_variable.global_static_values.remote((max_values, min_values),
                                                           suffix=(self.suffix, "min-max"))
        return min_values, max_values

    def query_values(self):
        rank_weight = self.aggregator.aggregate_model(suffix=(self.suffix, 'rank'))
        self.aggregator.broadcast_model(rank_weight, suffix=(self.suffix, 'rank'))


class Client(BaseBinning):
    def __init__(self, params: HomoFeatureBinningParam = None, abnormal_list=None):
        super().__init__(params, abnormal_list)
        self.aggregator: SecureAggregatorClient = None
        self.transfer_variable = HomoBinningTransferVariable()
        self.max_values, self.min_values = None, None
        self.suffix = None
        self.total_count = 0

    def set_suffix(self, suffix):
        self.suffix = suffix

    def set_transfer_variable(self, variable):
        self.transfer_variable = variable

    def set_aggregator(self, aggregator):
        self.aggregator = aggregator

    def get_total_count(self, data_inst):
        count = data_inst.count()
        count_weight = weights.NumericWeights(count)
        self.aggregator.send_model(count_weight, suffix=(self.suffix, 'total_count'))
        total_count = self.aggregator.get_aggregated_model(suffix=(self.suffix, 'total_count')).unboxed
        return total_count

    def get_missing_count(self, summary_table):
        missing_table = summary_table.mapValues(lambda x: x.missing_count)
        missing_value_counts = dict(missing_table.collect())
        missing_weight = weights.DictWeights(missing_value_counts)
        self.aggregator.send_model(missing_weight, suffix=(self.suffix, 'missing_count'))
        missing_counts = self.aggregator.get_aggregated_model(suffix=(self.suffix, 'missing_count')).unboxed
        return missing_counts

    def get_min_max(self, data_inst):
        """
        Get the max and min value of each selected column.

        Returns
        -------
        max_values, min_values: dict
            e.g. {"x1": 10, "x2": 3, ...}
        """
        if self.max_values and self.min_values:
            return self.max_values, self.min_values
        statistic_obj = MultivariateStatisticalSummary(data_inst,
                                                       cols_index=self.bin_inner_param.bin_indexes,
                                                       abnormal_list=self.abnormal_list,
                                                       error=self.params.error)
        max_values = statistic_obj.get_max()
        min_values = statistic_obj.get_min()
        max_list = [max_values[x] for x in self.bin_inner_param.bin_names]
        min_list = [min_values[x] for x in self.bin_inner_param.bin_names]
        local_min_max_values = (max_list, min_list)
        self.transfer_variable.local_static_values.remote(local_min_max_values,
                                                          suffix=(self.suffix, "min-max"))
        self.max_values, self.min_values = self.transfer_variable.global_static_values.get(
            idx=0, suffix=(self.suffix, "min-max"))
        return self.max_values, self.min_values

    def init_query_points(self, partitions, split_num, error_rank=1, need_first=True):
        query_points = []
        for idx, col_name in enumerate(self.bin_inner_param.bin_names):
            max_value = self.max_values[idx]
            min_value = self.min_values[idx]
            sps = np.linspace(min_value, max_value, split_num)
            if not need_first:
                sps = sps[1:]
            split_point_array = [SplitPointNode(sps[i], min_value, max_value, allow_error_rank=error_rank)
                                 for i in range(len(sps))]
            query_points.append((col_name, split_point_array))
        query_points_table = session.parallelize(query_points, include_key=True, partition=partitions)
        return query_points_table

    def query_values(self, summary_table, query_points):
        local_ranks = summary_table.join(query_points, self._query_table)
        self.aggregator.send_model(local_ranks, suffix=(self.suffix, 'rank'))
        global_rank = self.aggregator.get_aggregated_model(suffix=(self.suffix, 'rank'))
        global_rank = global_rank.mapValues(lambda x: np.array(x, dtype=int))
        return global_rank

    @staticmethod
    def _query_table(summary, query_points):
        queries = [x.value for x in query_points]
        original_idx = np.argsort(np.argsort(queries))
        queries = np.sort(queries)
        ranks = summary.query_value_list(queries)
        ranks = np.array(ranks)[original_idx]
        return np.array(ranks, dtype=int)
```
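`SplitPointNode` is the unit of the rank-bisection search used by the recursive-query binning further down: each probe keeps a candidate split value plus the interval it may still move in. A small standalone walk through one bisection step, assuming FATE is importable:

```python
from federatedml.feature.homo_feature_binning.homo_binning_base import SplitPointNode

node = SplitPointNode(value=5.0, min_value=0.0, max_value=10.0,
                      aim_rank=40, allow_error_rank=1)
# An overshoot of the target rank probes the left half; an undershoot, the right.
left = node.create_left_new()    # value (5.0 + 0.0) / 2 = 2.5, interval narrowed to [0.0, 5.0]
right = node.create_right_new()  # value (5.0 + 10.0) / 2 = 7.5, interval narrowed to [5.0, 10.0]
print(left.value, right.value)   # 2.5 7.5
```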
repo: FATE
file: FATE-master/python/federatedml/feature/homo_feature_binning/homo_split_points.py
file_length: 5,258 | avg_line_length: 39.145038 | max_line_length: 120 | extension_type: py

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.framework.weights import DictWeights
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient, SecureAggregatorServer
from federatedml.util import abnormal_detection
from federatedml.util import consts


class HomoFeatureBinningServer(object):
    def __init__(self):
        self.aggregator = SecureAggregatorServer(secure_aggregate=True,
                                                 communicate_match_suffix='homo_feature_binning')
        self.suffix = tuple()

    def set_suffix(self, suffix):
        self.suffix = suffix

    def average_run(self, data_instances=None, bin_param: FeatureBinningParam = None,
                    bin_num=10, abnormal_list=None):
        agg_split_points = self.aggregator.aggregate_model(suffix=self.suffix)
        self.aggregator.broadcast_model(agg_split_points, suffix=self.suffix)

    def fit(self, *args, **kwargs):
        pass

    def query_quantile_points(self, data_instances, quantile_points):
        suffix = tuple(list(self.suffix) + [str(quantile_points)])
        agg_quantile_points = self.aggregator.aggregate_model(suffix=suffix)
        self.aggregator.broadcast_model(agg_quantile_points, suffix=suffix)


class HomoFeatureBinningClient(object):
    def __init__(self, bin_method=consts.QUANTILE):
        self.aggregator = SecureAggregatorClient(secure_aggregate=True,
                                                 aggregate_type='mean',
                                                 communicate_match_suffix='homo_feature_binning')
        self.suffix = tuple()
        self.bin_method = bin_method
        self.bin_obj: QuantileBinning = None
        self.bin_param = None
        self.abnormal_list = None

    def set_suffix(self, suffix):
        self.suffix = suffix

    def average_run(self, data_instances, bin_num=10, abnormal_list=None):
        if self.bin_param is None:
            bin_param = FeatureBinningParam(bin_num=bin_num)
            self.bin_param = bin_param
        else:
            bin_param = self.bin_param
        if self.bin_method == consts.QUANTILE:
            bin_obj = QuantileBinning(params=bin_param, abnormal_list=abnormal_list,
                                      allow_duplicate=True)
        else:
            raise ValueError("Homo Split Point do not accept bin_method: {}".format(self.bin_method))

        abnormal_detection.empty_table_detection(data_instances)
        abnormal_detection.empty_feature_detection(data_instances)

        split_points = bin_obj.fit_split_points(data_instances)
        split_points = {k: np.array(v) for k, v in split_points.items()}
        split_points_weights = DictWeights(d=split_points)
        self.aggregator.send_model(split_points_weights, self.suffix)
        dict_split_points = self.aggregator.get_aggregated_model(self.suffix)
        split_points = {k: list(v) for k, v in dict_split_points.unboxed.items()}
        self.bin_obj = bin_obj
        return split_points

    def convert_feature_to_bin(self, data_instances, split_points=None):
        if self.bin_obj is None:
            return None, None, None
        return self.bin_obj.convert_feature_to_bin(data_instances, split_points)

    def set_bin_param(self, bin_param: FeatureBinningParam):
        if self.bin_param is not None:
            raise RuntimeError("Bin param has been set and it's immutable")
        self.bin_param = bin_param
        return self

    def set_abnormal_list(self, abnormal_list):
        self.abnormal_list = abnormal_list
        return self

    def fit(self, data_instances):
        if self.bin_obj is not None:
            return self
        if self.bin_param is None:
            self.bin_param = FeatureBinningParam()
        self.bin_obj = QuantileBinning(params=self.bin_param, abnormal_list=self.abnormal_list,
                                       allow_duplicate=True)
        self.bin_obj.fit_split_points(data_instances)
        return self

    def query_quantile_points(self, data_instances, quantile_points):
        if self.bin_obj is None:
            self.fit(data_instances)
        query_result = self.bin_obj.query_quantile_point(quantile_points)
        query_points = DictWeights(d=query_result)
        suffix = tuple(list(self.suffix) + [str(quantile_points)])
        self.aggregator.send_model(query_points, suffix)
        query_points = self.aggregator.get_aggregated_model(suffix)
        query_points = {k: v for k, v in query_points.unboxed.items()}
        return query_points
```
repo: FATE
file: FATE-master/python/federatedml/feature/homo_feature_binning/recursive_query_binning.py
file_length: 7,153 | avg_line_length: 42.357576 | max_line_length: 114 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import functools
import operator

import numpy as np

from federatedml.feature.binning.quantile_tool import QuantileBinningTool
from federatedml.feature.homo_feature_binning import homo_binning_base
from federatedml.param.feature_binning_param import HomoFeatureBinningParam
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient, SecureAggregatorServer


class Server(homo_binning_base.Server):
    def __init__(self, params: HomoFeatureBinningParam, abnormal_list=None):
        super().__init__(params, abnormal_list)

    def fit_split_points(self, data=None):
        if self.aggregator is None:
            self.aggregator = SecureAggregatorServer(secure_aggregate=True,
                                                     communicate_match_suffix='recursive_query_binning')
        self.get_total_count()
        self.get_min_max()
        self.get_missing_count()
        self.set_suffix(-1)
        self.query_values()
        n_iter = 0
        while n_iter < self.params.max_iter:
            self.set_suffix(n_iter)
            is_converge = self.transfer_variable.is_converge.get(suffix=self.suffix)[0]
            if is_converge:
                break
            self.query_values()
            n_iter += 1


class Client(homo_binning_base.Client):
    def __init__(self, role, params: HomoFeatureBinningParam = None,
                 abnormal_list=None, allow_duplicate=False):
        super().__init__(params, abnormal_list)
        self.allow_duplicate = allow_duplicate
        self.global_ranks = {}
        self.total_count = 0
        self.missing_counts = 0
        self.error = params.error
        self.error_rank = None
        self.role = role

    def fit_split_points(self, data_instances):
        if self.aggregator is None:
            self.aggregator = SecureAggregatorClient(secure_aggregate=True,
                                                     aggregate_type='sum',
                                                     communicate_match_suffix='recursive_query_binning')
        if self.bin_inner_param is None:
            self._setup_bin_inner_param(data_instances, self.params)
        self.total_count = self.get_total_count(data_instances)
        self.error_rank = np.ceil(self.error * self.total_count)
        LOGGER.debug(f"abnormal_list: {self.abnormal_list}")
        quantile_tool = QuantileBinningTool(param_obj=self.params,
                                            abnormal_list=self.abnormal_list,
                                            allow_duplicate=self.allow_duplicate)
        quantile_tool.set_bin_inner_param(self.bin_inner_param)
        summary_table = quantile_tool.fit_summary(data_instances)
        self.get_min_max(data_instances)
        self.missing_counts = self.get_missing_count(summary_table)
        split_points_table = self._recursive_querying(summary_table)
        split_points = dict(split_points_table.collect())
        for col_name, sps in split_points.items():
            sp = [x.value for x in sps]
            if not self.allow_duplicate:
                sp = sorted(set(sp))
                res = [sp[0] if np.fabs(sp[0]) > consts.FLOAT_ZERO else 0.0]
                last = sp[0]
                for v in sp[1:]:
                    if np.fabs(v) < consts.FLOAT_ZERO:
                        v = 0.0
                    if np.abs(v - last) > consts.FLOAT_ZERO:
                        res.append(v)
                        last = v
                sp = np.array(res)
            self.bin_results.put_col_split_points(col_name, sp)
        return self.bin_results.all_split_points

    @staticmethod
    def _set_aim_rank(feature_name, split_point_array, missing_dict, total_counts, split_num):
        total_count = total_counts - missing_dict[feature_name]
        aim_ranks = [np.floor(x * total_count) for x in np.linspace(0, 1, split_num)]
        aim_ranks = aim_ranks[1:]
        for idx, sp in enumerate(split_point_array):
            sp.set_aim_rank(aim_ranks[idx])
        return feature_name, split_point_array

    def _recursive_querying(self, summary_table):
        self.set_suffix(-1)
        query_points_table = self.init_query_points(summary_table.partitions,
                                                    split_num=self.params.bin_num + 1,
                                                    error_rank=self.error_rank,
                                                    need_first=False)
        f = functools.partial(self._set_aim_rank,
                              missing_dict=self.missing_counts,
                              total_counts=self.total_count,
                              split_num=self.params.bin_num + 1)
        query_points_table = query_points_table.map(f)
        global_ranks = self.query_values(summary_table, query_points_table)
        n_iter = 0
        while n_iter < self.params.max_iter:
            self.set_suffix(n_iter)
            query_points_table = query_points_table.join(global_ranks, self.renew_query_points_table)
            is_converge = self.check_converge(query_points_table)
            if self.role == consts.GUEST:
                self.transfer_variable.is_converge.remote(is_converge, suffix=self.suffix)
            LOGGER.debug(f"n_iter: {n_iter}, converged: {is_converge}")
            if is_converge:
                break
            global_ranks = self.query_values(summary_table, query_points_table)
            n_iter += 1
        return query_points_table

    @staticmethod
    def renew_query_points_table(query_points, ranks):
        assert len(query_points) == len(ranks)
        new_array = []
        for idx, node in enumerate(query_points):
            rank = ranks[idx]
            if node.fixed:
                new_node = copy.deepcopy(node)
            elif rank - node.aim_rank > node.allow_error_rank:
                new_node = node.create_left_new()
            elif node.aim_rank - rank > node.allow_error_rank:
                new_node = node.create_right_new()
            else:
                new_node = copy.deepcopy(node)
                new_node.fixed = True
            new_node.last_rank = rank
            new_array.append(new_node)
        return new_array

    @staticmethod
    def check_converge(query_table):
        def is_all_fixed(node_array):
            fix_array = [n.fixed for n in node_array]
            return functools.reduce(operator.and_, fix_array)

        fix_table = query_table.mapValues(is_all_fixed)
        return fix_table.reduce(operator.and_)
```
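The per-iteration update in `renew_query_points_table` is pure and easy to exercise in isolation: each node's aggregated global rank is compared with its aim rank, and the probe bisects left or right until the two agree within `allow_error_rank`. A sketch with made-up numbers, assuming FATE is importable:

```python
from federatedml.feature.homo_feature_binning.homo_binning_base import SplitPointNode
from federatedml.feature.homo_feature_binning.recursive_query_binning import Client

nodes = [SplitPointNode(value=5.0, min_value=0.0, max_value=10.0,
                        aim_rank=40, allow_error_rank=1)]
updated = Client.renew_query_points_table(nodes, ranks=[55])
# 55 - 40 > 1, so the node bisects left: its value moves from 5.0 to 2.5
print(updated[0].value, updated[0].last_rank)  # 2.5 55
```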
repo: FATE
file: FATE-master/python/federatedml/feature/homo_feature_binning/__init__.py
file_length: 661 | avg_line_length: 35.777778 | max_line_length: 75 | extension_type: py

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
repo: FATE
file: FATE-master/python/federatedml/feature/homo_feature_binning/virtual_summary_binning.py
file_length: 5,525 | avg_line_length: 43.926829 | max_line_length: 114 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import functools

import numpy as np

from federatedml.feature.binning.quantile_tool import QuantileBinningTool
from federatedml.feature.homo_feature_binning import homo_binning_base
from federatedml.param.feature_binning_param import HomoFeatureBinningParam
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient, SecureAggregatorServer
from federatedml.util import LOGGER
from federatedml.util import consts


class Server(homo_binning_base.Server):
    def __init__(self, params=None, abnormal_list=None):
        super().__init__(params, abnormal_list)

    def fit_split_points(self, data=None):
        if self.aggregator is None:
            self.aggregator = SecureAggregatorServer(secure_aggregate=True,
                                                     communicate_match_suffix='virtual_summary_binning')
        self.get_total_count()
        self.get_min_max()
        self.get_missing_count()
        self.query_values()


class Client(homo_binning_base.Client):
    def __init__(self, params: HomoFeatureBinningParam = None, abnormal_list=None,
                 allow_duplicate=False):
        super().__init__(params, abnormal_list)
        self.allow_duplicate = allow_duplicate
        self.query_points = None
        self.global_ranks = None
        self.total_count = 0
        self.missing_count = 0

    def fit(self, data_inst):
        if self.bin_inner_param is None:
            self._setup_bin_inner_param(data_inst, self.params)
        self.total_count = self.get_total_count(data_inst)
        LOGGER.debug(f"abnormal_list: {self.abnormal_list}")
        quantile_tool = QuantileBinningTool(param_obj=self.params,
                                            abnormal_list=self.abnormal_list,
                                            allow_duplicate=self.allow_duplicate)
        quantile_tool.set_bin_inner_param(self.bin_inner_param)
        summary_table = quantile_tool.fit_summary(data_inst)
        self.get_min_max(data_inst)
        self.missing_count = self.get_missing_count(summary_table)
        self.query_points = self.init_query_points(summary_table.partitions,
                                                   split_num=self.params.sample_bins)
        self.global_ranks = self.query_values(summary_table, self.query_points)

    def fit_split_points(self, data_instances):
        if self.aggregator is None:
            self.aggregator = SecureAggregatorClient(secure_aggregate=True,
                                                     aggregate_type='sum',
                                                     communicate_match_suffix='virtual_summary_binning')
        self.fit(data_instances)
        query_func = functools.partial(self._query,
                                       bin_num=self.bin_num,
                                       missing_count=self.missing_count,
                                       total_count=self.total_count)
        split_point_table = self.query_points.join(self.global_ranks, lambda x, y: (x, y))
        split_point_table = split_point_table.map(query_func)
        split_points = dict(split_point_table.collect())
        for col_name, sps in split_points.items():
            self.bin_results.put_col_split_points(col_name, sps)
        return self.bin_results.all_split_points

    def _query(self, feature_name, values, bin_num, missing_count, total_count):
        percent_value = 1.0 / bin_num
        # calculate the split points
        percentile_rate = [i * percent_value for i in range(1, bin_num)]
        percentile_rate.append(1.0)
        this_count = total_count - missing_count[feature_name]
        query_ranks = [int(x * this_count) for x in percentile_rate]
        query_points, global_ranks = values[0], values[1]
        query_values = [x.value for x in query_points]
        query_res = []
        for rank in query_ranks:
            idx = bisect.bisect_left(global_ranks, rank)
            if idx >= len(global_ranks) - 1:
                approx_value = query_values[-1]
                query_res.append(approx_value)
            else:
                if np.fabs(query_values[idx + 1] - query_values[idx]) < consts.FLOAT_ZERO:
                    query_res.append(query_values[idx])
                elif np.fabs(global_ranks[idx + 1] - global_ranks[idx]) < consts.FLOAT_ZERO:
                    query_res.append(query_values[idx])
                else:
                    approx_value = query_values[idx] + (query_values[idx + 1] - query_values[idx]) * \
                        ((rank - global_ranks[idx]) / (global_ranks[idx + 1] - global_ranks[idx]))
                    query_res.append(approx_value)
        if not self.allow_duplicate:
            query_res = sorted(set(query_res))
        return feature_name, query_res
```
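Inside `_query`, each requested quantile rank is mapped to a value by interpolating linearly along the line through the neighbouring pre-sampled query points at positions `idx` and `idx + 1`, where `bisect_left` picks the first aggregated rank at or above the target. A standalone restatement of that arithmetic with made-up numbers:

```python
import bisect

query_values = [0.0, 2.0, 4.0, 6.0]  # pre-sampled candidate split values
global_ranks = [0, 10, 30, 40]       # aggregated ranks of those values
rank = 20                            # rank of the desired quantile

idx = bisect.bisect_left(global_ranks, rank)  # -> 2, first rank >= 20
if idx >= len(global_ranks) - 1:
    approx = query_values[-1]
else:
    approx = query_values[idx] + (query_values[idx + 1] - query_values[idx]) * (
        (rank - global_ranks[idx]) / (global_ranks[idx + 1] - global_ranks[idx]))
print(approx)  # 4.0 + 2.0 * ((20 - 30) / 10) = 2.0
```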
repo: FATE
file: FATE-master/python/federatedml/feature/feature_scale/min_max_scale.py
file_length: 5,676 | avg_line_length: 37.619048 | max_line_length: 129 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import functools

import numpy as np

from federatedml.protobuf.generated.feature_scale_meta_pb2 import ScaleMeta
from federatedml.protobuf.generated.feature_scale_param_pb2 import ScaleParam
from federatedml.protobuf.generated.feature_scale_param_pb2 import ColumnScaleParam
from federatedml.feature.feature_scale.base_scale import BaseScale


class MinMaxScale(BaseScale):
    """
    Transform features by scaling each feature to a given range, e.g. between
    minimum and maximum. The transformation is given by:
    X_scale = (X - X.min) / (X.max - X.min), where X.min is the minimum value
    of the feature and X.max is the maximum.
    """

    def __init__(self, params):
        super().__init__(params)
        self.mode = params.mode
        self.column_range = None

    @staticmethod
    def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):
        """
        Scale operator for each column. The input data type is data_instance.
        """
        features = np.array(data.features, dtype=float)
        for i in process_cols_list:
            value = features[i]
            if value > max_value_list[i]:
                value = max_value_list[i]
            elif value < min_value_list[i]:
                value = min_value_list[i]
            features[i] = (value - min_value_list[i]) / scale_value_list[i]
        _data = copy.deepcopy(data)
        _data.features = features
        return _data

    def fit(self, data):
        """
        Apply min-max scale to the input data.

        Parameters
        ----------
        data: data_instance, input data

        Returns
        -------
        fit_data: data_instance, data after scaling
        """
        self.column_min_value, self.column_max_value = self._get_min_max_value(data)
        self.scale_column_idx = self._get_scale_column_idx(data)
        self.header = self._get_header(data)

        self.column_range = []
        for i in range(len(self.column_max_value)):
            scale = self.column_max_value[i] - self.column_min_value[i]
            if scale < 0:
                raise ValueError("scale value should be larger than 0")
            elif np.abs(scale - 0) < 1e-6:
                scale = 1
            self.column_range.append(scale)

        f = functools.partial(MinMaxScale.__scale,
                              max_value_list=self.column_max_value,
                              min_value_list=self.column_min_value,
                              scale_value_list=self.column_range,
                              process_cols_list=self.scale_column_idx)
        fit_data = data.mapValues(f)
        return fit_data

    def transform(self, data):
        """
        Transform input data using min-max scale with fit results.

        Parameters
        ----------
        data: data_instance, input data

        Returns
        -------
        transform_data: data_instance, data after transform
        """
        self.column_range = []
        for i in range(len(self.column_max_value)):
            scale = self.column_max_value[i] - self.column_min_value[i]
            if scale < 0:
                raise ValueError("scale value should be larger than 0")
            elif np.abs(scale - 0) < 1e-6:
                scale = 1
            self.column_range.append(scale)

        f = functools.partial(MinMaxScale.__scale,
                              max_value_list=self.column_max_value,
                              min_value_list=self.column_min_value,
                              scale_value_list=self.column_range,
                              process_cols_list=self.scale_column_idx)
        transform_data = data.mapValues(f)
        return transform_data

    def _get_meta(self, need_run):
        if self.header:
            scale_column = [self.header[i] for i in self.scale_column_idx]
        else:
            scale_column = ["_".join(["col", str(i)]) for i in self.scale_column_idx]
        if not self.data_shape:
            self.data_shape = -1
        meta_proto_obj = ScaleMeta(method="min_max_scale",
                                   mode=self.mode,
                                   area="null",
                                   scale_column=scale_column,
                                   feat_upper=self._get_upper(self.data_shape),
                                   feat_lower=self._get_lower(self.data_shape),
                                   need_run=need_run)
        return meta_proto_obj

    def _get_param(self):
        min_max_scale_param_dict = {}
        if self.header:
            scale_column_idx_set = set(self.scale_column_idx)
            for i, header in enumerate(self.header):
                if i in scale_column_idx_set:
                    param_obj = ColumnScaleParam(column_upper=self.column_max_value[i],
                                                 column_lower=self.column_min_value[i])
                    min_max_scale_param_dict[header] = param_obj
        param_proto_obj = ScaleParam(col_scale_param=min_max_scale_param_dict,
                                     header=self.header)
        return param_proto_obj
```
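A standalone restatement of the per-column transform in `__scale`: clip the value into the fitted [min, max] range, then rescale by the column range (a degenerate zero range is replaced by 1 during fit):

```python
import numpy as np

x = np.array([1.0, 4.0, 12.0])  # one column of features
x_min, x_max = 1.0, 10.0        # fitted column min/max
col_range = x_max - x_min
if abs(col_range) < 1e-6:       # degenerate column, as in MinMaxScale.fit
    col_range = 1.0
x_scaled = (np.clip(x, x_min, x_max) - x_min) / col_range
print(x_scaled)  # [0.         0.33333333 1.        ]
```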
repo: FATE
file: FATE-master/python/federatedml/feature/feature_scale/standard_scale.py
file_length: 6,627 | avg_line_length: 36.659091 | max_line_length: 126 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import functools

import numpy as np

from federatedml.protobuf.generated.feature_scale_meta_pb2 import ScaleMeta
from federatedml.protobuf.generated.feature_scale_param_pb2 import ScaleParam
from federatedml.protobuf.generated.feature_scale_param_pb2 import ColumnScaleParam
from federatedml.feature.feature_scale.base_scale import BaseScale
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.util import LOGGER


class StandardScale(BaseScale):
    """
    Standardize features by removing the mean and scaling to unit variance.
    The standard score of a sample x is calculated as z = (x - u) / s, where u
    is the mean of the training samples and s is the standard deviation of the
    training samples.
    """

    def __init__(self, params):
        super().__init__(params)
        self.with_mean = params.with_mean
        self.with_std = params.with_std
        self.mean = None
        self.std = None

    def set_param(self, mean, std):
        self.mean = mean
        self.std = std

    @staticmethod
    def __scale_with_column_range(data, column_upper, column_lower, mean, std, process_cols_list):
        features = np.array(data.features, dtype=float)
        for i in process_cols_list:
            value = data.features[i]
            if value > column_upper[i]:
                value = column_upper[i]
            elif value < column_lower[i]:
                value = column_lower[i]
            features[i] = (value - mean[i]) / std[i]
        _data = copy.deepcopy(data)
        _data.features = features
        return _data

    @staticmethod
    def __scale(data, mean, std, process_cols_list):
        features = np.array(data.features, dtype=float)
        for i in process_cols_list:
            features[i] = (data.features[i] - mean[i]) / std[i]
        _data = copy.deepcopy(data)
        _data.features = features
        return _data

    def fit(self, data):
        """
        Apply standard scale to the input data.

        Parameters
        ----------
        data: data_instance, input data

        Returns
        -------
        data: data_instance, data after scaling
        mean: list, mean value of each column
        std: list, standard deviation of each column
        """
        self.column_min_value, self.column_max_value = self._get_min_max_value(data)
        self.scale_column_idx = self._get_scale_column_idx(data)
        self.header = self._get_header(data)
        self.data_shape = self._get_data_shape(data)

        # clip column values that exceed parameter upper or fall below parameter lower
        data = self.fit_feature_range(data)

        if not self.with_mean and not self.with_std:
            self.mean = [0 for _ in range(self.data_shape)]
            self.std = [1 for _ in range(self.data_shape)]
        else:
            self.summary_obj = MultivariateStatisticalSummary(data, -1)
            if self.with_mean:
                self.mean = self.summary_obj.get_mean()
                self.mean = [self.mean[key] for key in self.header]
            else:
                self.mean = [0 for _ in range(self.data_shape)]

            if self.with_std:
                self.std = self.summary_obj.get_std_variance()
                self.std = [self.std[key] for key in self.header]
                for i, value in enumerate(self.std):
                    if np.abs(value - 0) < 1e-6:
                        self.std[i] = 1
            else:
                self.std = [1 for _ in range(self.data_shape)]

        f = functools.partial(self.__scale, mean=self.mean, std=self.std,
                              process_cols_list=self.scale_column_idx)
        fit_data = data.mapValues(f)
        return fit_data

    def transform(self, data):
        """
        Transform input data using standard scale with fit results.

        Parameters
        ----------
        data: data_instance, input data

        Returns
        -------
        transform_data: data_instance, data after transform
        """
        f = functools.partial(self.__scale_with_column_range,
                              column_upper=self.column_max_value,
                              column_lower=self.column_min_value,
                              mean=self.mean,
                              std=self.std,
                              process_cols_list=self.scale_column_idx)
        transform_data = data.mapValues(f)
        return transform_data

    def _get_meta(self, need_run):
        if self.header:
            scale_column = [self.header[i] for i in self.scale_column_idx]
        else:
            scale_column = ["_".join(["col", str(i)]) for i in self.scale_column_idx]
        if not self.data_shape:
            self.data_shape = -1
        meta_proto_obj = ScaleMeta(method="standard_scale",
                                   area="null",
                                   scale_column=scale_column,
                                   feat_upper=self._get_upper(self.data_shape),
                                   feat_lower=self._get_lower(self.data_shape),
                                   with_mean=self.with_mean,
                                   with_std=self.with_std,
                                   need_run=need_run)
        return meta_proto_obj

    def _get_param(self):
        column_scale_param_dict = {}
        scale_column_idx_set = set(self.scale_column_idx)
        if self.header:
            for i, header in enumerate(self.header):
                if i in scale_column_idx_set:
                    param_obj = ColumnScaleParam(column_upper=self.column_max_value[i],
                                                 column_lower=self.column_min_value[i],
                                                 mean=self.mean[i],
                                                 std=self.std[i])
                    column_scale_param_dict[header] = param_obj
        param_proto_obj = ScaleParam(col_scale_param=column_scale_param_dict,
                                     header=self.header)
        return param_proto_obj
```
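The same pattern restated standalone for `StandardScale.__scale`: z = (x - mean) / std per selected column, with a (numerically) zero std replaced by 1 during fit. Whether `get_std_variance` uses the population or sample form is not visible in this excerpt; the sketch below uses the population form of `np.std`:

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0])  # one column of features
mean, std = x.mean(), x.std()  # np.std defaults to the population form
if abs(std) < 1e-6:            # degenerate column, as in StandardScale.fit
    std = 1.0
z = (x - mean) / std
print(z)  # [-1.22474487  0.          1.22474487]
```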
repo: FATE
file: FATE-master/python/federatedml/feature/feature_scale/base_scale.py
file_length: 10,746 | avg_line_length: 38.08 | max_line_length: 129 | extension_type: py

```python
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import functools
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10

from federatedml.statistic import data_overview
from federatedml.statistic.data_overview import get_header
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.util import consts
from federatedml.util import LOGGER


class BaseScale(object):
    def __init__(self, params):
        self.mode = params.mode
        self.param_scale_col_indexes = params.scale_col_indexes
        self.param_scale_names = params.scale_names
        self.feat_upper = params.feat_upper
        self.feat_lower = params.feat_lower
        self.data_shape = None
        self.header = None
        self.scale_column_idx = []
        self.summary_obj = None
        self.model_param_name = 'ScaleParam'
        self.model_meta_name = 'ScaleMeta'
        self.column_min_value = None
        self.column_max_value = None
        self.round_num = 6

    def _get_data_shape(self, data):
        if not self.data_shape:
            self.data_shape = data_overview.get_features_shape(data)
        return self.data_shape

    def _get_header(self, data):
        header = get_header(data)
        return header

    def _get_upper(self, data_shape):
        if isinstance(self.feat_upper, Iterable):
            return list(map(str, self.feat_upper))
        if self.feat_upper is None:
            return ["None" for _ in range(data_shape)]
        return [str(self.feat_upper) for _ in range(data_shape)]

    def _get_lower(self, data_shape):
        if isinstance(self.feat_lower, Iterable):
            return list(map(str, self.feat_lower))
        if self.feat_lower is None:
            return ["None" for _ in range(data_shape)]
        return [str(self.feat_lower) for _ in range(data_shape)]

    def _get_scale_column_idx(self, data):
        data_shape = self._get_data_shape(data)
        if self.param_scale_col_indexes != -1:
            if isinstance(self.param_scale_col_indexes, list):
                if len(self.param_scale_col_indexes) > 0:
                    max_col_idx = max(self.param_scale_col_indexes)
                    if max_col_idx >= data_shape:
                        raise ValueError(
                            "max column index in area is:{}, should be less than data shape:{}".format(
                                max_col_idx, data_shape))
                scale_column_idx = self.param_scale_col_indexes
                header = data_overview.get_header(data)
                scale_names = set(header).intersection(set(self.param_scale_names))
                idx_from_name = list(map(lambda n: header.index(n), scale_names))
                scale_column_idx = scale_column_idx + idx_from_name
                scale_column_idx = sorted(set(scale_column_idx))
            else:
                LOGGER.warning(
                    "parameter scale_column_idx should be a list, but not:{}, "
                    "set scale column to all columns".format(type(self.param_scale_col_indexes)))
                scale_column_idx = [i for i in range(data_shape)]
        else:
            scale_column_idx = [i for i in range(data_shape)]
        return scale_column_idx

    def __check_equal(self, size1, size2):
        if size1 != size2:
            raise ValueError("Check equal failed, {} != {}".format(size1, size2))

    def __get_min_max_value_by_normal(self, data):
        data_shape = self._get_data_shape(data)
        self.summary_obj = MultivariateStatisticalSummary(data, -1)
        header = data.schema.get("header")
        column_min_value = self.summary_obj.get_min()
        column_min_value = [column_min_value[key] for key in header]
        column_max_value = self.summary_obj.get_max()
        column_max_value = [column_max_value[key] for key in header]
        scale_column_idx_set = set(self._get_scale_column_idx(data))

        if self.feat_upper is not None:
            if isinstance(self.feat_upper, list):
                self.__check_equal(data_shape, len(self.feat_upper))
                for i in range(data_shape):
                    if i in scale_column_idx_set:
                        if column_max_value[i] > self.feat_upper[i]:
                            column_max_value[i] = self.feat_upper[i]
                        if column_min_value[i] > self.feat_upper[i]:
                            column_min_value[i] = self.feat_upper[i]
            else:
                for i in range(data_shape):
                    if i in scale_column_idx_set:
                        if column_max_value[i] > self.feat_upper:
                            column_max_value[i] = self.feat_upper
                        if column_min_value[i] > self.feat_upper:
                            column_min_value[i] = self.feat_upper

        if self.feat_lower is not None:
            if isinstance(self.feat_lower, list):
                self.__check_equal(data_shape, len(self.feat_lower))
                for i in range(data_shape):
                    if i in scale_column_idx_set:
                        if column_min_value[i] < self.feat_lower[i]:
                            column_min_value[i] = self.feat_lower[i]
                        if column_max_value[i] < self.feat_lower[i]:
                            column_max_value[i] = self.feat_lower[i]
            else:
                for i in range(data_shape):
                    if i in scale_column_idx_set:
                        if column_min_value[i] < self.feat_lower:
                            column_min_value[i] = self.feat_lower
                        if column_max_value[i] < self.feat_lower:
                            column_max_value[i] = self.feat_lower

        return column_min_value, column_max_value

    def __get_min_max_value_by_cap(self, data):
        data_shape = self._get_data_shape(data)
        self.summary_obj = MultivariateStatisticalSummary(data, -1)
        header = data.schema.get("header")
        if self.feat_upper is None:
            self.feat_upper = 1.0
        if self.feat_lower is None:
            self.feat_lower = 0
        if self.feat_upper < self.feat_lower:
            raise ValueError("feat_upper should not be less than feat_lower")
        column_min_value = self.summary_obj.get_quantile_point(self.feat_lower)
        column_min_value = [column_min_value[key] for key in header]
        column_max_value = self.summary_obj.get_quantile_point(self.feat_upper)
        column_max_value = [column_max_value[key] for key in header]
        self.__check_equal(data_shape, len(column_min_value))
        self.__check_equal(data_shape, len(column_max_value))
        return column_min_value, column_max_value

    def _get_min_max_value(self, data):
        """
        Get the minimum and maximum value of each column.
        """
        if self.mode == consts.NORMAL:
            return self.__get_min_max_value_by_normal(data)
        elif self.mode == consts.CAP:
            return self.__get_min_max_value_by_cap(data)
        else:
            raise ValueError("unknown mode of {}".format(self.mode))

    def set_column_range(self, upper, lower):
        self.column_max_value = upper
        self.column_min_value = lower

    @staticmethod
    def reset_feature_range(data, column_max_value, column_min_value, scale_column_idx):
        _data = copy.deepcopy(data)
        for i in scale_column_idx:
            value = _data.features[i]
            if value > column_max_value[i]:
                _data.features[i] = column_max_value[i]
            elif value < column_min_value[i]:
                _data.features[i] = column_min_value[i]
        return _data

    def fit_feature_range(self, data):
        if self.feat_lower is not None or self.feat_upper is not None:
            LOGGER.info("Need fit feature range")
            if not isinstance(self.column_min_value, Iterable) or not isinstance(self.column_max_value, Iterable):
                LOGGER.info(
                    "column_min_value type is:{}, column_max_value type is:{}, "
                    "should be iterable, start to get new one".format(
                        type(self.column_min_value), type(self.column_max_value)))
                self.column_min_value, self.column_max_value = self._get_min_max_value(data)
            if not self.scale_column_idx:
                self.scale_column_idx = self._get_scale_column_idx(data)
                LOGGER.info("scale_column_idx is None, start to get new one, "
                            "new scale_column_idx:{}".format(self.scale_column_idx))
            f = functools.partial(self.reset_feature_range,
                                  column_max_value=self.column_max_value,
                                  column_min_value=self.column_min_value,
                                  scale_column_idx=self.scale_column_idx)
            fit_data = data.mapValues(f)
            fit_data.schema = data.schema
            return fit_data
        else:
            LOGGER.info("feat_lower is None and feat_upper is None, "
                        "do not need to fit feature range!")
            return data

    def get_model_summary(self):
        cols_info = self._get_param().col_scale_param
        return {col_name: {"column_upper": col.column_upper,
                           "column_lower": col.column_lower,
                           "mean": col.mean,
                           "std": col.std}
                for col_name, col in cols_info.items()}

    def export_model(self, need_run):
        meta_obj = self._get_meta(need_run)
        param_obj = self._get_param()
        result = {
            self.model_meta_name: meta_obj,
            self.model_param_name: param_obj
        }
        return result

    def fit(self, data):
        pass

    def transform(self, data):
        pass

    def load_model(self, name, namespace):
        pass

    def save_model(self, name, namespace):
        pass

    def _get_param(self):
        pass

    def _get_meta(self, need_run):
        pass
```
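How the two selection parameters combine in `_get_scale_column_idx`: explicit indexes are unioned with indexes resolved from `scale_names`, and names missing from the header are silently dropped. A standalone restatement:

```python
header = ["fid0", "fid1", "fid2", "fid3"]
param_scale_col_indexes = [0]
param_scale_names = ["fid2", "fid1000"]  # unknown names are ignored

scale_names = set(header).intersection(set(param_scale_names))
idx_from_name = [header.index(n) for n in scale_names]
scale_column_idx = sorted(set(param_scale_col_indexes + idx_from_name))
print(scale_column_idx)  # [0, 2]
```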
repo: FATE
file: FATE-master/python/federatedml/feature/feature_scale/__init__.py
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: py

(empty file)
FATE
FATE-master/python/federatedml/feature/feature_scale/test/min_max_scale_test.py
import copy import time import unittest import numpy as np from fate_arch.session import computing_session as session from sklearn.preprocessing import MinMaxScaler as MMS from federatedml.feature.feature_scale.min_max_scale import MinMaxScale from federatedml.feature.instance import Instance from federatedml.param.scale_param import ScaleParam from federatedml.util.param_extract import ParamExtract class TestMinMaxScaler(unittest.TestCase): def setUp(self): self.test_data = [ [0, 1, 10, 2, 3, 1], [1, 2, 9, 2, 4, 2], [0, 3, 8, 3, 3, 3], [1, 4, 7, 4, 4, 4], [1, 5, 6, 5, 5, 5], [1, 6, 5, 6, 6, -100], [0, 7, 4, 7, 7, 7], [0, 8, 3, 8, 6, 8], [0, 9, 2, 9, 9, 9], [0, 10, 1, 10, 10, 10] ] str_time = time.strftime("%Y%m%d%H%M%S", time.localtime()) self.test_instance = [] for td in self.test_data: # self.test_instance.append(Instance(features=td)) self.test_instance.append(Instance(features=np.array(td, dtype=float))) session.init(str_time) self.table_instance = self.data_to_table(self.test_instance) self.table_instance.schema['header'] = ["fid" + str(i) for i in range(len(self.test_data[0]))] self.table_instance.schema['anonymous_header'] = [ "guest_9999_x" + str(i) for i in range(len(self.test_data[0]))] def print_table(self, table): for v in (list(table.collect())): print("id:{}, value:{}".format(v[0], v[1].features)) def data_to_table(self, data, partition=1): data_table = session.parallelize(data, include_key=False, partition=partition) return data_table def sklearn_attribute_format(self, scaler, feature_range): format_output = [] for i in range(scaler.data_min_.shape[0]): col_transform_value = (scaler.data_min_[i], scaler.data_max_[i]) format_output.append(col_transform_value) return format_output def get_table_instance_feature(self, table_instance): res_list = [] for k, v in list(table_instance.collect()): res_list.append(list(v.features)) return res_list def get_scale_param(self): component_param = { "method": "standard_scale", "mode": "normal", "scale_col_indexes": [] } scale_param = ScaleParam() param_extracter = ParamExtract() param_extracter.parse_param_from_config(scale_param, component_param) print("scale_param:{}".format(type(scale_param))) return scale_param # test with (mode='normal', area='all', feat_upper=None, feat_lower=None) def test_fit_instance_default(self): scale_param = self.get_scale_param() scale_param.scale_col_indexes = -1 scale_obj = MinMaxScale(scale_param) fit_instance = scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value scaler = MMS() scaler.fit(self.test_data) self.assertListEqual(np.round(self.get_table_instance_feature(fit_instance), 6).tolist(), np.around(scaler.transform(self.test_data), 6).tolist()) data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) # test with (area="all", upper=2, lower=1): def test_fit1(self): scale_param = self.get_scale_param() scale_param.scale_column_idx = [] scale_param.feat_upper = 2 scale_param.feat_lower = 1 scale_obj = MinMaxScale(scale_param) fit_instance = scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value for i, line in enumerate(self.test_data): for j, value in enumerate(line): if value > 2: 
self.test_data[i][j] = 2 elif value < 1: self.test_data[i][j] = 1 scaler = MMS() scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(scaler.transform(self.test_data), 6).tolist()) data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) # test with (area="all", upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]): def test_fit2(self): scale_param = self.get_scale_param() scale_param.scale_column_idx = [] scale_param.feat_upper = [2, 2, 2, 2, 2, 2] scale_param.feat_lower = [1, 1, 1, 1, 1, 1] scale_obj = MinMaxScale(scale_param) fit_instance = scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value for i, line in enumerate(self.test_data): for j, value in enumerate(line): if value > 2: self.test_data[i][j] = 2 elif value < 1: self.test_data[i][j] = 1 scaler = MMS() scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(scaler.transform(self.test_data), 6).tolist()) data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) # test with (area="col", scale_column_idx=[1,2,4], upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]): def test_fit3(self): scale_column_idx = [1, 2, 4] scale_param = self.get_scale_param() # scale_param.area = "col" scale_param.feat_upper = [2, 2, 2, 2, 2, 2] scale_param.feat_lower = [1, 1, 1, 1, 1, 1] scale_param.scale_col_indexes = scale_column_idx scale_obj = MinMaxScale(scale_param) fit_instance = scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value raw_data = copy.deepcopy(self.test_data) for i, line in enumerate(self.test_data): for j, value in enumerate(line): if j in scale_column_idx: if value > 2: self.test_data[i][j] = 2 elif value < 1: self.test_data[i][j] = 1 scaler = MMS() scaler.fit(self.test_data) sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist() for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data) for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) # test with (area="col", scale_column_idx=[1,2,4], upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1]): def test_fit4(self): scale_column_idx = [1, 2, 4] scale_param = self.get_scale_param() # scale_param.area = "col" scale_param.feat_upper = 2 scale_param.feat_lower = 1 
scale_param.scale_col_indexes = scale_column_idx scale_obj = MinMaxScale(scale_param) fit_instance = scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value raw_data = copy.deepcopy(self.test_data) for i, line in enumerate(self.test_data): for j, value in enumerate(line): if j in scale_column_idx: if value > 2: self.test_data[i][j] = 2 elif value < 1: self.test_data[i][j] = 1 scaler = MMS() scaler.fit(self.test_data) sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist() for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data) for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) # test with (mode="cap", scale_col_indexes=[1,2,4], upper=0.8, lower=0.2): def test_fit5(self): scale_column_idx = [1, 2, 4] scale_param = self.get_scale_param() scale_param.mode = "cap" # scale_param.area = "col" scale_param.feat_upper = 0.8 scale_param.feat_lower = 0.2 scale_param.scale_col_indexes = scale_column_idx scale_obj = MinMaxScale(scale_param) fit_instance = scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value raw_data = copy.deepcopy(self.test_data) gt_cap_lower_list = [0, 2, 2, 2, 3, 1] gt_cap_upper_list = [1, 8, 8, 8, 7, 8] for i, line in enumerate(self.test_data): for j, value in enumerate(line): if value > gt_cap_upper_list[j]: self.test_data[i][j] = gt_cap_upper_list[j] elif value < gt_cap_lower_list[j]: self.test_data[i][j] = gt_cap_lower_list[j] scaler = MMS() scaler.fit(self.test_data) sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist() for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data) for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) # test with (mode="cap", scale_names=['fid1', 'fid2', 'fid4', 'fid1000'], upper=0.8, lower=0.2): def test_fit6(self): scale_column_idx = [1, 2, 4] scale_names = ['fid1', 'fid2', 'fid4', 'fid1000'] scale_param = self.get_scale_param() scale_param.mode = "cap" # scale_param.area = "col" scale_param.feat_upper = 0.8 scale_param.feat_lower = 0.2 scale_param.scale_names = scale_names scale_param.scale_col_indexes = [] scale_obj = MinMaxScale(scale_param) fit_instance =
scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value raw_data = copy.deepcopy(self.test_data) gt_cap_lower_list = [0, 2, 2, 2, 3, 1] gt_cap_upper_list = [1, 8, 8, 8, 7, 8] for i, line in enumerate(self.test_data): for j, value in enumerate(line): if value > gt_cap_upper_list[j]: self.test_data[i][j] = gt_cap_upper_list[j] elif value < gt_cap_lower_list[j]: self.test_data[i][j] = gt_cap_lower_list[j] scaler = MMS() scaler.fit(self.test_data) sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist() for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_instance), sklearn_transform_data) for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) # test with (mode="cap", scale_names=['fid1', 'fid2', 'fid1000'], scale_col_indexes=[2, 4], upper=0.8, lower=0.2): def test_fit7(self): scale_column_idx = [1, 2, 4] scale_names = ['fid1', 'fid2', 'fid1000'] scale_param = self.get_scale_param() scale_param.mode = "cap" # scale_param.area = "col" scale_param.feat_upper = 0.8 scale_param.feat_lower = 0.2 scale_param.scale_names = scale_names scale_param.scale_col_indexes = [2, 4] scale_obj = MinMaxScale(scale_param) fit_instance = scale_obj.fit(self.table_instance) column_min_value = scale_obj.column_min_value column_max_value = scale_obj.column_max_value raw_data = copy.deepcopy(self.test_data) gt_cap_lower_list = [0, 2, 2, 2, 3, 1] gt_cap_upper_list = [1, 8, 8, 8, 7, 8] for i, line in enumerate(self.test_data): for j, value in enumerate(line): if value > gt_cap_upper_list[j]: self.test_data[i][j] = gt_cap_upper_list[j] elif value < gt_cap_lower_list[j]: self.test_data[i][j] = gt_cap_lower_list[j] scaler = MMS() scaler.fit(self.test_data) sklearn_transform_data = np.around(scaler.transform(self.test_data), 6).tolist() for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] fit_data = np.round(self.get_table_instance_feature(fit_instance), 6).tolist() self.assertListEqual(fit_data, sklearn_transform_data) for i, line in enumerate(sklearn_transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: sklearn_transform_data[i][j] = raw_data[i][j] data_min = list(scaler.data_min_) data_max = list(scaler.data_max_) self.assertListEqual(column_min_value, data_min) self.assertListEqual(column_max_value, data_max) transform_data = scale_obj.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(transform_data)) def tearDown(self): session.stop() if __name__ == "__main__": unittest.main()
17,455
40.661098
102
py
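The min-max tests above validate FATE's MinMaxScale against sklearn's MinMaxScaler, optionally capping features into [feat_lower, feat_upper] before fitting. Below is a minimal standalone sketch of that arithmetic for reference; the function name and signature are illustrative and not part of FATE's API.

import numpy as np

def min_max_scale_sketch(data, feat_lower=None, feat_upper=None):
    # Cap values into [feat_lower, feat_upper] first, mirroring how the tests
    # rewrite self.test_data before fitting sklearn's MinMaxScaler.
    data = np.asarray(data, dtype=float)
    if feat_lower is not None or feat_upper is not None:
        data = np.clip(data, feat_lower, feat_upper)
    col_min, col_max = data.min(axis=0), data.max(axis=0)
    # Guard constant columns against division by zero.
    span = np.where(col_max > col_min, col_max - col_min, 1)
    return (data - col_min) / span, col_min, col_max

# Example: col_min / col_max here play the role of the column_min_value /
# column_max_value attributes asserted in the tests.
scaled, col_min, col_max = min_max_scale_sketch(
    [[0, 1, 10], [1, 2, 9], [0, 3, 8]], feat_lower=1, feat_upper=2)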
FATE
FATE-master/python/federatedml/feature/feature_scale/test/standard_scale_test.py
import copy import time import unittest import numpy as np from fate_arch.session import computing_session as session from sklearn.preprocessing import StandardScaler as SSL from federatedml.feature.feature_scale.standard_scale import StandardScale from federatedml.feature.instance import Instance from federatedml.param.scale_param import ScaleParam from federatedml.util.param_extract import ParamExtract class TestStandardScaler(unittest.TestCase): def setUp(self): self.test_data = [ [0, 1.0, 10, 2, 3, 1], [1.0, 2, 9, 2, 4, 2], [0, 3.0, 8, 3, 3, 3], [1.0, 4, 7, 4, 4, 4], [1.0, 5, 6, 5, 5, 5], [1.0, 6, 5, 6, 6, -100], [0, 7.0, 4, 7, 7, 7], [0, 8, 3.0, 8, 6, 8], [0, 9, 2, 9.0, 9, 9], [0, 10, 1, 10.0, 10, 10] ] str_time = time.strftime("%Y%m%d%H%M%S", time.localtime()) session.init(str_time) self.test_instance = [] for td in self.test_data: self.test_instance.append(Instance(features=np.array(td))) self.table_instance = self.data_to_table(self.test_instance) self.table_instance.schema['header'] = ["fid" + str(i) for i in range(len(self.test_data[0]))] self.table_instance.schema['anonymous_header'] = [ "guest_9999_x" + str(i) for i in range(len(self.test_data[0]))] def print_table(self, table): for v in (list(table.collect())): print(v[1].features) def data_to_table(self, data, partition=10): data_table = session.parallelize(data, include_key=False, partition=partition) return data_table def get_table_instance_feature(self, table_instance): res_list = [] for k, v in list(table_instance.collect()): res_list.append(list(np.around(v.features, 4))) return res_list def get_scale_param(self): component_param = { "method": "standard_scale", "mode": "normal", "scale_col_indexes": [], "with_mean": True, "with_std": True, } scale_param = ScaleParam() param_extracter = ParamExtract() param_extracter.parse_param_from_config(scale_param, component_param) return scale_param # test with (with_mean=True, with_std=True): def test_fit1(self): scale_param = self.get_scale_param() standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std scaler = SSL() scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(scaler.transform(self.test_data), 4).tolist()) self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4))) self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4))) # test with (with_mean=False, with_std=True): def test_fit2(self): scale_param = self.get_scale_param() scale_param.with_mean = False standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std scaler = SSL(with_mean=False) scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(scaler.transform(self.test_data), 4).tolist()) self.assertListEqual(list(np.around(mean, 4)), [0 for _ in mean]) self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4))) # test with (with_mean=True, with_std=False): def test_fit3(self): scale_param = self.get_scale_param() scale_param.with_std = False standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std scaler = SSL(with_std=False) scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(scaler.transform(self.test_data), 
4).tolist()) self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4))) self.assertListEqual(list(np.around(std, 4)), [1 for _ in std]) # test with (with_mean=False, with_std=False): def test_fit4(self): scale_param = self.get_scale_param() scale_param.with_std = False scale_param.with_mean = False standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std scaler = SSL(with_mean=False, with_std=False) scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(scaler.transform(self.test_data), 4).tolist()) self.assertEqual(mean, [0 for _ in range(len(self.test_data[0]))]) self.assertEqual(std, [1 for _ in range(len(self.test_data[0]))]) # test with (area="all", scale_column_idx=[], with_mean=True, with_std=True): def test_fit5(self): scale_param = self.get_scale_param() scale_param.scale_column_idx = [] standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std scaler = SSL() scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(scaler.transform(self.test_data), 4).tolist()) self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4))) self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4))) # test with (area="col", scale_column_idx=[], with_mean=True, with_std=True): def test_fit6(self): scale_param = self.get_scale_param() scale_param.scale_col_indexes = [] standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std scaler = SSL() scaler.fit(self.test_data) self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(self.test_data, 4).tolist()) self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4))) self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4))) # test with (area="all", upper=2, lower=1, with_mean=False, with_std=False): def test_fit7(self): scale_param = self.get_scale_param() scale_param.scale_column_idx = [] scale_param.feat_upper = 2 scale_param.feat_lower = 1 scale_param.with_mean = False scale_param.with_std = False standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std column_max_value = standard_scaler.column_max_value column_min_value = standard_scaler.column_min_value for i, line in enumerate(self.test_data): for j, value in enumerate(line): if value > 2: self.test_data[i][j] = 2 elif value < 1: self.test_data[i][j] = 1 self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(self.test_data, 4).tolist()) self.assertEqual(mean, [0 for _ in range(len(self.test_data[0]))]) self.assertEqual(std, [1 for _ in range(len(self.test_data[0]))]) self.assertEqual(column_max_value, [1, 2, 2, 2, 2, 2]) self.assertEqual(column_min_value, [1, 1, 1, 2, 2, 1]) # test with (area="all", upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1], with_mean=False, with_std=False): def test_fit8(self): scale_param = self.get_scale_param() scale_param.scale_column_idx = [] scale_param.feat_upper = [2, 2, 2, 2, 2, 2] scale_param.feat_lower = [1, 1, 1, 1, 1, 1] scale_param.with_mean = False scale_param.with_std = False standard_scaler = StandardScale(scale_param) fit_instance = 
standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std column_max_value = standard_scaler.column_max_value column_min_value = standard_scaler.column_min_value for i, line in enumerate(self.test_data): for j, value in enumerate(line): if value > 2: self.test_data[i][j] = 2 elif value < 1: self.test_data[i][j] = 1 self.assertListEqual(self.get_table_instance_feature(fit_instance), np.around(self.test_data, 4).tolist()) self.assertEqual(mean, [0 for _ in range(len(self.test_data[0]))]) self.assertEqual(std, [1 for _ in range(len(self.test_data[0]))]) self.assertEqual(column_max_value, [1, 2, 2, 2, 2, 2]) self.assertEqual(column_min_value, [1, 1, 1, 2, 2, 1]) # test with (area="col", upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1], # scale_column_idx=[1,2,4], with_mean=True, with_std=True): def test_fit9(self): scale_column_idx = [1, 2, 4] scale_param = self.get_scale_param() scale_param.feat_upper = [2, 2, 2, 2, 2, 2] scale_param.feat_lower = [1, 1, 1, 1, 1, 1] scale_param.with_mean = True scale_param.with_std = True scale_param.scale_col_indexes = scale_column_idx standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std column_max_value = standard_scaler.column_max_value column_min_value = standard_scaler.column_min_value raw_data = copy.deepcopy(self.test_data) for i, line in enumerate(self.test_data): for j, value in enumerate(line): if j in scale_column_idx: if value > 2: self.test_data[i][j] = 2 elif value < 1: self.test_data[i][j] = 1 scaler = SSL(with_mean=True, with_std=True) scaler.fit(self.test_data) transform_data = np.around(scaler.transform(self.test_data), 4).tolist() for i, line in enumerate(transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: transform_data[i][j] = raw_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_instance), transform_data) self.assertListEqual(list(np.around(mean, 6)), list(np.around(scaler.mean_, 6))) self.assertListEqual(list(np.around(std, 6)), list(np.around(scaler.scale_, 6))) self.assertEqual(column_max_value, [1, 2, 2, 10, 2, 10]) self.assertEqual(column_min_value, [0, 1, 1, 2, 2, -100]) raw_data_transform = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(raw_data_transform)) # test with (mode="cap", area="col", upper=0.8, lower=0.2, scale_column_idx=[1,2,4], with_mean=True, with_std=True): def test_fit10(self): scale_column_idx = [1, 2, 4] scale_param = self.get_scale_param() scale_param.scale_col_indexes = [] scale_param.feat_upper = 0.8 scale_param.feat_lower = 0.2 scale_param.with_mean = True scale_param.with_std = True scale_param.mode = "cap" scale_param.scale_col_indexes = scale_column_idx standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) mean = standard_scaler.mean std = standard_scaler.std column_max_value = standard_scaler.column_max_value column_min_value = standard_scaler.column_min_value gt_cap_lower_list = [0, 2, 2, 2, 3, 1] gt_cap_upper_list = [1, 8, 8, 8, 7, 8] raw_data = copy.deepcopy(self.test_data) for i, line in enumerate(self.test_data): for j, value in enumerate(line): if j in scale_column_idx: if value > gt_cap_upper_list[j]: self.test_data[i][j] = gt_cap_upper_list[j] elif value < gt_cap_lower_list[j]: self.test_data[i][j] = gt_cap_lower_list[j] scaler = SSL(with_mean=True, with_std=True) 
scaler.fit(self.test_data) transform_data = np.around(scaler.transform(self.test_data), 4).tolist() for i, line in enumerate(transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: transform_data[i][j] = raw_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_instance), transform_data) self.assertEqual(column_max_value, gt_cap_upper_list) self.assertEqual(column_min_value, gt_cap_lower_list) self.assertListEqual(list(np.around(mean, 6)), list(np.around(scaler.mean_, 6))) self.assertListEqual(list(np.around(std, 6)), list(np.around(scaler.scale_, 6))) raw_data_transform = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(fit_instance), self.get_table_instance_feature(raw_data_transform)) # test with (with_mean=True, with_std=True): def test_transform1(self): scale_param = self.get_scale_param() standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(transform_data), self.get_table_instance_feature(fit_instance)) # test with (with_mean=True, with_std=False): def test_transform2(self): scale_param = self.get_scale_param() scale_param.with_std = False standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(transform_data), self.get_table_instance_feature(fit_instance)) # test with (with_mean=False, with_std=True): def test_transform3(self): scale_param = self.get_scale_param() scale_param.with_mean = False standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(transform_data), self.get_table_instance_feature(fit_instance)) # test with (with_mean=False, with_std=False): def test_transform4(self): scale_param = self.get_scale_param() scale_param.with_mean = False scale_param.with_std = False standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(transform_data), self.get_table_instance_feature(fit_instance)) # test with (area='all', scale_column_idx=[], with_mean=False, with_std=False): def test_transform5(self): scale_param = self.get_scale_param() scale_param.with_mean = False scale_param.with_std = False scale_param.scale_column_idx = [] standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(transform_data), self.get_table_instance_feature(fit_instance)) # test with (area='col', scale_column_idx=[], with_mean=False, with_std=False): def test_transform6(self): scale_param = self.get_scale_param() scale_param.with_mean = False scale_param.with_std = False scale_param.scale_column_idx = [] standard_scaler = StandardScale(scale_param) fit_instance = standard_scaler.fit(self.table_instance) transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(transform_data), self.get_table_instance_feature(fit_instance)) def test_cols_select_fit_and_transform(self): scale_param =
self.get_scale_param() scale_param.scale_column_idx = [1, 2, 4] standard_scaler = StandardScale(scale_param) fit_data = standard_scaler.fit(self.table_instance) scale_column_idx = standard_scaler.scale_column_idx scaler = SSL(with_mean=True, with_std=True) scaler.fit(self.test_data) transform_data = np.around(scaler.transform(self.test_data), 4).tolist() for i, line in enumerate(transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: transform_data[i][j] = self.test_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_data), transform_data) std_scale_transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(std_scale_transform_data), transform_data) def test_cols_select_fit_and_transform_repeat(self): scale_param = self.get_scale_param() scale_param.scale_column_idx = [1, 1, 2, 2, 4, 5, 5] standard_scaler = StandardScale(scale_param) fit_data = standard_scaler.fit(self.table_instance) scale_column_idx = standard_scaler.scale_column_idx scaler = SSL(with_mean=True, with_std=True) scaler.fit(self.test_data) transform_data = np.around(scaler.transform(self.test_data), 4).tolist() for i, line in enumerate(transform_data): for j, cols in enumerate(line): if j not in scale_column_idx: transform_data[i][j] = self.test_data[i][j] self.assertListEqual(self.get_table_instance_feature(fit_data), transform_data) std_scale_transform_data = standard_scaler.transform(self.table_instance) self.assertListEqual(self.get_table_instance_feature(std_scale_transform_data), transform_data) def tearDown(self): session.stop() if __name__ == "__main__": unittest.main()
19,500
42.143805
120
py
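The standard-scale tests above pin down how with_mean and with_std interact: a disabled switch falls back to mean 0 or std 1, which is what the [0, ...] and [1, ...] assertions encode. Here is a minimal sketch of that behavior, matching sklearn's population std (ddof=0) and its zero-scale-to-one convention; the names are illustrative, not FATE's API.

import numpy as np

def standard_scale_sketch(data, with_mean=True, with_std=True):
    data = np.asarray(data, dtype=float)
    # Disabled switches degrade to identity terms, as the tests assert.
    mean = data.mean(axis=0) if with_mean else np.zeros(data.shape[1])
    std = data.std(axis=0) if with_std else np.ones(data.shape[1])
    std = np.where(std == 0, 1, std)  # sklearn also maps zero scale to 1
    return (data - mean) / std, mean, std

# Example mirroring test_fit3: centering only, no rescaling.
scaled, mean, std = standard_scale_sketch(
    [[0, 1.0, 10], [1.0, 2, 9], [0, 3.0, 8]], with_mean=True, with_std=False)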
FATE
FATE-master/python/federatedml/feature/hetero_feature_selection/base_feature_selection.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import functools import random from federatedml.feature.feature_selection import filter_factory from federatedml.feature.feature_selection.model_adapter.adapter_factory import adapter_factory from federatedml.feature.feature_selection.selection_properties import SelectionProperties, CompletedSelectionResults from federatedml.model_base import ModelBase from federatedml.param.feature_selection_param import FeatureSelectionParam from federatedml.protobuf.generated import feature_selection_param_pb2, feature_selection_meta_pb2 from federatedml.statistic.data_overview import get_header, \ get_anonymous_header, look_up_names_from_header, header_alignment from federatedml.transfer_variable.transfer_class.hetero_feature_selection_transfer_variable import \ HeteroFeatureSelectionTransferVariable from federatedml.util import LOGGER from federatedml.util import abnormal_detection from federatedml.util import consts from federatedml.util.io_check import assert_io_num_rows_equal from federatedml.util.schema_check import assert_schema_consistent MODEL_PARAM_NAME = 'FeatureSelectionParam' MODEL_META_NAME = 'FeatureSelectionMeta' MODEL_NAME = 'HeteroFeatureSelection' class BaseHeteroFeatureSelection(ModelBase): def __init__(self): super(BaseHeteroFeatureSelection, self).__init__() self.transfer_variable = HeteroFeatureSelectionTransferVariable() self.curt_select_properties = SelectionProperties() self.completed_selection_result = CompletedSelectionResults() self.loaded_local_select_properties = dict() self.loaded_host_filter_results = dict() self.schema = None self.header = None self.anonymous_header = None self.party_name = 'Base' # Possible previous model self.binning_model = None self.static_obj = None self.model_param = FeatureSelectionParam() # self.meta_dicts = {} self.meta_list = [] self.isometric_models = {} def _init_model(self, params): self.model_param = params # self.cols_index = params.select_cols self.filter_methods = params.filter_methods # self.local_only = params.local_only def _init_select_params(self, data_instances): if self.schema is None: self.schema = data_instances.schema if self.header is not None: # load current data anonymous header for prediction with model of version < 1.9.0 # if len(self.completed_selection_result.anonymous_header) == 0: if self.anonymous_header is None: data_anonymous_header = get_anonymous_header(data_instances) # LOGGER.info(f"data_anonymous_header: {data_anonymous_header}") self.anonymous_header = data_anonymous_header self.completed_selection_result.set_anonymous_header(data_anonymous_header) if self.role == consts.HOST: anonymous_header_in_old_format = self.anonymous_generator. 
\ generated_compatible_anonymous_header_with_old_version(data_anonymous_header) anonymous_dict = dict(zip(anonymous_header_in_old_format, data_anonymous_header)) self.transfer_variable.host_anonymous_header_dict.remote(anonymous_dict, role=consts.GUEST, idx=0) for filter_name, select_properties in self.loaded_local_select_properties.items(): self.completed_selection_result.add_filter_results(filter_name, select_properties) else: host_anonymous_dict_list = self.transfer_variable.host_anonymous_header_dict.get(idx=-1) for filter_name, cur_select_properties in self.loaded_local_select_properties.items(): cur_host_select_properties_list = [] host_feature_values_obj_list, host_left_cols_obj_list = self.loaded_host_filter_results[ filter_name] for i, host_left_cols_obj in enumerate(host_left_cols_obj_list): cur_host_select_properties = SelectionProperties() old_host_header = list(host_anonymous_dict_list[i].keys()) host_feature_values = host_feature_values_obj_list[i].feature_values cur_host_select_properties.load_properties_with_new_header(old_host_header, host_feature_values, host_left_cols_obj, host_anonymous_dict_list[i]) cur_host_select_properties_list.append(cur_host_select_properties) self.completed_selection_result.add_filter_results(filter_name, cur_select_properties, cur_host_select_properties_list) return self.schema = data_instances.schema header = get_header(data_instances) anonymous_header = get_anonymous_header(data_instances) self.header = header self.anonymous_header = anonymous_header self.curt_select_properties.set_header(header) # use anonymous header of input data self.curt_select_properties.set_anonymous_header(anonymous_header) self.curt_select_properties.set_last_left_col_indexes([x for x in range(len(header))]) if self.model_param.select_col_indexes == -1: self.curt_select_properties.set_select_all_cols() else: self.curt_select_properties.add_select_col_indexes(self.model_param.select_col_indexes) if self.model_param.use_anonymous: select_names = look_up_names_from_header(self.model_param.select_names, anonymous_header, header) # LOGGER.debug(f"use_anonymous is true, select names: {select_names}") else: select_names = self.model_param.select_names self.curt_select_properties.add_select_col_names(select_names) self.completed_selection_result.set_header(header) self.completed_selection_result.set_anonymous_header(anonymous_header) self.completed_selection_result.set_select_col_names(self.curt_select_properties.select_col_names) self.completed_selection_result.set_all_left_col_indexes(self.curt_select_properties.all_left_col_indexes) def _get_meta(self): meta_dicts = {'filter_methods': self.filter_methods, 'cols': self.completed_selection_result.get_select_col_names(), 'need_run': self.need_run, "filter_metas": self.meta_list} meta_protobuf_obj = feature_selection_meta_pb2.FeatureSelectionMeta(**meta_dicts) return meta_protobuf_obj def _get_param(self): # LOGGER.debug("curt_select_properties.left_col_name: {}, completed_selection_result: {}".format( # self.curt_select_properties.left_col_names, self.completed_selection_result.all_left_col_names # )) # LOGGER.debug("Length of left cols: {}".format(len(self.completed_selection_result.all_left_col_names))) # left_cols = {x: True for x in self.curt_select_properties.left_col_names} left_cols = {x: True for x in self.completed_selection_result.all_left_col_names} final_left_cols = feature_selection_param_pb2.LeftCols( original_cols=self.completed_selection_result.get_select_col_names(), left_cols=left_cols ) host_col_names = [] 
if self.role == consts.GUEST: for host_id, this_host_name in enumerate(self.completed_selection_result.get_host_sorted_col_names()): party_id = self.component_properties.host_party_idlist[host_id] # LOGGER.debug("In _get_param, this_host_name: {}, party_id: {}".format(this_host_name, party_id)) host_col_names.append(feature_selection_param_pb2.HostColNames(col_names=this_host_name, party_id=str(party_id))) else: party_id = self.component_properties.local_partyid # if self.anonymous_header: # anonymous_names = self.anonymous_header """else: anonymous_names = [anonymous_generator.generate_anonymous(fid, model=self) for fid in range(len(self.header))] """ host_col_names.append(feature_selection_param_pb2.HostColNames(col_names=self.anonymous_header, party_id=str(party_id))) col_name_to_anonym_dict = None if self.header and self.anonymous_header: col_name_to_anonym_dict = dict(zip(self.header, self.anonymous_header)) result_obj = feature_selection_param_pb2.FeatureSelectionParam( results=self.completed_selection_result.filter_results, final_left_cols=final_left_cols, col_names=self.completed_selection_result.get_sorted_col_names(), host_col_names=host_col_names, header=self.curt_select_properties.header, col_name_to_anonym_dict=col_name_to_anonym_dict ) return result_obj def save_data(self): return self.data_output def export_model(self): # LOGGER.debug("Model output is : {}".format(self.model_output)) """ if self.model_output is not None: LOGGER.debug("model output already exists, return directly") return self.model_output """ meta_obj = self._get_meta() param_obj = self._get_param() result = { MODEL_META_NAME: meta_obj, MODEL_PARAM_NAME: param_obj } self.model_output = result return result def _load_selection_model(self, model_dict): LOGGER.debug("Feature selection need run: {}".format(self.need_run)) if not self.need_run: return model_param = list(model_dict.get('model').values())[0].get(MODEL_PARAM_NAME) model_meta = list(model_dict.get('model').values())[0].get(MODEL_META_NAME) self.model_output = { MODEL_META_NAME: model_meta, MODEL_PARAM_NAME: model_param } header = list(model_param.header) # LOGGER.info(f"col_name_to_anonym_dict: {model_param.col_name_to_anonym_dict}") self.header = header self.curt_select_properties.set_header(header) self.completed_selection_result.set_header(header) self.curt_select_properties.set_last_left_col_indexes([x for x in range(len(header))]) self.curt_select_properties.add_select_col_names(header) # for model ver >= 1.9.0 if model_param.col_name_to_anonym_dict: col_name_to_anonym_dict = dict(model_param.col_name_to_anonym_dict) self.anonymous_header = [col_name_to_anonym_dict[x] for x in header] self.completed_selection_result.set_anonymous_header(self.anonymous_header) host_col_names_list = model_param.host_col_names for result in model_param.results: cur_select_properties = copy.deepcopy(self.curt_select_properties) feature_values, left_cols_obj = dict(result.feature_values), result.left_cols cur_select_properties.load_properties(header, feature_values, left_cols_obj) cur_host_select_properties_list = [] host_feature_values_obj_list = list(result.host_feature_values) host_left_cols_obj_list = list(result.host_left_cols) for i, host_left_cols_obj in enumerate(host_left_cols_obj_list): cur_host_select_properties = SelectionProperties() host_col_names_obj = host_col_names_list[i] host_header = list(host_col_names_obj.col_names) host_feature_values = host_feature_values_obj_list[i].feature_values cur_host_select_properties.load_properties(host_header, 
host_feature_values, host_left_cols_obj) cur_host_select_properties_list.append(cur_host_select_properties) self.completed_selection_result.add_filter_results(result.filter_name, cur_select_properties, cur_host_select_properties_list) # for model ver 1.8.x else: LOGGER.warning(f"Anonymous column name dictionary not found in given model." f"Will infer host(s)' anonymous names.") """ self.loaded_host_col_names_list = [list(host_col_names_obj.col_names) for host_col_names_obj in model_param.host_col_names] """ for result in model_param.results: cur_select_properties = copy.deepcopy(self.curt_select_properties) feature_values, left_cols_obj = dict(result.feature_values), result.left_cols cur_select_properties.load_properties(header, feature_values, left_cols_obj) # record local select properties self.loaded_local_select_properties[result.filter_name] = cur_select_properties host_feature_values_obj_list = list(result.host_feature_values) host_left_cols_obj_list = list(result.host_left_cols) self.loaded_host_filter_results[result.filter_name] = (host_feature_values_obj_list, host_left_cols_obj_list) final_left_cols_names = dict(model_param.final_left_cols.left_cols) # LOGGER.debug("final_left_cols_names: {}".format(final_left_cols_names)) for col_name, _ in final_left_cols_names.items(): self.curt_select_properties.add_left_col_name(col_name) self.completed_selection_result.add_filter_results(filter_name='conclusion', select_properties=self.curt_select_properties) self.update_curt_select_param() def _load_isometric_model(self, iso_model): LOGGER.debug(f"When loading isometric_model, iso_model names are:" f" {iso_model.keys()}") for cpn_name, model_dict in iso_model.items(): model_param = None model_meta = None for name, model_pb in model_dict.items(): if name.endswith("Param"): model_param = model_pb else: model_meta = model_pb model_name = model_param.model_name if model_name in self.isometric_models: raise ValueError("Should not load two same type isometric models" " in feature selection") adapter = adapter_factory(model_name) this_iso_model = adapter.convert(model_meta, model_param) self.isometric_models[model_name] = this_iso_model def load_model(self, model_dict): LOGGER.debug(f"Start to load model") if 'model' in model_dict: LOGGER.debug("Loading selection model") self._load_selection_model(model_dict) if 'isometric_model' in model_dict: LOGGER.debug("Loading isometric_model") self._load_isometric_model(model_dict['isometric_model']) @staticmethod def select_cols(instance, left_col_idx): instance.features = instance.features[left_col_idx] return instance def _transfer_data(self, data_instances): f = functools.partial(self.select_cols, left_col_idx=self.completed_selection_result.all_left_col_indexes) new_data = data_instances.mapValues(f) # LOGGER.debug("When transfering, all left_col_names: {}".format( # self.completed_selection_result.all_left_col_names # )) new_data = self.set_schema(new_data, self.completed_selection_result.all_left_col_names, self.completed_selection_result.all_left_anonymous_col_names) # one_data = new_data.first()[1] # LOGGER.debug( # "In feature selection transform, Before transform: {}, length: {} After transform: {}, length: {}".format( # before_one_data[1].features, len(before_one_data[1].features), # one_data.features, len(one_data.features))) return new_data def _abnormal_detection(self, data_instances): """ Make sure input data_instances is valid. 
""" abnormal_detection.empty_table_detection(data_instances) abnormal_detection.empty_feature_detection(data_instances) self.check_schema_content(data_instances.schema) def set_schema(self, data_instance, header=None, anonymous_header=None): if header is None: self.schema["header"] = self.curt_select_properties.header self.schema["anonymous_header"] = self.curt_select_properties.anonymous_header else: self.schema["header"] = header self.schema["anonymous_header"] = anonymous_header data_instance.schema = self.schema return data_instance def update_curt_select_param(self): new_select_properties = SelectionProperties() # all select properties must have the same header new_select_properties.set_header(self.curt_select_properties.header) new_select_properties.set_anonymous_header(self.curt_select_properties.anonymous_header) new_select_properties.set_last_left_col_indexes(self.curt_select_properties.all_left_col_indexes) new_select_properties.add_select_col_names(self.curt_select_properties.left_col_names) self.curt_select_properties = new_select_properties def _filter(self, data_instances, method, suffix, idx=0): this_filter = filter_factory.get_filter(filter_name=method, model_param=self.model_param, role=self.role, model=self, idx=idx) if method == consts.STATISTIC_FILTER: method = self.model_param.statistic_param.metrics[idx] elif method == consts.IV_FILTER: metric = self.model_param.iv_param.metrics[idx] f_type = self.model_param.iv_param.filter_type[idx] method = f"{metric}_{f_type}" elif method == consts.PSI_FILTER: metric = self.model_param.psi_param.metrics[idx] f_type = self.model_param.psi_param.filter_type[idx] method = f"{metric}_{f_type}" this_filter.set_selection_properties(self.curt_select_properties) this_filter.set_transfer_variable(self.transfer_variable) # .info(f"this_filter type: {this_filter.filter_type}, method: {method}, filter obj: {this_filter}") self.curt_select_properties = this_filter.fit(data_instances, suffix).selection_properties # LOGGER.info(f"filter.fit called") host_select_properties = getattr(this_filter, 'host_selection_properties', None) # if host_select_properties is not None: # LOGGER.debug("method: {}, host_select_properties: {}".format( # method, host_select_properties[0].all_left_col_names)) self.completed_selection_result.add_filter_results(filter_name=method, select_properties=self.curt_select_properties, host_select_properties=host_select_properties) last_col_nums = len(self.curt_select_properties.last_left_col_names) left_col_names = self.curt_select_properties.left_col_names self.add_summary(method, { "last_col_nums": last_col_nums, "left_col_nums": len(left_col_names), "left_col_names": left_col_names }) # LOGGER.debug("method: {}, selection_cols: {}, left_cols: {}".format( # method, self.curt_select_properties.select_col_names, self.curt_select_properties.left_col_names)) self.update_curt_select_param() # LOGGER.debug("After updated, method: {}, selection_cols: {}".format( # method, self.curt_select_properties.select_col_names)) self.meta_list.append(this_filter.get_meta_obj()) def fit(self, data_instances): LOGGER.info("Start Hetero Selection Fit and transform.") self._abnormal_detection(data_instances) self._init_select_params(data_instances) original_col_nums = len(self.curt_select_properties.last_left_col_names) empty_cols = False if len(self.curt_select_properties.select_col_indexes) == 0: LOGGER.warning("None of columns has been set to select, " "will randomly select one column to participate in fitting filter(s). 
" "All columns will be kept, " "but be aware that this may lead to unexpected behavior.") header = data_instances.schema.get("header") select_idx = random.choice(range(len(header))) self.curt_select_properties.select_col_indexes = [select_idx] self.curt_select_properties.select_col_names = [header[select_idx]] empty_cols = True suffix = self.filter_methods if self.role == consts.HOST: self.transfer_variable.host_empty_cols.remote(empty_cols, role=consts.GUEST, idx=0, suffix=suffix) else: host_empty_cols_list = self.transfer_variable.host_empty_cols.get(idx=-1, suffix=suffix) host_list = self.component_properties.host_party_idlist for idx, res in enumerate(host_empty_cols_list): if res: LOGGER.warning(f"Host {host_list[idx]}'s select columns are empty;" f"host {host_list[idx]} will randomly select one " f"column to participate in fitting filter(s). " f"All columns from this host will be kept, " f"but be aware that this may lead to unexpected behavior.") for filter_idx, method in enumerate(self.filter_methods): if method in [consts.STATISTIC_FILTER, consts.IV_FILTER, consts.PSI_FILTER, consts.HETERO_SBT_FILTER, consts.HOMO_SBT_FILTER, consts.HETERO_FAST_SBT_FILTER, consts.VIF_FILTER]: if method == consts.STATISTIC_FILTER: metrics = self.model_param.statistic_param.metrics elif method == consts.IV_FILTER: metrics = self.model_param.iv_param.metrics elif method == consts.PSI_FILTER: metrics = self.model_param.psi_param.metrics elif method in [consts.HETERO_SBT_FILTER, consts.HOMO_SBT_FILTER, consts.HETERO_FAST_SBT_FILTER]: metrics = self.model_param.sbt_param.metrics elif method == consts.VIF_FILTER: metrics = self.model_param.vif_param.metrics else: raise ValueError(f"method: {method} is not supported") for idx, _ in enumerate(metrics): self._filter(data_instances, method, suffix=(str(filter_idx), str(idx)), idx=idx) else: self._filter(data_instances, method, suffix=str(filter_idx)) last_col_nums = self.curt_select_properties.last_left_col_names self.add_summary("all", { "last_col_nums": original_col_nums, "left_col_nums": len(last_col_nums), "left_col_names": last_col_nums }) new_data = self._transfer_data(data_instances) # LOGGER.debug(f"Final summary: {self.summary()}") LOGGER.info("Finish Hetero Selection Fit and transform.") return new_data @assert_io_num_rows_equal @assert_schema_consistent def transform(self, data_instances): self._abnormal_detection(data_instances) self._init_select_params(data_instances) # align data instance to model header & anonymous header data_instances = header_alignment(data_instances, self.header, self.anonymous_header) new_data = self._transfer_data(data_instances) return new_data
25,830
52.591286
119
py
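After every filter has run, BaseHeteroFeatureSelection projects each Instance down to the surviving column indexes via select_cols inside a mapValues call, then rewrites the schema header to match. Below is a standalone sketch of that final projection step using plain numpy stand-ins instead of a FATE table; the helper name is illustrative.

import numpy as np

def select_cols_sketch(features, left_col_idx):
    # Same idea as BaseHeteroFeatureSelection.select_cols: keep only the
    # columns that survived all filters, preserving their order.
    return features[left_col_idx]

header = ["x0", "x1", "x2", "x3"]
left_col_idx = [0, 2]  # e.g. indexes left after iv/statistic filters
row = np.array([0.25, -1.14, 0.43, 0.96])
projected = select_cols_sketch(row, left_col_idx)
# The schema is rewritten the same way the module does in set_schema.
new_header = [header[i] for i in left_col_idx]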
FATE
FATE-master/python/federatedml/feature/hetero_feature_selection/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
661
35.777778
75
py
FATE
FATE-master/python/federatedml/feature/test/sampler_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import numpy as np from fate_arch.session import computing_session as session from federatedml.feature.instance import Instance from federatedml.feature.sampler import RandomSampler from federatedml.feature.sampler import StratifiedSampler from federatedml.util import consts class TestRandomSampler(unittest.TestCase): def setUp(self): session.init("test_random_sampler") self.data = [(i * 10 + 5, i * i) for i in range(100)] self.table = session.parallelize(self.data, include_key=True, partition=16) self.data_to_trans = [(i * 10 + 5, i * i * i) for i in range(100)] self.table_trans = session.parallelize(self.data_to_trans, include_key=True, partition=16) def test_downsample(self): sampler = RandomSampler(fraction=0.3, method="downsample") tracker = TrackerMock() sampler.set_tracker(tracker) sample_data, sample_ids = sampler.sample(self.table) self.assertTrue(sample_data.count() > 25 and sample_data.count() < 35) self.assertTrue(len(set(sample_ids)) == len(sample_ids)) new_data = list(sample_data.collect()) data_dict = dict(self.data) for id, value in new_data: self.assertTrue(id in data_dict) self.assertTrue(np.abs(value - data_dict.get(id)) < consts.FLOAT_ZERO) trans_sampler = RandomSampler(method="downsample") trans_sampler.set_tracker(tracker) trans_sample_data = trans_sampler.sample(self.table_trans, sample_ids) trans_data = list(trans_sample_data.collect()) trans_sample_ids = [id for (id, value) in trans_data] data_to_trans_dict = dict(self.data_to_trans) sample_id_mapping = dict(zip(sample_ids, range(len(sample_ids)))) self.assertTrue(len(trans_data) == len(sample_ids)) self.assertTrue(set(trans_sample_ids) == set(sample_ids)) for id, value in trans_data: self.assertTrue(id in sample_id_mapping) self.assertTrue(np.abs(value - data_to_trans_dict.get(id)) < consts.FLOAT_ZERO) def test_upsample(self): sampler = RandomSampler(fraction=3, method="upsample") tracker = TrackerMock() sampler.set_tracker(tracker) sample_data, sample_ids = sampler.sample(self.table) self.assertTrue(sample_data.count() > 250 and sample_data.count() < 350) data_dict = dict(self.data) new_data = list(sample_data.collect()) for id, value in new_data: self.assertTrue(np.abs(value - data_dict[sample_ids[id]]) < consts.FLOAT_ZERO) trans_sampler = RandomSampler(method="upsample") trans_sampler.set_tracker(tracker) trans_sample_data = trans_sampler.sample(self.table_trans, sample_ids) trans_data = list(trans_sample_data.collect()) data_to_trans_dict = dict(self.data_to_trans) self.assertTrue(len(trans_data) == len(sample_ids)) for id, value in trans_data: self.assertTrue(np.abs(value - data_to_trans_dict[sample_ids[id]]) < consts.FLOAT_ZERO) def tearDown(self): session.stop() class TestStratifiedSampler(unittest.TestCase): def setUp(self): session.init("test_stratified_sampler") self.data = [] self.data_to_trans = [] for i in range(1000): self.data.append((i, Instance(label=i % 4, features=i * i))) 
self.data_to_trans.append((i, Instance(features=i ** 3))) self.table = session.parallelize(self.data, include_key=True, partition=16) self.table_trans = session.parallelize(self.data_to_trans, include_key=True, partition=16) def test_downsample(self): fractions = [(0, 0.3), (1, 0.4), (2, 0.5), (3, 0.8)] sampler = StratifiedSampler(fractions=fractions, method="downsample") tracker = TrackerMock() sampler.set_tracker(tracker) sample_data, sample_ids = sampler.sample(self.table) count_label = [0 for i in range(4)] new_data = list(sample_data.collect()) data_dict = dict(self.data) self.assertTrue(set(sample_ids) & set(data_dict.keys()) == set(sample_ids)) for id, inst in new_data: count_label[inst.label] += 1 self.assertTrue(type(id).__name__ == 'int' and id >= 0 and id < 1000) self.assertTrue(inst.label == self.data[id][1].label and inst.features == self.data[id][1].features) for i in range(4): self.assertTrue(np.abs(count_label[i] - 250 * fractions[i][1]) < 10) trans_sampler = StratifiedSampler(method="downsample") trans_sampler.set_tracker(tracker) trans_sample_data = trans_sampler.sample(self.table_trans, sample_ids) trans_data = list(trans_sample_data.collect()) trans_sample_ids = [id for (id, value) in trans_data] data_to_trans_dict = dict(self.data_to_trans) self.assertTrue(set(trans_sample_ids) == set(sample_ids)) for id, inst in trans_data: self.assertTrue(inst.features == data_to_trans_dict.get(id).features) def test_upsample(self): fractions = [(0, 1.3), (1, 0.5), (2, 0.8), (3, 9)] sampler = StratifiedSampler(fractions=fractions, method="upsample") tracker = TrackerMock() sampler.set_tracker(tracker) sample_data, sample_ids = sampler.sample(self.table) new_data = list(sample_data.collect()) count_label = [0 for i in range(4)] data_dict = dict(self.data) for id, inst in new_data: count_label[inst.label] += 1 self.assertTrue(type(id).__name__ == 'int' and id >= 0 and id < len(sample_ids)) real_id = sample_ids[id] self.assertTrue(inst.label == self.data[real_id][1].label and inst.features == self.data[real_id][1].features) for i in range(4): self.assertTrue(np.abs(count_label[i] - 250 * fractions[i][1]) < 10) trans_sampler = StratifiedSampler(method="upsample") trans_sampler.set_tracker(tracker) trans_sample_data = trans_sampler.sample(self.table_trans, sample_ids) trans_data = list(trans_sample_data.collect()) trans_sample_ids = [id for (id, value) in trans_data] data_to_trans_dict = dict(self.data_to_trans) self.assertTrue(sorted(trans_sample_ids) == list(range(len(sample_ids)))) for id, inst in trans_data: real_id = sample_ids[id] self.assertTrue(inst.features == data_to_trans_dict[real_id].features) def tearDown(self): session.stop() class TrackerMock(object): def log_component_summary(self, *args, **kwargs): pass def log_metric_data(self, *args, **kwargs): pass def set_metric_meta(self, *args, **kwargs): pass if __name__ == '__main__': unittest.main()
7,513
39.836957
112
py
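The stratified tests above rely on a per-label sampling contract: downsample keeps roughly fraction * n rows of each label, while upsample draws with replacement and emits new sequential row ids that map back to the originals through sample_ids (hence the sorted(trans_sample_ids) == range(...) assertion). The sketch below is a rough illustrative implementation of that contract, not FATE's actual sampler.

import random

def stratified_downsample_sketch(rows, fractions):
    # rows: list of (id, label); fractions: {label: keep_fraction <= 1}
    return [rid for rid, label in rows if random.random() < fractions[label]]

def stratified_upsample_sketch(rows, fractions):
    # Each source row contributes floor(fraction) copies plus one more with
    # probability equal to the fractional part; the resampled table is keyed
    # by 0..len(sample_ids)-1 and sample_ids[new_id] recovers the source id.
    sample_ids = []
    for rid, label in rows:
        copies = int(fractions[label])
        if random.random() < fractions[label] - copies:
            copies += 1
        sample_ids.extend([rid] * copies)
    return sample_ids

rows = [(i, i % 4) for i in range(1000)]
sample_ids = stratified_upsample_sketch(rows, {0: 1.3, 1: 0.5, 2: 0.8, 3: 9})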
FATE
FATE-master/python/federatedml/feature/test/instance_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from federatedml.feature.instance import Instance class TestInstance(unittest.TestCase): def setUp(self): pass def test_instance(self): inst = Instance(inst_id=5, weight=2.0, features=[1, 2, 3], label=-5) self.assertTrue(inst.inst_id == 5 and abs(inst.weight - 2.0) < 1e-8 and inst.features == [1, 2, 3] and inst.label == -5) inst.set_weight(3) inst.set_label(5) inst.set_feature(["yes", "no"]) self.assertTrue(inst.weight == 3 and inst.label == 5 and inst.features == ["yes", "no"]) if __name__ == '__main__': unittest.main()
1,259
31.307692
96
py
FATE
FATE-master/python/federatedml/feature/test/feature_imputation_test.py
import numpy as np import unittest from federatedml.feature.feature_imputation import load_value_to_type class TestFeatureImputation(unittest.TestCase): def test_load_value_to_type(self): true_v = None v_type = "None" str_v = None self.assertEqual(true_v, load_value_to_type(str_v, v_type)) true_v = 42 v_type = type(true_v).__name__ str_v = "42" self.assertEqual(true_v, load_value_to_type(str_v, v_type)) true_v = "42.0" v_type = type(true_v).__name__ str_v = "42.0" self.assertEqual(true_v, load_value_to_type(str_v, v_type)) true_v = 42.42 v_type = type(true_v).__name__ str_v = "42.42" self.assertEqual(true_v, load_value_to_type(str_v, v_type)) true_v = np.array([42, 2, 3])[0] v_type = type(true_v).__name__ str_v = 42 self.assertEqual(true_v, load_value_to_type(str_v, v_type)) if __name__ == "__main__": unittest.main()
1,006
26.216216
69
py
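The assertions above fix load_value_to_type's contract: it casts the string form of a value back to the recorded type name, with "None" round-tripping to None. The mimic below is consistent with those test cases but is an assumption for illustration, not FATE's actual implementation.

import numpy as np

def load_value_to_type_sketch(value, v_type):
    # "None" (or a missing value) round-trips to None.
    if value is None or v_type == "None":
        return None
    casters = {"int": int, "float": float, "str": str,
               "int64": np.int64, "float64": np.float64}
    # Unknown type names: return the value unchanged (assumed fallback).
    return casters.get(v_type, lambda v: v)(value)

assert load_value_to_type_sketch("42", "int") == 42
assert load_value_to_type_sketch("42.42", "float") == 42.42
assert load_value_to_type_sketch(None, "None") is None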
FATE
FATE-master/python/federatedml/feature/test/imputer_test.py
import numpy as np import random import time import unittest from fate_arch.session import computing_session as session from federatedml.feature.imputer import Imputer class TestImputer(unittest.TestCase): def setUp(self): session.init("test_imputer_" + str(random.random())) str_time = time.strftime("%Y%m%d%H%M%S", time.localtime()) self.test_data = [ ["0.254879", "na", "0.209656", "10000", "-0.441366", "-10000", "-0.485934", "na", "-0.287570", "-0.733474"], ["-1.142928", "", "-1.166747", "-0.923578", "0.628230", "-1.021418", "-1.111867", "-0.959523", "-0.096672", "-0.121683"], ["-1.451067", "-1.406518", "none", "-1.092337", "none", "-1.168557", "-1.305831", "-1.745063", "-0.499499", "-0.302893"], ["-0.879933", "null", "-0.877527", "-0.780484", "-1.037534", "-0.483880", "-0.555498", "-0.768581", "0.433960", "-0.200928"], ["0.426758", "0.723479", "0.316885", "0.287273", "1.000835", "0.962702", "1.077099", "1.053586", "2.996525", "0.961696"], ["0.963102", "1.467675", "0.829202", "0.772457", "-0.038076", "-0.468613", "-0.307946", "-0.015321", "-0.641864", "-0.247477"], ["-0.662496", "0.212149", "-0.620475", "-0.632995", "-0.327392", "-0.385278", "-0.077665", "-0.730362", "0.217178", "-0.061280"], ["-0.453343", "-2.147457", "-0.473631", "-0.483572", "0.558093", "-0.740244", "-0.896170", "-0.617229", "-0.308601", "-0.666975"], ["-0.606584", "-0.971725", "-0.678558", "-0.591332", "-0.963013", "-1.302401", "-1.212855", "-1.321154", "-1.591501", "-1.230554"], ["-0.583805", "-0.193332", "-0.633283", "-0.560041", "-0.349310", "-0.519504", "-0.610669", "-0.929526", "-0.196974", "-0.151608"] ] self.test_instance = [] for td in self.test_data: self.test_instance.append(td) self.table_instance = self.data_to_table(self.test_instance) self.table_instance.schema['header'] = ["fid" + str(i) for i in range(len(self.test_data[0]))] self.table_instance.schema['anonymous_header'] = [ "guest_9999_x" + str(i) for i in range(len(self.test_data[0]))] def print_table(self, table): for v in (list(table.collect())): print(v[1].features) def data_to_table(self, data, partition=10): data_table = session.parallelize(data, include_key=False, partition=partition) return data_table def table_to_list(self, table_instance): res_list = [] for k, v in list(table_instance.collect()): res_list.append(list(v)) return res_list def fit_test_data(self, data, fit_values, imputer_value): for j in range(len(data)): for i in range(len(data[j])): if data[j][i] in imputer_value: data[j][i] = str(fit_values[i]) return data def fit_test_data_float(self, data, fit_values, imputer_value): for j in range(len(data)): for i in range(len(data[j])): if data[j][i] in imputer_value: data[j][i] = float(fit_values[i]) data[j][i] = float(data[j][i]) return data def test_fit_min(self): imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"] imputer = Imputer(missing_value_list=imputer_value) process_data, cols_transform_value = imputer.fit(self.table_instance, "min", output_format='str') cols_transform_value_ground_true = [-1.451067, -2.147457, -1.166747, -1.092337, -1.037534, -1.302401, -1.305831, -1.745063, -1.591501, -1.230554] test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value) self.assertListEqual(self.table_to_list(process_data), test_data_fit) self.assertListEqual(cols_transform_value, cols_transform_value_ground_true) def test_fit_max(self): imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"] imputer = Imputer(missing_value_list=imputer_value) process_data, cols_transform_value = 
imputer.fit(self.table_instance, "max", output_format='str') cols_transform_value_ground_true = [0.963102, 1.467675, 0.829202, 0.772457, 1.000835, 0.962702, 1.077099, 1.053586, 2.996525, 0.961696] test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value) self.assertListEqual(self.table_to_list(process_data), test_data_fit) self.assertListEqual(cols_transform_value, cols_transform_value_ground_true) def test_fit_mean(self): imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"] imputer = Imputer(missing_value_list=imputer_value) process_data, cols_transform_value = imputer.fit(self.table_instance, "mean", output_format='str') cols_transform_value_ground_true = [-0.413542, -0.330818, -0.343831, -0.444957, -0.107726, -0.569688, -0.548734, -0.670353, 0.002498, -0.275518] # imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"] # test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value) # process_data_list = self.table_to_list(process_data) # process_data_list = [[round(float(i), 6) for i in v] for v in process_data_list] # process_data_list = [[str(i) for i in v] for v in process_data_list] cols_transform_value = [round(v, 6) for v in cols_transform_value] # self.assertListEqual(process_data_list, test_data_fit) self.assertListEqual(cols_transform_value, cols_transform_value_ground_true) def test_fit_replace_value(self): imputer_value = ['NA', 'naaa'] imputer = Imputer(imputer_value) process_data, cols_transform_value = imputer.fit(self.table_instance, replace_method="designated", replace_value='111111', output_format='str') cols_transform_value_ground_true = ['111111' for _ in range(10)] test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value) self.assertListEqual(self.table_to_list(process_data), test_data_fit) self.assertListEqual(cols_transform_value, cols_transform_value_ground_true) def test_fit_none_replace_method(self): imputer_value = ['NA', 'naaa'] imputer = Imputer(imputer_value) process_data, cols_transform_value = imputer.fit(self.table_instance, output_format='str') cols_transform_value_ground_true = [0 for _ in range(10)] test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value) self.assertListEqual(self.table_to_list(process_data), test_data_fit) self.assertListEqual(cols_transform_value, cols_transform_value_ground_true) def test_fit_max_float(self): imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"] imputer = Imputer(missing_value_list=imputer_value) process_data, cols_transform_value = imputer.fit(self.table_instance, "max", output_format='float') cols_transform_value_ground_true = [0.963102, 1.467675, 0.829202, 0.772457, 1.000835, 0.962702, 1.077099, 1.053586, 2.996525, 0.961696] test_data_fit = self.fit_test_data_float(self.test_data, cols_transform_value_ground_true, imputer_value) self.assertListEqual(self.table_to_list(process_data), test_data_fit) self.assertListEqual(cols_transform_value, cols_transform_value_ground_true) def test_transform(self): imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"] imputer = Imputer(missing_value_list=imputer_value) cols_transform_value_ground_true = [0.963102, 1.467675, 0.829202, 0.772457, 1.000835, 0.962702, 1.077099, 1.053586, 2.996525, 0.961696] process_data = imputer.transform(self.table_instance, cols_transform_value_ground_true) test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value) 
        self.assertListEqual(self.table_to_list(process_data), test_data_fit)

    def test_transform_float(self):
        imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"]
        imputer = Imputer(missing_value_list=imputer_value)
        cols_transform_value_ground_true = [0.963102, 1.467675, 0.829202, 0.772457, 1.000835, 0.962702, 1.077099,
                                            1.053586, 2.996525, 0.961696]
        process_data = imputer.transform(self.table_instance, cols_transform_value_ground_true, output_format="float")
        test_data_fit = self.fit_test_data_float(self.test_data, cols_transform_value_ground_true, imputer_value)
        self.assertListEqual(self.table_to_list(process_data), test_data_fit)

    def test_fit_median(self):
        imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"]
        imputer = Imputer(missing_value_list=imputer_value)
        process_data, cols_transform_value = imputer.fit(self.table_instance, "median", output_format='str')
        cols_transform_value_ground_true = [-0.606584, -0.193332, -0.620475, -0.591332, -0.327392, -0.519504,
                                            -0.610669, -0.768581, -0.28757, -0.247477]
        test_data_fit = self.fit_test_data(self.test_data, cols_transform_value_ground_true, imputer_value)
        self.assertListEqual(self.table_to_list(process_data), test_data_fit)
        self.assertListEqual(cols_transform_value, cols_transform_value_ground_true)

    def test_get_impute_rate(self):
        imputer_value = ['', 'none', 'na', 'null', "10000", "-10000"]
        imputer = Imputer(missing_value_list=imputer_value)
        _, _ = imputer.fit(self.table_instance, "median", output_format='str')
        cols_impute_rate_ground_true = [0, 0.3, 0.1, 0.1, 0.1, 0.1, 0, 0.1, 0, 0]
        cols_fit_impute_rate = imputer.get_impute_rate(mode="fit")
        self.assertListEqual(cols_fit_impute_rate, cols_impute_rate_ground_true)

        cols_transform_value_ground_true = [-0.606584, -0.193332, -0.620475, -0.591332, -0.327392, -0.519504,
                                            -0.610669, -0.768581, -0.28757, -0.247477]
        _ = imputer.transform(self.table_instance, cols_transform_value_ground_true)
        # check the impute rate recorded during the transform stage
        cols_transform_impute_rate = imputer.get_impute_rate(mode="transform")
        self.assertListEqual(cols_transform_impute_rate, cols_impute_rate_ground_true)

    def tearDown(self):
        session.stop()


if __name__ == "__main__":
    unittest.main()
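# A minimal usage sketch (illustration only, not part of the original test
# suite). It reuses this module's imports; the session name, table contents
# and headers below are made up, while the Imputer(missing_value_list=...),
# fit(table, method, output_format=...) calls mirror the tests above.
def _demo_imputer_usage():
    session.init("imputer_usage_demo")
    table = session.parallelize([["1.0", "na"], ["2.0", "3.0"], ["na", "5.0"]],
                                include_key=False, partition=4)
    table.schema['header'] = ["fid0", "fid1"]
    table.schema['anonymous_header'] = ["guest_9999_x0", "guest_9999_x1"]
    imputer = Imputer(missing_value_list=["na"])
    # "mean" fills each missing cell with its column mean; output_format
    # controls whether rows come back as strings or floats
    data, fill_values = imputer.fit(table, "mean", output_format="float")
    session.stop()
    return fill_values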
10,924
55.314433
120
py
FATE
FATE-master/python/federatedml/feature/test/one_hot_test.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from fate_arch.session import computing_session as session session.init("123") from federatedml.feature.one_hot_encoder import OneHotEncoder from federatedml.feature.instance import Instance from federatedml.util.anonymous_generator_util import Anonymous import numpy as np class TestOneHotEncoder(unittest.TestCase): def setUp(self): self.data_num = 1000 self.feature_num = 3 self.cols = [0, 1, 2, 3] self.header = ['x' + str(i) for i in range(self.feature_num)] self.anonymous_header = ["guest_9999_x" + str(i) for i in range(self.feature_num)] final_result = [] for i in range(self.data_num): tmp = [] for _ in range(self.feature_num): tmp.append(np.random.choice([1, 2, 3, 'test_str'])) tmp = np.array(tmp) inst = Instance(inst_id=i, features=tmp, label=0) tmp_pair = (str(i), inst) final_result.append(tmp_pair) table = session.parallelize(final_result, include_key=True, partition=10) table.schema = {"header": self.header, "anonymous_header": self.anonymous_header} self.model_name = 'OneHotEncoder' self.table = table self.args = {"data": {self.model_name: {"data": table}}} def test_instance(self): one_hot_encoder = OneHotEncoder() one_hot_encoder.anonymous_generator = Anonymous() one_hot_encoder.cols = self.cols one_hot_encoder.cols_index = self.cols result = one_hot_encoder.fit(self.table) local_result = result.collect() for k, v in local_result: new_features = v.features self.assertTrue(len(new_features) == self.feature_num * len(self.cols)) if __name__ == '__main__': unittest.main()
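# A tiny pure-Python sketch (illustration only, not part of the original
# tests) of the transformation the encoder above performs on one categorical
# column: each distinct value becomes its own 0/1 indicator column.
def _one_hot_expand(values):
    categories = sorted({str(v) for v in values})
    return [[1 if str(v) == c else 0 for c in categories] for v in values]

# e.g. _one_hot_expand(['a', 'b', 'a']) == [[1, 0], [0, 1], [1, 0]]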
2,567
33.24
90
py
FATE
FATE-master/python/federatedml/feature/test/sparse_vector_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from federatedml.feature.sparse_vector import SparseVector class TestSparseVector(unittest.TestCase): def setUp(self): pass def test_instance(self): indices = [] data = [] for i in range(1, 10): indices.append(i * i) data.append(i ** 3) shape = 100 sparse_data = SparseVector(indices, data, shape) self.assertTrue(sparse_data.shape == shape and len(sparse_data.sparse_vec) == 9) self.assertTrue(sparse_data.count_zeros() == 91) self.assertTrue(sparse_data.count_non_zeros() == 9) for idx, val in zip(indices, data): self.assertTrue(sparse_data.get_data(idx) == val) for i in range(100): if i in indices: continue self.assertTrue(sparse_data.get_data(i, i ** 4) == i ** 4) self.assertTrue(dict(sparse_data.get_all_data()) == dict(zip(indices, data))) if __name__ == '__main__': unittest.main()
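# A free-standing construction example (illustration only) using the same
# SparseVector API the assertions above exercise: positional (indices, data,
# shape) construction, defaulted lookup, and zero counting.
def _demo_sparse_vector():
    vec = SparseVector([2, 7], [0.5, 1.5], 10)
    # get_data returns the stored value, or the supplied default when the
    # index is absent from the sparse map
    return vec.get_data(2), vec.get_data(3, 0.0), vec.count_zeros()  # 0.5, 0.0, 8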
1,620
30.173077
88
py
FATE
FATE-master/python/federatedml/feature/test/quantile_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import numpy as np import random from fate_arch.session import computing_session as session import uuid from federatedml.feature.binning.quantile_binning import QuantileBinning from federatedml.feature.instance import Instance # from federatedml.feature.quantile import Quantile from federatedml.feature.sparse_vector import SparseVector from federatedml.param.feature_binning_param import FeatureBinningParam class TestInstance(unittest.TestCase): def setUp(self): self.job_id = str(uuid.uuid1()) session.init(self.job_id) # session.init("test_instance") def gen_data(self): dense_inst = [] headers = ['x' + str(i) for i in range(20)] anonymous_header = ["guest_9999_x" + str(i) for i in range(20)] for i in range(100): inst = Instance(features=(i % 16 * np.ones(20))) dense_inst.append((i, inst)) self.dense_table = session.parallelize(dense_inst, include_key=True, partition=2) self.dense_table.schema = {'header': headers, "anonymous_header": anonymous_header} self.sparse_inst = [] for i in range(100): dict = {} indices = [] data = [] for j in range(20): idx = random.randint(0, 29) if idx in dict: continue dict[idx] = 1 val = random.random() indices.append(idx) data.append(val) sparse_vec = SparseVector(indices, data, 30) self.sparse_inst.append((i, Instance(features=sparse_vec))) self.sparse_table = session.parallelize(self.sparse_inst, include_key=True, partition=48) self.sparse_table.schema = {"header": ["fid" + str(i) for i in range(30)]} # self.sparse_table = eggroll.parallelize(sparse_inst, include_key=True, partition=1) """ def test_dense_quantile(self): data_bin, bin_splitpoints, bin_sparse = Quantile.convert_feature_to_bin(self.dense_table, "bin_by_sample_data", bin_num=4) bin_result = dict([(key, inst.features) for key, inst in data_bin.collect()]) for i in range(100): self.assertTrue((bin_result[i] == np.ones(20, dtype='int') * ((i % 16) // 4)).all()) if i < 20: self.assertTrue((bin_splitpoints[i] == np.asarray([3, 7, 11, 15], dtype='int')).all()) data_bin, bin_splitpoints, bin_sparse = Quantile.convert_feature_to_bin(self.dense_table, "bin_by_data_block", bin_num=4) for i in range(20): self.assertTrue(bin_splitpoints[i].shape[0] <= 4) def test_sparse_quantile(self): data_bin, bin_splitpoints, bin_sparse = Quantile.convert_feature_to_bin(self.sparse_table, "bin_by_sample_data", bin_num=4) bin_result = dict([(key, inst.features) for key, inst in data_bin.collect()]) for i in range(20): self.assertTrue(len(self.sparse_inst[i][1].features.sparse_vec) == len(bin_result[i].sparse_vec)) """ """ def test_new_sparse_quantile(self): self.gen_data() param_obj = FeatureBinningParam(bin_num=4) binning_obj = QuantileBinning(param_obj) binning_obj.fit_split_points(self.sparse_table) data_bin, bin_splitpoints, bin_sparse = binning_obj.convert_feature_to_bin(self.sparse_table) bin_result = dict([(key, inst.features) for key, inst in data_bin.collect()]) for i in range(20): 
self.assertTrue(len(self.sparse_inst[i][1].features.sparse_vec) == len(bin_result[i].sparse_vec)) """ def test_new_dense_quantile(self): self.gen_data() param_obj = FeatureBinningParam(bin_num=4) binning_obj = QuantileBinning(param_obj) binning_obj.fit_split_points(self.dense_table) data_bin, bin_splitpoints, bin_sparse = binning_obj.convert_feature_to_bin(self.dense_table) bin_result = dict([(key, inst.features) for key, inst in data_bin.collect()]) # print(bin_result) for i in range(100): self.assertTrue((bin_result[i] == np.ones(20, dtype='int') * ((i % 16) // 4)).all()) if i < 20: # col_name = 'x' + str(i) col_idx = i split_point = np.array(bin_splitpoints[col_idx]) self.assertTrue((split_point == np.asarray([3, 7, 11, 15], dtype='int')).all()) for split_points in bin_splitpoints: self.assertTrue(len(split_points) <= 4) def tearDown(self): session.stop() # try: # session.cleanup("*", self.job_id, True) # except EnvironmentError: # pass # try: # session.cleanup("*", self.job_id, False) # except EnvironmentError: # pass if __name__ == '__main__': unittest.main()
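# A back-of-envelope helper (illustration only) showing why the dense
# assertion above expects split points [3, 7, 11, 15]: with 100 samples
# cycling through 0..15 and bin_num=4, equal-frequency boundaries sit at the
# 25th/50th/75th/100th percentiles of the sorted values.
def _expected_quantile_splits(values, bin_num=4):
    ordered = sorted(values)
    return [ordered[int((len(ordered) - 1) * (i + 1) / bin_num)]
            for i in range(bin_num)]

# _expected_quantile_splits([i % 16 for i in range(100)]) == [3, 7, 11, 15]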
5,747
40.352518
120
py
FATE
FATE-master/python/federatedml/feature/test/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #
616
37.5625
75
py
FATE
FATE-master/python/federatedml/feature/test/ohe_alignment_test.py
import unittest import uuid from fate_arch.session import computing_session as session from federatedml.feature.homo_onehot.homo_ohe_arbiter import HomoOneHotArbiter class TestOHE_alignment(unittest.TestCase): def setUp(self): self.job_id = str(uuid.uuid1()) session.init(self.job_id) def test_instance(self): ohe_alignment_arbiter = HomoOneHotArbiter() guest_columns = [ {'race_black': ['0', '1'], 'race_hispanic': ['0'], 'race_asian': ['0', '1'], 'race_other': ['1'], 'electivesurgery': ['0', '1']}] host_columns = [ {'race_black': ['0', '1'], 'race_hispanic': ['0', '1'], 'race_asian': ['0', '1'], 'race_other': ['0'], 'electivesurgery': ['0', '1']}] aligned_columns = sorted( ohe_alignment_arbiter.combine_all_column_headers(guest_columns, host_columns)['race_hispanic']) self.assertTrue(len(aligned_columns) == 2) self.assertEqual(['0', '1'], aligned_columns) def tearDown(self): session.stop() if __name__ == '__main__': unittest.main()
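# A hand-rolled restatement (illustration only) of the rule the arbiter
# applies in combine_all_column_headers: a per-column set union of every
# party's observed categories, so all parties encode against the same headers.
def _union_categories(*party_columns):
    merged = {}
    for cols in party_columns:
        for name, values in cols.items():
            merged.setdefault(name, set()).update(values)
    return {name: sorted(vals) for name, vals in merged.items()}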
1,102
31.441176
114
py
FATE
FATE-master/python/federatedml/feature/hetero_feature_binning/hetero_binning_guest.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy

import numpy as np

from federatedml.cipher_compressor.packer import GuestIntegerPacker
from federatedml.feature.binning.iv_calculator import IvCalculator
from federatedml.feature.binning.optimal_binning.optimal_binning import OptimalBinning
from federatedml.feature.hetero_feature_binning.base_feature_binning import BaseFeatureBinning
from federatedml.secureprotol import PaillierEncrypt
from federatedml.secureprotol.fate_paillier import PaillierEncryptedNumber
from federatedml.statistic import data_overview
from federatedml.statistic import statics
from federatedml.util import LOGGER
from federatedml.util import consts


class HeteroFeatureBinningGuest(BaseFeatureBinning):
    def __init__(self):
        super().__init__()
        self._packer: GuestIntegerPacker = None

    def fit(self, data_instances):
        """
        Apply the binning method to the local data instances as well as to those of
        the other parties. Afterwards, calculate the specific metric value for the
        specified columns. Currently, IV is supported for binary-labeled data only.
        """
        LOGGER.info("Start feature binning fit and transform")
        self._abnormal_detection(data_instances)

        # self._parse_cols(data_instances)
        self._setup_bin_inner_param(data_instances, self.model_param)
        split_points_obj = None
        if self.model_param.method == consts.OPTIMAL:
            has_missing_value = self.iv_calculator.check_containing_missing_value(data_instances)
            for idx in self.bin_inner_param.bin_indexes:
                if idx in has_missing_value:
                    raise ValueError("Optimal Binning does not support missing values yet.")

        if self.model_param.split_points_by_col_name or self.model_param.split_points_by_index:
            split_points = self._get_manual_split_points(data_instances)
            self.use_manual_split_points = True
            for col_name, sp in split_points.items():
                self.binning_obj.bin_results.put_col_split_points(col_name, sp)
        else:
            split_points = self.binning_obj.fit_split_points(data_instances)
            split_points_obj = self.binning_obj.bin_results

        if self.model_param.skip_static:
            self.record_missing(data_instances)
            self.transform_data(data_instances)
            return self.data_output

        label_counts_dict, label_counts, label_table = self.stat_label(data_instances)
        self.bin_result = self.cal_local_iv(data_instances, split_points, label_counts, label_table)
        self.record_missing(data_instances)

        if self.model_param.method == consts.OPTIMAL and split_points_obj is not None:
            # LOGGER.debug(f"set optimal metric array")
            self.set_optimal_metric_array(split_points_obj.all_optimal_metric)

        if self.model_param.local_only:
            self.transform_data(data_instances)
            self.set_summary(self.bin_result.summary())
            return self.data_output

        self.host_results = self.federated_iv(
            data_instances=data_instances,
            label_table=label_table,
            result_counts=label_counts_dict,
            label_elements=self.labels,
            label_counts=label_counts)

        total_summary = self.bin_result.summary()
        for host_res in self.host_results:
            total_summary = self._merge_summary(total_summary, host_res.summary())

        self.set_schema(data_instances)
        self.transform_data(data_instances)
        LOGGER.info("Finish feature binning fit and transform")
        self.set_summary(total_summary)
        return self.data_output

    def transform(self, data_instances):
        if self.model_param.skip_static:
            self.transform_data(data_instances)
            return self.data_output

        has_label = True
        if data_instances.first()[1].label is None:
            has_label = False
        self.transfer_variable.transform_stage_has_label.remote(has_label,
                                                                role=consts.HOST,
                                                                idx=-1)
        if not has_label:
            self.transform_data(data_instances)
            return self.data_output

        self._setup_bin_inner_param(data_instances, self.model_param)
        label_counts_dict, label_counts, label_table = self.stat_label(data_instances)
        if (set(self.labels) & set(label_counts_dict)) != set(label_counts_dict):
            raise ValueError(f"Label {set(self.labels) - set(label_counts_dict)} cannot be recognized")
        split_points = self.binning_obj.bin_results.all_split_points
        self.transform_bin_result = self.cal_local_iv(data_instances, split_points,
                                                      label_counts, label_table)
        if self.model_param.local_only:
            self.transform_data(data_instances)
            self.set_summary(self.bin_result.summary())
            return self.data_output

        self.transform_host_results = self.federated_iv(data_instances=data_instances,
                                                        label_table=label_table,
                                                        result_counts=label_counts_dict,
                                                        label_elements=self.labels,
                                                        label_counts=label_counts)
        total_summary = self.transform_bin_result.summary()
        for host_res in self.transform_host_results:
            total_summary = self._merge_summary(total_summary, host_res.summary())
        self.set_schema(data_instances)
        self.transform_data(data_instances)
        LOGGER.info("Finish feature binning fit and transform")
        self.set_summary(total_summary)
        return self.data_output

    def stat_label(self, data_instances):
        label_counts_dict = data_overview.get_label_count(data_instances)
        if len(label_counts_dict) > 2:
            if self.model_param.method == consts.OPTIMAL:
                raise ValueError("Optimal binning is not yet supported for multi-class data")
        if self._stage == "fit":
            self.labels = list(label_counts_dict.keys())
            self.labels.sort()
            self.labels.reverse()
        label_counts = [label_counts_dict.get(k, 0) for k in self.labels]
        label_table = IvCalculator.convert_label(data_instances, self.labels)
        return label_counts_dict, label_counts, label_table

    def cal_local_iv(self, data_instances, split_points, label_counts, label_table):
        bin_result = self.iv_calculator.cal_local_iv(data_instances=data_instances,
                                                     split_points=split_points,
                                                     labels=self.labels,
                                                     label_counts=label_counts,
                                                     bin_cols_map=self.bin_inner_param.get_need_cal_iv_cols_map(),
                                                     label_table=label_table)
        return bin_result

    def federated_iv(self, data_instances, label_table, result_counts, label_elements, label_counts):
        if self.model_param.encrypt_param.method == consts.PAILLIER:
            paillier_encryptor = PaillierEncrypt()
            paillier_encryptor.generate_key(self.model_param.encrypt_param.key_length)
        else:
            raise NotImplementedError("encrypt method not supported yet")
        self._packer = GuestIntegerPacker(pack_num=len(self.labels),
                                          pack_num_range=label_counts,
                                          encrypter=paillier_encryptor)
        converted_label_table = label_table.mapValues(lambda x: [int(i) for i in x])
        encrypted_label_table = self._packer.pack_and_encrypt(converted_label_table)

        self.transfer_variable.encrypted_label.remote(encrypted_label_table, role=consts.HOST, idx=-1)
        encrypted_bin_sum_infos = self.transfer_variable.encrypted_bin_sum.get(idx=-1)
        encrypted_bin_infos = self.transfer_variable.optimal_info.get(idx=-1)
        LOGGER.info("Get encrypted_bin_sum from host")

        host_results = []
        for host_idx, encrypted_bin_info in enumerate(encrypted_bin_infos):
            host_party_id = self.component_properties.host_party_idlist[host_idx]
            encrypted_bin_sum = encrypted_bin_sum_infos[host_idx]
            # assert 1 == 2, f"encrypted_bin_sum: {list(encrypted_bin_sum.collect())}"
            result_counts_table = self._packer.decrypt_cipher_package_and_unpack(encrypted_bin_sum)
            # LOGGER.debug(f"unpack result: {result_counts_table.first()}")
            bin_result = self.cal_bin_results(data_instances=data_instances,
                                              host_idx=host_idx,
                                              encrypted_bin_info=encrypted_bin_info,
                                              result_counts_table=result_counts_table,
                                              result_counts=result_counts,
                                              label_elements=label_elements)
            bin_result.set_role_party(role=consts.HOST, party_id=host_party_id)
            host_results.append(bin_result)
        return host_results

    def host_optimal_binning(self, data_instances, host_idx, encrypted_bin_info, result_counts,
                             category_names):
        optimal_binning_params = encrypted_bin_info['optimal_params']

        host_model_params = copy.deepcopy(self.model_param)
        host_model_params.bin_num = optimal_binning_params.get('bin_num')
        host_model_params.optimal_binning_param.metric_method = optimal_binning_params.get('metric_method')
        host_model_params.optimal_binning_param.mixture = optimal_binning_params.get('mixture')
        host_model_params.optimal_binning_param.max_bin_pct = optimal_binning_params.get('max_bin_pct')
        host_model_params.optimal_binning_param.min_bin_pct = optimal_binning_params.get('min_bin_pct')

        event_total, non_event_total = self.get_histogram(data_instances)
        result_counts = dict(result_counts.collect())
        optimal_binning_cols = {x: y for x, y in result_counts.items() if x not in category_names}
        host_binning_obj = OptimalBinning(params=host_model_params,
                                          abnormal_list=self.binning_obj.abnormal_list)
        host_binning_obj.event_total = event_total
        host_binning_obj.non_event_total = non_event_total
        host_binning_obj = self.optimal_binning_sync(host_binning_obj, optimal_binning_cols,
                                                     data_instances.count(),
                                                     data_instances.partitions,
                                                     host_idx)
        return host_binning_obj

    def cal_bin_results(self, data_instances, host_idx, encrypted_bin_info, result_counts_table,
                        result_counts, label_elements):
        host_bin_methods = encrypted_bin_info['bin_method']
        category_names = encrypted_bin_info['category_names']
        result_counts_dict = dict(result_counts_table.collect())
        host_party_id = self.component_properties.host_party_idlist[host_idx]
        if host_bin_methods == consts.OPTIMAL and self._stage == "fit":
            if len(result_counts) > 2:
                raise ValueError("Optimal binning is not yet supported for multi-class data")
            host_binning_obj = self.host_optimal_binning(data_instances, host_idx,
                                                         encrypted_bin_info, result_counts_table,
                                                         category_names)
            optimal_counts = {}
            for col_name, bucket_list in host_binning_obj.bucket_lists.items():
                optimal_counts[col_name] = [np.array([b.event_count, b.non_event_count])
                                            for b in bucket_list]

            for col_name, counts in result_counts_dict.items():
                if col_name in category_names:
                    optimal_counts[col_name] = counts
            # LOGGER.debug(f"optimal_counts: {optimal_counts}")
            bin_res = self.iv_calculator.cal_iv_from_counts(optimal_counts,
                                                            labels=label_elements,
                                                            role=consts.HOST,
                                                            party_id=host_party_id)
        else:
            bin_res = self.iv_calculator.cal_iv_from_counts(result_counts_table, label_elements,
                                                            role=consts.HOST, party_id=host_party_id)
        return bin_res

    @staticmethod
    def convert_decompress_format(encrypted_bin_sum):
        """
        Parameters
        ----------
        encrypted_bin_sum : dict.
            {"keys": ['x1', 'x2' ...],
             "event_counts": [...],
             "non_event_counts": [...],
             "bin_nums": [...]
            }

        Returns
        -------
        {'x1': [[event_count, non_event_count], [event_count, non_event_count] ... ],
         'x2': [[event_count, non_event_count], [event_count, non_event_count] ... ],
         ...
        }
        """
        result = {}
        start = 0
        event_counts = [int(x) for x in encrypted_bin_sum['event_counts']]
        non_event_counts = [int(x) for x in encrypted_bin_sum['non_event_counts']]
        for idx, k in enumerate(encrypted_bin_sum["keys"]):
            bin_num = encrypted_bin_sum["bin_nums"][idx]
            result[k] = list(zip(event_counts[start: start + bin_num],
                                 non_event_counts[start: start + bin_num]))
            start += bin_num
        assert start == len(event_counts) == len(non_event_counts), \
            f"Length of event/non-event does not match with bin_num sums, " \
            f"all_counts: {start}, length of event_counts: {len(event_counts)}, " \
            f"length of non_event_counts: {len(non_event_counts)}"
        return result

    @staticmethod
    def _merge_summary(summary_1, summary_2):
        def merge_single_label(s1, s2):
            res = {}
            for k, v in s1.items():
                if k == 'iv':
                    v.extend(s2[k])
                    v = sorted(v, key=lambda p: p[1], reverse=True)
                else:
                    v.update(s2[k])
                res[k] = v
            return res

        res = {}
        for label, s1 in summary_1.items():
            s2 = summary_2.get(label)
            res[label] = merge_single_label(s1, s2)
        return res

    @staticmethod
    def encrypt(x, cipher):
        if not isinstance(x, np.ndarray):
            return cipher.encrypt(x)
        res = []
        for idx, value in enumerate(x):
            res.append(cipher.encrypt(value))
        return np.array(res)

    @staticmethod
    def __decrypt_bin_sum(encrypted_bin_sum, cipher):
        def decrypt(values):
            res = []
            for counts in values:
                for idx, c in enumerate(counts):
                    if isinstance(c, PaillierEncryptedNumber):
                        counts[idx] = cipher.decrypt(c)
                res.append(counts)
            return res

        return encrypted_bin_sum.mapValues(decrypt)

    @staticmethod
    def load_data(data_instance):
        data_instance = copy.deepcopy(data_instance)
        # Here we assume a binary problem where the event label is 1
        if data_instance.label != 1:
            data_instance.label = 0
        return data_instance

    def optimal_binning_sync(self, host_binning_obj, result_counts, sample_count, partitions, host_idx):
        LOGGER.debug("Start host party optimal binning train")
        bucket_table = host_binning_obj.bin_sum_to_bucket_list(result_counts, partitions)
        host_binning_obj.fit_buckets(bucket_table, sample_count)
        encoded_split_points = host_binning_obj.bin_results.all_split_points
        self.transfer_variable.bucket_idx.remote(encoded_split_points,
                                                 role=consts.HOST,
                                                 idx=host_idx)
        return host_binning_obj

    @staticmethod
    def get_histogram(data_instances):
        static_obj = statics.MultivariateStatisticalSummary(data_instances, cols_index=-1)
        label_histogram = static_obj.get_label_histogram()
        event_total = label_histogram.get(1, 0)
        non_event_total = label_histogram.get(0, 0)
        if event_total == 0 or non_event_total == 0:
            LOGGER.warning(f"event_total or non_event_total might have errors, event_total: {event_total},"
                           f" non_event_total: {non_event_total}")
        return event_total, non_event_total
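# A worked example (illustration only, not part of the original module) of
# the flattened-to-grouped conversion implemented by
# convert_decompress_format above; the counts are made up.
def _demo_convert_decompress_format():
    flat = {"keys": ["x1", "x2"],
            "event_counts": [3, 1, 4],
            "non_event_counts": [2, 5, 6],
            "bin_nums": [2, 1]}
    grouped = HeteroFeatureBinningGuest.convert_decompress_format(flat)
    # grouped == {"x1": [(3, 2), (1, 5)], "x2": [(4, 6)]}
    return grouped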
17,537
47.049315
116
py
FATE
FATE-master/python/federatedml/feature/hetero_feature_binning/__init__.py
0
0
0
py
FATE
FATE-master/python/federatedml/feature/hetero_feature_binning/base_feature_binning.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import numpy as np from federatedml.feature.binning.base_binning import BaseBinning from federatedml.feature.binning.bin_inner_param import BinInnerParam from federatedml.feature.binning.bin_result import MultiClassBinResult from federatedml.feature.binning.bucket_binning import BucketBinning from federatedml.feature.binning.iv_calculator import IvCalculator from federatedml.feature.binning.optimal_binning.optimal_binning import OptimalBinning from federatedml.feature.binning.quantile_binning import QuantileBinning from federatedml.feature.fate_element_type import NoneType from federatedml.feature.sparse_vector import SparseVector from federatedml.model_base import ModelBase from federatedml.param.feature_binning_param import HeteroFeatureBinningParam as FeatureBinningParam from federatedml.protobuf.generated import feature_binning_meta_pb2, feature_binning_param_pb2 from federatedml.statistic.data_overview import get_header, get_anonymous_header from federatedml.transfer_variable.transfer_class.hetero_feature_binning_transfer_variable import \ HeteroFeatureBinningTransferVariable from federatedml.util import LOGGER from federatedml.util import abnormal_detection from federatedml.util import consts from federatedml.util.anonymous_generator_util import Anonymous from federatedml.util.io_check import assert_io_num_rows_equal from federatedml.util.schema_check import assert_schema_consistent MODEL_PARAM_NAME = 'FeatureBinningParam' MODEL_META_NAME = 'FeatureBinningMeta' class BaseFeatureBinning(ModelBase): """ Do binning method through guest and host """ def __init__(self): super(BaseFeatureBinning, self).__init__() self.transfer_variable = HeteroFeatureBinningTransferVariable() self.binning_obj: BaseBinning = None self.header = None self.anonymous_header = None self.training_anonymous_header = None self.schema = None self.host_results = [] self.transform_host_results = [] self.transform_type = None self.model_param = FeatureBinningParam() self.bin_inner_param = BinInnerParam() self.bin_result = MultiClassBinResult(labels=[0, 1]) self.transform_bin_result = MultiClassBinResult(labels=[0, 1]) self.has_missing_value = False self.labels = [] self.use_manual_split_points = False self.has_woe_array = False self._stage = "fit" def _init_model(self, params: FeatureBinningParam): self.model_param = params self.transform_type = self.model_param.transform_param.transform_type """ if self.role == consts.HOST: if self.transform_type == "woe": raise ValueError("Host party do not support woe transform now.") """ if self.model_param.method == consts.QUANTILE: self.binning_obj = QuantileBinning(self.model_param) elif self.model_param.method == consts.BUCKET: self.binning_obj = BucketBinning(self.model_param) elif self.model_param.method == consts.OPTIMAL: if self.role == consts.HOST: self.model_param.bin_num = self.model_param.optimal_binning_param.init_bin_nums 
                self.binning_obj = QuantileBinning(self.model_param)
            else:
                self.binning_obj = OptimalBinning(self.model_param)
        else:
            raise ValueError(f"Binning method: {self.model_param.method} is not supported.")

        self.iv_calculator = IvCalculator(self.model_param.adjustment_factor,
                                          role=self.role,
                                          party_id=self.component_properties.local_partyid)

    def _get_manual_split_points(self, data_instances):
        data_index_to_col_name = dict(enumerate(data_instances.schema.get("header")))
        manual_split_points = {}
        if self.model_param.split_points_by_index is not None:
            manual_split_points = {
                data_index_to_col_name.get(int(k), None): v
                for k, v in self.model_param.split_points_by_index.items()
            }
            if None in manual_split_points.keys():
                raise ValueError(f"Index given in `split_points_by_index` not found in input data header. "
                                 f"Please check.")
        if self.model_param.split_points_by_col_name is not None:
            for col_name, split_points in self.model_param.split_points_by_col_name.items():
                if manual_split_points.get(col_name) is not None:
                    raise ValueError(f"Split points for feature {col_name} given in both "
                                     f"`split_points_by_index` and `split_points_by_col_name`. Please check.")
                manual_split_points[col_name] = split_points
        if set(self.bin_inner_param.bin_names) != set(manual_split_points.keys()):
            raise ValueError(f"Column set from provided split points dictionary does not match that of "
                             f"`bin_names` or `bin_indexes`. Please check.")
        return manual_split_points

    @staticmethod
    def data_format_transform(row):
        """
        Transform data into sparse format.
        """
        if type(row.features).__name__ != consts.SPARSE_VECTOR:
            feature_shape = row.features.shape[0]
            indices = []
            data = []

            for i in range(feature_shape):
                if np.isnan(row.features[i]):
                    indices.append(i)
                    data.append(NoneType())
                elif np.abs(row.features[i]) < consts.FLOAT_ZERO:
                    continue
                else:
                    indices.append(i)
                    data.append(row.features[i])

            new_row = copy.deepcopy(row)
            new_row.features = SparseVector(indices, data, feature_shape)
            return new_row
        else:
            sparse_vec = row.features.get_sparse_vector()
            replace_key = []
            for key in sparse_vec:
                if sparse_vec.get(key) == NoneType() or np.isnan(sparse_vec.get(key)):
                    replace_key.append(key)
            if len(replace_key) == 0:
                return row
            else:
                new_row = copy.deepcopy(row)
                new_sparse_vec = new_row.features.get_sparse_vector()
                for key in replace_key:
                    new_sparse_vec[key] = NoneType()
                return new_row

    def _setup_bin_inner_param(self, data_instances, params):
        if self.schema is not None:
            return
        self.header = get_header(data_instances)
        self.anonymous_header = get_anonymous_header(data_instances)
        LOGGER.debug("_setup_bin_inner_param, get header length: {}".format(len(self.header)))
        self.schema = data_instances.schema
        self.bin_inner_param.set_header(self.header, self.anonymous_header)
        if params.bin_indexes == -1:
            self.bin_inner_param.set_bin_all()
        else:
            self.bin_inner_param.add_bin_indexes(params.bin_indexes)
            self.bin_inner_param.add_bin_names(params.bin_names)
        self.bin_inner_param.add_category_indexes(params.category_indexes)
        self.bin_inner_param.add_category_names(params.category_names)
        if params.transform_param.transform_cols == -1:
            self.bin_inner_param.set_transform_all()
        else:
            self.bin_inner_param.add_transform_bin_indexes(params.transform_param.transform_cols)
            self.bin_inner_param.add_transform_bin_names(params.transform_param.transform_names)
        self.binning_obj.set_bin_inner_param(self.bin_inner_param)

    @assert_io_num_rows_equal
    @assert_schema_consistent
    def transform_data(self, data_instances):
        self._setup_bin_inner_param(data_instances, self.model_param)
        if self.transform_type
!= "woe": data_instances = self.binning_obj.transform(data_instances, self.transform_type) elif self.role == consts.HOST and not self.has_woe_array: raise ValueError("Woe transform is not available for host parties.") else: data_instances = self.iv_calculator.woe_transformer(data_instances, self.bin_inner_param, self.bin_result) self.set_schema(data_instances) self.data_output = data_instances return data_instances def _get_meta(self): # col_list = [str(x) for x in self.cols] transform_param = feature_binning_meta_pb2.TransformMeta( transform_cols=self.bin_inner_param.transform_bin_indexes, transform_type=self.model_param.transform_param.transform_type ) optimal_metric_method = None if self.model_param.method == consts.OPTIMAL and not self.use_manual_split_points: optimal_metric_method = self.model_param.optimal_binning_param.metric_method meta_protobuf_obj = feature_binning_meta_pb2.FeatureBinningMeta( method=self.model_param.method, compress_thres=self.model_param.compress_thres, head_size=self.model_param.head_size, error=self.model_param.error, bin_num=self.model_param.bin_num, cols=self.bin_inner_param.bin_names, adjustment_factor=self.model_param.adjustment_factor, local_only=self.model_param.local_only, need_run=self.need_run, transform_param=transform_param, skip_static=self.model_param.skip_static, optimal_metric_method=optimal_metric_method ) return meta_protobuf_obj def _get_param(self): split_points_result = self.binning_obj.bin_results.split_results multi_class_result = self.bin_result.generated_pb_list(split_points_result) # LOGGER.debug(f"split_points_result: {split_points_result}") host_multi_class_result = [] host_single_results = [] anonymous_dict_list = [] if self._stage == "transform" and self._check_lower_version_anonymous(): if self.role == consts.GUEST: anonymous_dict_list = self.transfer_variable.host_anonymous_header_dict.get(idx=-1) elif self.role == consts.HOST: anonymous_dict = dict(zip(self.training_anonymous_header, self.anonymous_header)) self.transfer_variable.host_anonymous_header_dict.remote( anonymous_dict, role=consts.GUEST, idx=0 ) for idx, host_res in enumerate(self.host_results): if not anonymous_dict_list: host_multi_class_result.extend(host_res.generated_pb_list()) host_single_results.append(host_res.bin_results[0].generated_pb()) else: updated_anonymous_header = anonymous_dict_list[idx] host_res.update_anonymous(updated_anonymous_header) host_multi_class_result.extend(host_res.generated_pb_list()) host_single_results.append(host_res.bin_results[0].generated_pb()) has_host_result = True if len(host_multi_class_result) else False multi_pb = feature_binning_param_pb2.MultiClassResult( results=multi_class_result, labels=[str(x) for x in self.labels], host_results=host_multi_class_result, host_party_ids=[str(x) for x in self.component_properties.host_party_idlist], has_host_result=has_host_result ) if self._stage == "fit": result_obj = feature_binning_param_pb2. 
\ FeatureBinningParam(binning_result=multi_class_result[0], host_results=host_single_results, header=self.header, header_anonymous=self.anonymous_header, model_name=consts.BINNING_MODEL, multi_class_result=multi_pb) else: transform_multi_class_result = self.transform_bin_result.generated_pb_list(split_points_result) transform_host_single_results = [] transform_host_multi_class_result = [] for host_res in self.transform_host_results: transform_host_multi_class_result.extend(host_res.generated_pb_list()) transform_host_single_results.append(host_res.bin_results[0].generated_pb()) transform_multi_pb = feature_binning_param_pb2.MultiClassResult( results=transform_multi_class_result, labels=[str(x) for x in self.labels], host_results=transform_host_multi_class_result, host_party_ids=[str(x) for x in self.component_properties.host_party_idlist], has_host_result=has_host_result ) result_obj = feature_binning_param_pb2. \ FeatureBinningParam(binning_result=multi_class_result[0], host_results=host_single_results, header=self.header, header_anonymous=self.anonymous_header, model_name=consts.BINNING_MODEL, multi_class_result=multi_pb, transform_binning_result=transform_multi_class_result[0], transform_host_results=transform_host_single_results, transform_multi_class_result=transform_multi_pb) return result_obj def load_model(self, model_dict): model_param = list(model_dict.get('model').values())[0].get(MODEL_PARAM_NAME) model_meta = list(model_dict.get('model').values())[0].get(MODEL_META_NAME) self.bin_inner_param = BinInnerParam() multi_class_result = model_param.multi_class_result self.labels = list(map(int, multi_class_result.labels)) if self.labels: self.bin_result = MultiClassBinResult.reconstruct(list(multi_class_result.results), self.labels) if self.role == consts.HOST: binning_result = dict(list(multi_class_result.results)[0].binning_result) woe_array = list(binning_result.values())[0].woe_array self.bin_result = MultiClassBinResult.reconstruct(list(multi_class_result.results)) # if manual woe, reconstruct if woe_array: self.has_woe_array = True assert isinstance(model_meta, feature_binning_meta_pb2.FeatureBinningMeta) assert isinstance(model_param, feature_binning_param_pb2.FeatureBinningParam) self.header = list(model_param.header) self.training_anonymous_header = list(model_param.header_anonymous) self.bin_inner_param.set_header(self.header, self.training_anonymous_header) self.bin_inner_param.add_transform_bin_indexes(list(model_meta.transform_param.transform_cols)) self.bin_inner_param.add_bin_names(list(model_meta.cols)) self.transform_type = model_meta.transform_param.transform_type bin_method = str(model_meta.method) if bin_method == consts.QUANTILE: self.binning_obj = QuantileBinning(params=model_meta) elif bin_method == consts.OPTIMAL: self.binning_obj = OptimalBinning(params=model_meta) else: self.binning_obj = BucketBinning(params=model_meta) # self.binning_obj.set_role_party(self.role, self.component_properties.local_partyid) self.binning_obj.set_bin_inner_param(self.bin_inner_param) split_results = dict(model_param.binning_result.binning_result) for col_name, sr_pb in split_results.items(): split_points = list(sr_pb.split_points) self.binning_obj.bin_results.put_col_split_points(col_name, split_points) # self.binning_obj.bin_results.reconstruct(model_param.binning_result) self.host_results = [] host_pbs = list(model_param.multi_class_result.host_results) if len(host_pbs): if len(self.labels) == 2: for host_pb in host_pbs: 
                    self.host_results.append(MultiClassBinResult.reconstruct(
                        host_pb, self.labels))
            else:
                assert len(host_pbs) % len(self.labels) == 0
                i = 0
                while i < len(host_pbs):
                    this_pbs = host_pbs[i: i + len(self.labels)]
                    self.host_results.append(MultiClassBinResult.reconstruct(this_pbs, self.labels))
                    i += len(self.labels)

        """
        if list(model_param.header_anonymous):
            self.anonymous_header = list(model_param.anonymous_header)
        """
        self._stage = "transform"

        # add missing parameters while loading a trained model
        self.model_param.bin_names = list(model_meta.cols)
        # force bin_indexes to an empty list: model_meta.cols already holds all bin column names,
        # while the default bin_indexes of -1 would process every column during the transform stage
        self.model_param.bin_indexes = []
        if model_meta.transform_param:
            self.model_param.transform_param.transform_cols = list(model_meta.transform_param.transform_cols)
            self.model_param.transform_param.transform_type = model_meta.transform_param.transform_type
        if model_meta.skip_static:
            self.model_param.skip_static = model_meta.skip_static

    def record_missing(self, instances):
        from federatedml.statistic.statics import MissingStatistic
        header = instances.schema['header']
        tag_id_mapping = {v: k for k, v in enumerate(header)}
        feature_count_rs = MissingStatistic.count_feature_ratio(instances, tag_id_mapping,
                                                                not MissingStatistic.is_sparse(instances),
                                                                missing_val=self.binning_obj.abnormal_list)
        total_count = instances.count()
        for col_idx, feature_count in enumerate(feature_count_rs):
            missing = feature_count < total_count
            self.bin_result.put_col_missing(header[col_idx], missing)

    def export_model(self):
        if self.model_output is not None:
            return self.model_output
        meta_obj = self._get_meta()
        param_obj = self._get_param()
        result = {
            MODEL_META_NAME: meta_obj,
            MODEL_PARAM_NAME: param_obj
        }
        self.model_output = result
        return result

    def save_data(self):
        return self.data_output

    def set_schema(self, data_instance):
        self.schema['header'] = self.header
        data_instance.schema = self.schema
        # LOGGER.debug("After Binning, when setting schema, schema is : {}".format(data_instance.schema))

    def set_optimal_metric_array(self, optimal_metric_array_dict):
        # LOGGER.debug(f"optimal metric array dict: {optimal_metric_array_dict}")
        for col_name, optimal_metric_array in optimal_metric_array_dict.items():
            self.bin_result.put_optimal_metric_array(col_name, optimal_metric_array)
        # LOGGER.debug(f"after set optimal metric, self.bin_result metric is: {self.bin_result.all_optimal_metric}")

    def _abnormal_detection(self, data_instances):
        """
        Make sure input data_instances is valid.
        """
        abnormal_detection.empty_table_detection(data_instances)
        abnormal_detection.empty_feature_detection(data_instances)
        self.check_schema_content(data_instances.schema)

    def _check_lower_version_anonymous(self):
        return not self.training_anonymous_header or \
            Anonymous.is_old_version_anonymous_header(self.training_anonymous_header)
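# A pure-Python analog (illustration only, not part of the original class)
# of what the static data_format_transform above does to one dense feature
# row: near-zero entries are dropped, NaNs become explicit missing markers,
# and everything else is kept in an index -> value map. Plain None stands in
# for FATE's NoneType sentinel here.
def _dense_to_sparse_demo(features, zero_tol=1e-8):
    import math
    sparse = {}
    for idx, val in enumerate(features):
        if math.isnan(val):
            sparse[idx] = None  # missing-value marker
        elif abs(val) >= zero_tol:
            sparse[idx] = val   # keep non-zero values; zeros stay implicit
    return sparse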
20,700
46.370709
118
py
FATE
FATE-master/python/federatedml/feature/hetero_feature_binning/hetero_binning_host.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import functools
import operator

from federatedml.cipher_compressor.compressor import CipherCompressorHost
from federatedml.feature.hetero_feature_binning.base_feature_binning import BaseFeatureBinning
from federatedml.util import LOGGER
from federatedml.util import consts


class HeteroFeatureBinningHost(BaseFeatureBinning):
    def __init__(self):
        super(HeteroFeatureBinningHost, self).__init__()
        self.compressor = None

    def fit(self, data_instances):
        self._abnormal_detection(data_instances)
        # self._parse_cols(data_instances)
        self._setup_bin_inner_param(data_instances, self.model_param)
        if self.model_param.method == consts.OPTIMAL:
            has_missing_value = self.iv_calculator.check_containing_missing_value(data_instances)
            for idx in self.bin_inner_param.bin_indexes:
                if idx in has_missing_value:
                    raise ValueError("Optimal Binning does not support missing values yet.")

        if self.model_param.split_points_by_col_name or self.model_param.split_points_by_index:
            split_points = self._get_manual_split_points(data_instances)
            self.use_manual_split_points = True
            for col_name, sp in split_points.items():
                self.binning_obj.bin_results.put_col_split_points(col_name, sp)
        else:
            # Calculate the split points of the data held by this party
            split_points = self.binning_obj.fit_split_points(data_instances)
        self.record_missing(data_instances)

        return self.stat_and_transform(data_instances, split_points)

    def transform(self, data_instances):
        self._setup_bin_inner_param(data_instances, self.model_param)
        split_points = self.binning_obj.bin_results.all_split_points
        return self.stat_and_transform(data_instances, split_points)

    def stat_and_transform(self, data_instances, split_points):
        """
        Apply the binning method to the data instances held by this party; afterwards,
        compute the statistics needed for the specified columns.
""" if self.model_param.skip_static: # if self.transform_type != 'woe': data_instances = self.transform_data(data_instances) """ else: raise ValueError("Woe transform is not supported in host parties.") """ self.set_schema(data_instances) self.data_output = data_instances return data_instances if not self.model_param.local_only: has_label = True if self._stage == "transform": has_label = self.transfer_variable.transform_stage_has_label.get(idx=0) if has_label: self.compressor = CipherCompressorHost() self._sync_init_bucket(data_instances, split_points) if self.model_param.method == consts.OPTIMAL and self._stage == "fit": self.optimal_binning_sync() # if self.transform_type != 'woe': data_instances = self.transform_data(data_instances) self.set_schema(data_instances) self.data_output = data_instances total_summary = self.binning_obj.bin_results.to_json() self.set_summary(total_summary) return data_instances def _sync_init_bucket(self, data_instances, split_points, need_shuffle=False): data_bin_table = self.binning_obj.get_data_bin(data_instances, split_points, self.bin_inner_param.bin_cols_map) # LOGGER.debug("data_bin_table, count: {}".format(data_bin_table.count())) encrypted_label_table = self.transfer_variable.encrypted_label.get(idx=0) LOGGER.info("Get encrypted_label_table from guest") encrypted_bin_sum = self.__static_encrypted_bin_label(data_bin_table, encrypted_label_table) encrypted_bin_sum = self.compressor.compress_dtable(encrypted_bin_sum) encode_name_f = functools.partial(self.bin_inner_param.change_to_anonymous, col_name_anonymous_maps=self.bin_inner_param.col_name_anonymous_maps) # encrypted_bin_sum = self.bin_inner_param.encode_col_name_dict(encrypted_bin_sum, self) encrypted_bin_sum = encrypted_bin_sum.map(encode_name_f) # encrypted_bin_sum = self.cipher_compress(encrypted_bin_sum, data_bin_table.count()) self.transfer_variable.encrypted_bin_sum.remote(encrypted_bin_sum, role=consts.GUEST, idx=0) send_result = { "category_names": self.bin_inner_param.get_anonymous_col_name_list( self.bin_inner_param.category_names), "bin_method": self.model_param.method, "optimal_params": { "metric_method": self.model_param.optimal_binning_param.metric_method, "bin_num": self.model_param.bin_num, "mixture": self.model_param.optimal_binning_param.mixture, "max_bin_pct": self.model_param.optimal_binning_param.max_bin_pct, "min_bin_pct": self.model_param.optimal_binning_param.min_bin_pct } } self.transfer_variable.optimal_info.remote(send_result, role=consts.GUEST, idx=0) def __static_encrypted_bin_label(self, data_bin_table, encrypted_label): # data_bin_with_label = data_bin_table.join(encrypted_label, lambda x, y: (x, y)) label_counts = encrypted_label.reduce(operator.add) sparse_bin_points = self.binning_obj.get_sparse_bin(self.bin_inner_param.bin_indexes, self.binning_obj.bin_results.all_split_points, self.bin_inner_param.header) sparse_bin_points = {self.bin_inner_param.header[k]: v for k, v in sparse_bin_points.items()} encrypted_bin_sum = self.iv_calculator.cal_bin_label( data_bin_table=data_bin_table, sparse_bin_points=sparse_bin_points, label_table=encrypted_label, label_counts=label_counts ) return encrypted_bin_sum @staticmethod def convert_compress_format(col_name, encrypted_bin_sum): """ Parameters ---------- encrypted_bin_sum : list. It is like: {'x1': [[event_count, non_event_count], [event_count, non_event_count] ... ], 'x2': [[event_count, non_event_count], [event_count, non_event_count] ... ], ... 
            }

        Returns
        -------
        (col_name, {"event_counts": [...],
                    "non_event_counts": [...]})
        """
        event_counts = [x[0] for x in encrypted_bin_sum]
        non_event_counts = [x[1] for x in encrypted_bin_sum]
        return col_name, {"event_counts": event_counts,
                          "non_event_counts": non_event_counts}

    def optimal_binning_sync(self):
        bucket_idx = self.transfer_variable.bucket_idx.get(idx=0)
        # LOGGER.debug("In optimal_binning_sync, received bucket_idx: {}".format(bucket_idx))
        original_split_points = self.binning_obj.bin_results.all_split_points
        for anonymous_col_name, b_idx in bucket_idx.items():
            col_name = self.bin_inner_param.get_col_name_by_anonymous(anonymous_col_name)
            ori_sp_list = original_split_points.get(col_name)
            optimal_result = [ori_sp_list[i] for i in b_idx]
            self.binning_obj.bin_results.put_col_split_points(col_name, optimal_result)
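# A worked example (illustration only, not part of the original module) of
# the per-column flattening performed by convert_compress_format above; the
# counts are made up.
def _demo_convert_compress_format():
    name, packed = HeteroFeatureBinningHost.convert_compress_format(
        "x1", [[3, 2], [1, 5]])
    # name == "x1"
    # packed == {"event_counts": [3, 1], "non_event_counts": [2, 5]}
    return name, packed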
8,421
45.021858
119
py
FATE
FATE-master/python/federatedml/feature/homo_onehot/homo_ohe_base.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # added by jsweng # base class for OHE alignment import functools from federatedml.feature import one_hot_encoder from federatedml.param.homo_onehot_encoder_param import HomoOneHotParam from federatedml.transfer_variable.transfer_class.homo_onehot_transfer_variable import HomoOneHotTransferVariable from federatedml.util import LOGGER from federatedml.util import consts class HomoOneHotBase(one_hot_encoder.OneHotEncoder): def __init__(self): super(HomoOneHotBase, self).__init__() self.model_name = 'OHEAlignment' self.model_param_name = 'OHEAlignmentParam' self.model_meta_name = 'OHEAlignmentMeta' self.model_param = HomoOneHotParam() def _init_model(self, params): super(HomoOneHotBase, self)._init_model(params) # self.re_encrypt_batches = params.re_encrypt_batches self.need_alignment = params.need_alignment self.transfer_variable = HomoOneHotTransferVariable() def _init_params(self, data_instances): if data_instances is None: return super(HomoOneHotBase, self)._init_params(data_instances) def fit(self, data_instances): """This function allows for one-hot-encoding of the columns with or without alignment with the other parties in the federated learning. 
Args: data_instances: data the guest has access to Returns: if alignment is on, then the one-hot-encoding data_instances are done with alignment with parties involved in federated learning else, the data is one-hot-encoded independently """ self._init_params(data_instances) self._abnormal_detection(data_instances) # keep a copy of original header ori_header = self.inner_param.header.copy() # obtain the individual column headers with their values f1 = functools.partial(self.record_new_header, inner_param=self.inner_param) self.col_maps = data_instances.applyPartitions(f1).reduce(self.merge_col_maps) col_maps = {} for col_name, pair_obj in self.col_maps.items(): values = [x for x in pair_obj.values] col_maps[col_name] = values # LOGGER.debug("new col_maps is: {}".format(col_maps)) if self.need_alignment: # Send col_maps to arbiter if self.role == consts.HOST: self.transfer_variable.host_columns.remote(col_maps, role=consts.ARBITER, idx=-1) elif self.role == consts.GUEST: self.transfer_variable.guest_columns.remote(col_maps, role=consts.ARBITER, idx=-1) # Receive aligned columns from arbiter aligned_columns = self.transfer_variable.aligned_columns.get(idx=-1) aligned_col_maps = aligned_columns[0] # LOGGER.debug("{} aligned columns received are: {}".format(self.role, aligned_col_maps)) self.col_maps = {} for col_name, value_list in aligned_col_maps.items(): value_set = set([str(x) for x in value_list]) if len(value_set) != len(value_list): raise ValueError("Same values with different types have occurred among different parties") transfer_pair = one_hot_encoder.TransferPair(col_name) for v in value_list: transfer_pair.add_value(v) transfer_pair.encode_new_headers() self.col_maps[col_name] = transfer_pair self._transform_schema() data_instances = self.transform(data_instances) # LOGGER.debug( # "[Result][OHEAlignment{}] After transform in fit, schema is : {}, header: {}".format(self.role, self.schema, # self.inner_param.header)) return data_instances
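# A small stand-alone restatement (illustration only, not part of the
# original class) of the consistency check applied to each aligned column in
# fit() above: values that collide after string conversion (e.g. 1 and "1"
# arriving from different parties) are rejected.
def _check_aligned_values(value_list):
    value_set = {str(x) for x in value_list}
    if len(value_set) != len(value_list):
        raise ValueError("Same values with different types have occurred among different parties")
    return sorted(value_set)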
4,612
38.767241
124
py
FATE
FATE-master/python/federatedml/feature/homo_onehot/homo_ohe_arbiter.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
# added by jsweng
# alignment arbiter

from collections import defaultdict

from federatedml.feature.homo_onehot.homo_ohe_base import HomoOneHotBase
from federatedml.util import LOGGER
from federatedml.util import consts


class HomoOneHotArbiter(HomoOneHotBase):
    def __init__(self):
        super(HomoOneHotArbiter, self).__init__()

    def combine_all_column_headers(self, guest_columns, host_columns):
        """ This is used when there is a need for alignment within the
        federated learning. The function aligns the column headers from
        guest and host and sends the new aligned headers back.

        Returns:
            The combined column headers from guest and host, used when
            alignment is enabled

        """
        all_cols_dict = defaultdict(set)

        # Obtain all the guest headers
        for guest_cols in guest_columns:
            for k, v in guest_cols.items():
                all_cols_dict[k].update(v)

        # Obtain all the host headers
        for host_cols in host_columns:
            for k, v in host_cols.items():
                all_cols_dict[k].update(v)

        # Align all of them together
        combined_all_cols = {}
        for el in all_cols_dict.keys():
            combined_all_cols[el] = list(all_cols_dict[el])

        LOGGER.debug("{} combined cols: {}".format(self.role, combined_all_cols))

        return combined_all_cols

    def fit(self, data_instances=None):
        if self.need_alignment:
            guest_columns = self.transfer_variable.guest_columns.get(idx=-1)  # getting guest column
            host_columns = self.transfer_variable.host_columns.get(idx=-1)  # getting host column

            combined_all_cols = self.combine_all_column_headers(guest_columns, host_columns)

            # Send the aligned headers back to guest and host
            self.transfer_variable.aligned_columns.remote(combined_all_cols, role=consts.HOST, idx=-1)
            self.transfer_variable.aligned_columns.remote(combined_all_cols, role=consts.GUEST, idx=-1)

    def _get_meta(self):
        pass

    def _get_param(self):
        pass

    def export_model(self):
        return None

    def _load_model(self, model_dict):
        pass

    def transform(self, data_instances):
        pass

    def load_model(self, model_dict):
        pass
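# --- Editorial sketch, not part of the original FATE source ---
# A dependency-free rehearsal of combine_all_column_headers: the arbiter
# unions the observed category values per column across every party's
# submission. The real method returns the values in arbitrary set order;
# sorted() is used here only to make the assertion deterministic.
if __name__ == "__main__":
    _guest_columns = [{"job": ["teacher", "nurse"]}]
    _host_columns = [{"job": ["nurse", "driver"]}]
    _all_cols = defaultdict(set)
    for _cols in _guest_columns + _host_columns:
        for _k, _v in _cols.items():
            _all_cols[_k].update(_v)
    assert {k: sorted(v) for k, v in _all_cols.items()} == {"job": ["driver", "nurse", "teacher"]}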
3,005
31.322581
103
py
FATE
FATE-master/python/federatedml/feature/homo_onehot/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # added by jsweng #
685
30.181818
75
py
FATE
FATE-master/python/federatedml/feature/binning/base_binning.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bisect import copy import functools import numpy as np from federatedml.feature.binning.bin_inner_param import BinInnerParam from federatedml.feature.binning.bin_result import SplitPointsResult from federatedml.feature.sparse_vector import SparseVector from federatedml.param.feature_binning_param import FeatureBinningParam from federatedml.statistic import data_overview from federatedml.statistic.data_overview import get_header, get_anonymous_header from federatedml.util import LOGGER # from federatedml.statistic import statics class BaseBinning(object): """ This is use for discrete data so that can transform data or use information for feature selection. """ def __init__(self, params=None, abnormal_list=None, labels=None): self.bin_inner_param: BinInnerParam = None self.is_multi_class = False self.bin_results = SplitPointsResult() if params is None: return if isinstance(params, FeatureBinningParam): self.params = params else: self.params = None self.bin_num = params.bin_num self.abnormal_list = abnormal_list self.split_points = None @property def header(self): return self.bin_inner_param.header # @property # def split_points(self): # return self.bin_results.all_split_points def _default_setting(self, header, anonymous_header): if self.bin_inner_param is not None: return self.bin_inner_param = BinInnerParam() self.bin_inner_param.set_header(header, anonymous_header) if self.params.bin_indexes == -1: self.bin_inner_param.set_bin_all() else: self.bin_inner_param.add_bin_indexes(self.params.bin_indexes) self.bin_inner_param.add_bin_names(self.params.bin_names) self.bin_inner_param.add_category_indexes(self.params.category_indexes) self.bin_inner_param.add_category_names(self.params.category_names) if self.params.transform_param.transform_cols == -1: self.bin_inner_param.set_transform_all() else: self.bin_inner_param.add_transform_bin_indexes(self.params.transform_param.transform_cols) self.bin_inner_param.add_transform_bin_names(self.params.transform_param.transform_names) def fit_split_points(self, data_instances): """ Get split points Parameters ---------- data_instances : Table The input data Returns ------- split_points : dict. Each value represent for the split points for a feature. The element in each row represent for the corresponding split point. e.g. split_points = {'x1': [0.1, 0.2, 0.3, 0.4 ...], # The first feature 'x2': [1, 2, 3, 4, ...], # The second feature ...] 
# Other features """ pass def fit_category_features(self, data_instances): is_sparse = data_overview.is_sparse_data(data_instances) if len(self.bin_inner_param.category_indexes) > 0: statics_obj = data_overview.DataStatistics() category_col_values = statics_obj.static_all_values(data_instances, self.bin_inner_param.category_indexes, is_sparse) for col_name, split_points in zip(self.bin_inner_param.category_names, category_col_values): self.bin_results.put_col_split_points(col_name, split_points) self.bin_results.put_col_optimal_metric_array(col_name, None) def set_bin_inner_param(self, bin_inner_param): self.bin_inner_param = bin_inner_param def transform(self, data_instances, transform_type): # self._init_cols(data_instances) for col_name in self.bin_inner_param.transform_bin_names: if col_name not in self.bin_inner_param.col_name_maps: raise ValueError("Transform col_name: {} is not existed".format(col_name)) if transform_type == 'bin_num': data_instances, _, _ = self.convert_feature_to_bin(data_instances) elif transform_type == 'woe': data_instances = self.convert_feature_to_woe(data_instances) return data_instances @staticmethod def get_data_bin(data_instances, split_points, bin_cols_map): """ Apply the binning method Parameters ---------- data_instances : Table The input data split_points : dict. Each value represent for the split points for a feature. The element in each row represent for the corresponding split point. e.g. split_points = {'x1': [0.1, 0.2, 0.3, 0.4 ...], # The first feature 'x2': [1, 2, 3, 4, ...], # The second feature ...] # Other features Returns ------- data_bin_table : Table. Each element represent for the corresponding bin number this feature belongs to. e.g. it could be: [{'x1': 1, 'x2': 5, 'x3': 2} ... ] """ # self._init_cols(data_instances) is_sparse = data_overview.is_sparse_data(data_instances) header = data_instances.schema.get('header') f = functools.partial(BaseBinning.bin_data, split_points=split_points, cols_dict=bin_cols_map, header=header, is_sparse=is_sparse) data_bin_dict = data_instances.mapValues(f) return data_bin_dict def convert_feature_to_woe(self, data_instances): is_sparse = data_overview.is_sparse_data(data_instances) schema = data_instances.schema abnormal_list = self.abnormal_list if self.abnormal_list is None: abnormal_list = [] if is_sparse: f = functools.partial(self._convert_sparse_data, bin_inner_param=self.bin_inner_param, bin_results=self.bin_results, abnormal_list=abnormal_list, convert_type='woe' ) new_data = data_instances.mapValues(f) else: f = functools.partial(self._convert_dense_data, bin_inner_param=self.bin_inner_param, bin_results=self.bin_results, abnormal_list=abnormal_list, convert_type='woe') new_data = data_instances.mapValues(f) new_data.schema = schema return new_data def convert_feature_to_bin(self, data_instances, split_points=None): is_sparse = data_overview.is_sparse_data(data_instances) schema = data_instances.schema abnormal_list = self.abnormal_list if self.abnormal_list is None: abnormal_list = [] if split_points is None: split_points = self.bin_results.all_split_points else: for col_name, sp in split_points.items(): self.bin_results.put_col_split_points(col_name, sp) if is_sparse: f = functools.partial(self._convert_sparse_data, bin_inner_param=self.bin_inner_param, bin_results=self.bin_results, abnormal_list=abnormal_list, convert_type='bin_num' ) new_data = data_instances.mapValues(f) else: f = functools.partial(self._convert_dense_data, bin_inner_param=self.bin_inner_param, 
bin_results=self.bin_results, abnormal_list=abnormal_list, convert_type='bin_num') new_data = data_instances.mapValues(f) new_data.schema = schema header = get_header(data_instances) bin_sparse = self.get_sparse_bin(self.bin_inner_param.transform_bin_indexes, split_points, header) split_points_result = self.bin_results.get_split_points_array(self.bin_inner_param.transform_bin_names) return new_data, split_points_result, bin_sparse def _setup_bin_inner_param(self, data_instances, params): if self.bin_inner_param is not None: return self.bin_inner_param = BinInnerParam() header = get_header(data_instances) anonymous_header = get_anonymous_header(data_instances) LOGGER.debug("_setup_bin_inner_param, get header length: {}".format(len(self.header))) self.schema = data_instances.schema self.bin_inner_param.set_header(header, anonymous_header) if params.bin_indexes == -1: self.bin_inner_param.set_bin_all() else: self.bin_inner_param.add_bin_indexes(params.bin_indexes) self.bin_inner_param.add_bin_names(params.bin_names) self.bin_inner_param.add_category_indexes(params.category_indexes) self.bin_inner_param.add_category_names(params.category_names) if params.transform_param.transform_cols == -1: self.bin_inner_param.set_transform_all() else: self.bin_inner_param.add_transform_bin_indexes(params.transform_param.transform_cols) self.bin_inner_param.add_transform_bin_names(params.transform_param.transform_names) self.set_bin_inner_param(self.bin_inner_param) @staticmethod def _convert_sparse_data(instances, bin_inner_param: BinInnerParam, bin_results: SplitPointsResult, abnormal_list: list, convert_type: str = 'bin_num'): instances = copy.deepcopy(instances) all_data = instances.features.get_all_data() data_shape = instances.features.get_shape() indice = [] sparse_value = [] transform_cols_idx_set = bin_inner_param.transform_bin_indexes_added_set split_points_dict = bin_results.all_split_points for col_idx, col_value in all_data: if col_idx in transform_cols_idx_set: if col_value in abnormal_list: indice.append(col_idx) sparse_value.append(col_value) continue # Maybe it is because missing value add in sparse value, but col_name = bin_inner_param.header[col_idx] split_points = split_points_dict[col_name] bin_num = BaseBinning.get_bin_num(col_value, split_points) indice.append(col_idx) if convert_type == 'bin_num': sparse_value.append(bin_num) else: sparse_value.append(col_value) else: indice.append(col_idx) sparse_value.append(col_value) sparse_vector = SparseVector(indice, sparse_value, data_shape) instances.features = sparse_vector return instances @staticmethod def get_sparse_bin(transform_cols_idx, split_points_dict, header): """ Get which bins the 0 located at for each column. Returns ------- Dict of sparse bin num {0: 2, 1: 3, 2:5 ... 
} """ result = {} for col_idx in transform_cols_idx: col_name = header[col_idx] split_points = split_points_dict[col_name] sparse_bin_num = BaseBinning.get_bin_num(0, split_points) result[col_idx] = sparse_bin_num return result @staticmethod def _convert_dense_data(instances, bin_inner_param: BinInnerParam, bin_results: SplitPointsResult, abnormal_list: list, convert_type: str = 'bin_num'): instances = copy.deepcopy(instances) features = instances.features transform_cols_idx_set = bin_inner_param.transform_bin_indexes_added_set split_points_dict = bin_results.all_split_points for col_idx, col_value in enumerate(features): if col_idx in transform_cols_idx_set: if col_value in abnormal_list: features[col_idx] = col_value continue col_name = bin_inner_param.header[col_idx] split_points = split_points_dict[col_name] bin_num = BaseBinning.get_bin_num(col_value, split_points) if convert_type == 'bin_num': features[col_idx] = bin_num else: features[col_idx] = col_value instances.features = features return instances @staticmethod def convert_bin_counts_table(result_counts, idx): """ Given event count information calculate iv information Parameters ---------- result_counts: table. It is like: ('x1': [[label_0_count, label_1_count, ...], [label_0_count, label_1_count, ...] ... ], 'x2': [[label_0_count, label_1_count, ...], [label_0_count, label_1_count, ...] ... ], ... ) idx: int Returns ------- ('x1': [[event_count, non_event_count], [event_count, non_event_count] ... ], 'x2': [[event_count, non_event_count], [event_count, non_event_count] ... ], ... ) """ def _convert(list_counts): res = [] for c_array in list_counts: event_count = c_array[idx] non_event_count = np.sum(c_array) - event_count res.append([event_count, non_event_count]) return res return result_counts.mapValues(_convert) @staticmethod def fill_sparse_result(col_name, static_nums, sparse_bin_points, label_counts): """ Parameters ---------- static_nums : list. It is like: [[event_count, total_num], [event_count, total_num] ... ] sparse_bin_points : dict Dict of sparse bin num {"x1": 2, "x2": 3, "x3": 5 ... } label_counts: np.array eg. [100, 200, ...] Returns ------- The format is same as result_counts. """ curt_all = functools.reduce(lambda x, y: x + y, static_nums) sparse_bin = sparse_bin_points.get(col_name) static_nums[sparse_bin] = label_counts - curt_all return col_name, static_nums @staticmethod def bin_data(instance, split_points, cols_dict, header, is_sparse): """ Apply the binning method Parameters ---------- instance : Table The input data split_points : dict. Each value represent for the split points for a feature. The element in each row represent for the corresponding split point. e.g. split_points = {'x1': [0.1, 0.2, 0.3, 0.4 ...], # The first feature 'x2': [1, 2, 3, 4, ...], # The second feature ...] # Other features cols_dict: dict Record key, value pairs where key is cols' name, and value is cols' index. header: list header of Table is_sparse: bool Specify whether it is sparse data or not Returns ------- result_bin_dict : dict. Each element represent for the corresponding bin number this feature belongs to. e.g. it could be: [{1: 1, 2: 5, 3: 2} ... ] # Each number represent for the bin number it belongs to. 
""" result_bin_nums = {} if is_sparse: sparse_data = instance.features.get_all_data() for col_idx, col_value in sparse_data: col_name = header[col_idx] if col_name in cols_dict: col_split_points = split_points[col_name] col_bin_num = BaseBinning.get_bin_num(col_value, col_split_points) result_bin_nums[col_name] = col_bin_num return result_bin_nums # For dense data for col_name, col_index in cols_dict.items(): col_split_points = split_points[col_name] value = instance.features[col_index] col_bin_num = BaseBinning.get_bin_num(value, col_split_points) result_bin_nums[col_name] = col_bin_num return result_bin_nums @staticmethod def get_bin_num(value, split_points): if np.isnan(value): return len(split_points) sp = split_points[:-1] col_bin_num = bisect.bisect_left(sp, value) # col_bin_num = bisect.bisect_left(split_points, value) return col_bin_num @staticmethod def add_label_in_partition_bak(data_bin_with_table, sparse_bin_points): """ Add all label, so that become convenient to calculate woe and iv Parameters ---------- data_bin_with_table : Table The input data, the Table is like: (id, {'x1': 1, 'x2': 5, 'x3': 2}, y) sparse_bin_points: dict Dict of sparse bin num {0: 2, 1: 3, 2:5 ... } Returns ------- result_sum: the result Table. It is like: {'x1': [[event_count, total_num], [event_count, total_num] ... ], 'x2': [[event_count, total_num], [event_count, total_num] ... ], ... } """ result_sum = {} for _, datas in data_bin_with_table: bin_idx_dict = datas[0] y = datas[1] # y = y_combo[0] # inverse_y = y_combo[1] for col_name, bin_idx in bin_idx_dict.items(): result_sum.setdefault(col_name, []) col_sum = result_sum[col_name] while bin_idx >= len(col_sum): col_sum.append([0, 0]) if bin_idx == sparse_bin_points[col_name]: continue label_sum = col_sum[bin_idx] label_sum[0] = label_sum[0] + y label_sum[1] = label_sum[1] + 1 col_sum[bin_idx] = label_sum result_sum[col_name] = col_sum return list(result_sum.items())
19,553
36.968932
111
py
FATE
FATE-master/python/federatedml/feature/binning/iv_calculator.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import math import operator import numpy as np from federatedml.feature.binning.base_binning import BaseBinning from federatedml.feature.binning.bin_result import BinColResults, MultiClassBinResult from federatedml.statistic import data_overview from federatedml.feature.sparse_vector import SparseVector from federatedml.cipher_compressor.compressor import PackingCipherTensor from federatedml.util import LOGGER class IvCalculator(object): def __init__(self, adjustment_factor, role, party_id): self.adjustment_factor = adjustment_factor self.role = role self.party_id = party_id def cal_local_iv(self, data_instances, split_points, labels=None, label_counts=None, bin_cols_map=None, label_table=None): """ data_bin_table : Table. Each element represent for the corresponding bin number this feature belongs to. e.g. it could be: [{'x1': 1, 'x2': 5, 'x3': 2} ... ] Returns: MultiClassBinResult object """ header = data_instances.schema.get("header") if bin_cols_map is None: bin_cols_map = {name: idx for idx, name in enumerate(header)} bin_indexes = [idx for idx, _ in enumerate(header)] else: bin_indexes = [] for h in header: if h in bin_cols_map: bin_indexes.append(bin_cols_map[h]) if label_counts is None: label_counts = data_overview.get_label_count(data_instances) labels = sorted(label_counts.keys()) labels.reverse() label_counts = [label_counts[k] for k in labels] data_bin_table = BaseBinning.get_data_bin(data_instances, split_points, bin_cols_map) sparse_bin_points = BaseBinning.get_sparse_bin(bin_indexes, split_points, header) sparse_bin_points = {header[k]: v for k, v in sparse_bin_points.items()} if label_table is None: label_table = self.convert_label(data_instances, labels) result_counts = self.cal_bin_label(data_bin_table, sparse_bin_points, label_table, label_counts) multi_bin_res = self.cal_iv_from_counts(result_counts, labels, role=self.role, party_id=self.party_id) for col_name, sp in split_points.items(): multi_bin_res.put_col_split_points(col_name, sp) return multi_bin_res def cal_iv_from_counts(self, result_counts, labels, role, party_id): result = MultiClassBinResult(labels) result.set_role_party(role, party_id) if len(labels) == 2: col_result_obj_dict = self.cal_single_label_iv_woe(result_counts, self.adjustment_factor) for col_name, bin_col_result in col_result_obj_dict.items(): result.put_col_results(col_name=col_name, col_results=bin_col_result) else: for label_idx, y in enumerate(labels): this_result_counts = self.mask_label(result_counts, label_idx) col_result_obj_dict = self.cal_single_label_iv_woe(this_result_counts, self.adjustment_factor) for col_name, bin_col_result in col_result_obj_dict.items(): result.put_col_results(col_name=col_name, col_results=bin_col_result, label_idx=label_idx) return result @staticmethod def mask_label(result_counts, label_idx): def _mask(counts): res = [] for c in counts: res.append(np.array([c[label_idx], np.sum(c) - c[label_idx]])) return 
res return result_counts.mapValues(_mask) def cal_bin_label(self, data_bin_table, sparse_bin_points, label_table, label_counts): """ data_bin_table : Table. Each element represent for the corresponding bin number this feature belongs to. e.g. it could be: [{'x1': 1, 'x2': 5, 'x3': 2} ... ] sparse_bin_points: dict Dict of sparse bin num {"x0": 2, "x1": 3, "x2": 5 ... } label_table : Table id with labels Returns: Table with value: [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ] """ data_bin_with_label = data_bin_table.join(label_table, lambda x, y: (x, y)) f = functools.partial(self.add_label_in_partition, sparse_bin_points=sparse_bin_points) result_counts = data_bin_with_label.mapReducePartitions(f, self.aggregate_partition_label) return result_counts def cal_single_label_iv_woe(self, result_counts, adjustment_factor): """ Given event count information calculate iv information Parameters ---------- result_counts: dict or table. It is like: {'x1': [[event_count, non_event_count], [event_count, non_event_count] ... ], 'x2': [[event_count, non_event_count], [event_count, non_event_count] ... ], ... } adjustment_factor : float The adjustment factor when calculating WOE Returns ------- Dict of IVAttributes object {'x1': attr_obj, 'x2': attr_obj ... } """ if isinstance(result_counts, dict): col_result_obj_dict = {} for col_name, data_event_count in result_counts.items(): col_result_obj = self.woe_1d(data_event_count, adjustment_factor) col_result_obj_dict[col_name] = col_result_obj else: woe_1d = functools.partial(self.woe_1d, adjustment_factor=adjustment_factor) col_result_obj_dict = dict(result_counts.mapValues(woe_1d).collect()) return col_result_obj_dict @staticmethod def fill_sparse_result(col_name, static_nums, sparse_bin_points, label_counts): """ Parameters ---------- col_name: str current col_name, use to obtain sparse point static_nums : list. It is like: [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ] where the bin of sparse point located in is empty. sparse_bin_points : dict Dict of sparse bin num {"x1": 2, "x2": 3, "x3": 5 ... } label_counts: np.array eg. [100, 200, ...] Returns ------- The format is same as static_nums. """ curt_all = functools.reduce(lambda x, y: x + y, static_nums) sparse_bin = sparse_bin_points.get(col_name) static_nums[sparse_bin] = label_counts - curt_all return col_name, static_nums @staticmethod def combine_labels(result_counts, idx): """ result_counts: Table [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ] idx: int Returns: """ @staticmethod def add_label_in_partition(data_bin_with_table, sparse_bin_points): """ Add all label, so that become convenient to calculate woe and iv Parameters ---------- data_bin_with_table : Table The input data, the Table is like: (id, {'x1': 1, 'x2': 5, 'x3': 2}, y) where y = [is_label_0, is_label_1, ...] which is one-hot format array of label sparse_bin_points: dict Dict of sparse bin num {0: 2, 1: 3, 2:5 ... } Returns ------- ['x1', [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ], 'x2', [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ], ... 
] """ result_sum = {} for _, datas in data_bin_with_table: bin_idx_dict = datas[0] y = datas[1] for col_name, bin_idx in bin_idx_dict.items(): result_sum.setdefault(col_name, []) col_sum = result_sum[col_name] while bin_idx >= len(col_sum): if isinstance(y, PackingCipherTensor): zero_y = np.zeros(y.dim) col_sum.append(PackingCipherTensor(zero_y.tolist())) else: col_sum.append(np.zeros(len(y))) # if bin_idx == sparse_bin_points[col_name]: # continue col_sum[bin_idx] = col_sum[bin_idx] + y return list(result_sum.items()) @staticmethod def aggregate_partition_label(sum1, sum2): """ Used in reduce function. Aggregate the result calculate from each partition. Parameters ---------- sum1 : list. It is like: [[label_0_sum, label_1_sum, ...], [label_0_sum, label_1_sum, ...] ... ] sum2 : list Same as sum1 Returns ------- Merged sum. The format is same as sum1. """ if sum1 is None and sum2 is None: return None if sum1 is None: return sum2 if sum2 is None: return sum1 for idx, label_sum2 in enumerate(sum2): if idx >= len(sum1): sum1.append(label_sum2) else: sum1[idx] = sum1[idx] + label_sum2 return sum1 @staticmethod def woe_1d(data_event_count, adjustment_factor): """ Given event and non-event count in one column, calculate its woe value. Parameters ---------- data_event_count : list [(event_sum, non-event_sum), (same sum in second_bin), (in third bin) ...] adjustment_factor : float The adjustment factor when calculating WOE Returns ------- IVAttributes : object Stored information that related iv and woe value """ event_total = 0 non_event_total = 0 for bin_res in data_event_count: if len(bin_res) != 2: raise ValueError(f"bin_res should has length of 2," f" data_event_count: {data_event_count}, bin_res: {bin_res}") event_total += bin_res[0] non_event_total += bin_res[1] if event_total == 0: # raise ValueError("NO event label in target data") event_total = 1 if non_event_total == 0: # raise ValueError("NO non-event label in target data") non_event_total = 1 iv = 0 event_count_array = [] non_event_count_array = [] event_rate_array = [] non_event_rate_array = [] woe_array = [] iv_array = [] for event_count, non_event_count in data_event_count: if event_count == 0 or non_event_count == 0: event_rate = 1.0 * (event_count + adjustment_factor) / event_total non_event_rate = 1.0 * (non_event_count + adjustment_factor) / non_event_total else: event_rate = 1.0 * event_count / event_total non_event_rate = 1.0 * non_event_count / non_event_total woe_i = math.log(event_rate / non_event_rate) event_count_array.append(int(event_count)) non_event_count_array.append(int(non_event_count)) event_rate_array.append(event_rate) non_event_rate_array.append(non_event_rate) woe_array.append(woe_i) iv_i = (event_rate - non_event_rate) * woe_i iv_array.append(iv_i) iv += iv_i return BinColResults(woe_array=woe_array, iv_array=iv_array, event_count_array=event_count_array, non_event_count_array=non_event_count_array, event_rate_array=event_rate_array, non_event_rate_array=non_event_rate_array, iv=iv) @staticmethod def statistic_label(data_instances): label_counts = data_overview.get_label_count(data_instances) label_elements = list(label_counts.keys()) label_counts = [label_counts[k] for k in label_elements] return label_elements, label_counts @staticmethod def convert_label(data_instances, label_elements): def _convert(instance): res_labels = np.zeros(len(label_elements)) res_labels[label_elements.index(instance.label)] = 1 return res_labels label_table = data_instances.mapValues(_convert) return label_table @staticmethod def 
woe_transformer(data_instances, bin_inner_param, multi_class_bin_res: MultiClassBinResult, abnormal_list=None): if abnormal_list is None: abnormal_list = [] if len(multi_class_bin_res.bin_results) > 1: raise ValueError(f"WOE transform of multi-class-labeled data not supported. Please check!") bin_res = multi_class_bin_res.bin_results[0] transform_cols_idx = bin_inner_param.transform_bin_indexes split_points_dict = bin_res.all_split_points is_sparse = data_overview.is_sparse_data(data_instances) def convert(instances): if is_sparse: all_data = instances.features.get_all_data() indice = [] sparse_value = [] data_shape = instances.features.get_shape() for col_idx, col_value in all_data: if col_idx in transform_cols_idx: if col_value in abnormal_list: indice.append(col_idx) sparse_value.append(col_value) continue # Maybe it is because missing value add in sparse value, but col_name = bin_inner_param.header[col_idx] split_points = split_points_dict[col_name] bin_num = BaseBinning.get_bin_num(col_value, split_points) indice.append(col_idx) col_results = bin_res.all_cols_results.get(col_name) woe_value = col_results.woe_array[bin_num] sparse_value.append(woe_value) else: indice.append(col_idx) sparse_value.append(col_value) sparse_vector = SparseVector(indice, sparse_value, data_shape) instances.features = sparse_vector else: features = instances.features assert isinstance(features, np.ndarray) transform_cols_idx_set = set(transform_cols_idx) for col_idx, col_value in enumerate(features): if col_idx in transform_cols_idx_set: if col_value in abnormal_list: features[col_idx] = col_value continue col_name = bin_inner_param.header[col_idx] split_points = split_points_dict[col_name] bin_num = BaseBinning.get_bin_num(col_value, split_points) col_results = bin_res.all_cols_results.get(col_name) if np.isnan(col_value) and len(col_results.woe_array) == bin_num: raise ValueError("Missing value found in transform data, " "but the training data does not have missing value, " "this is not supported.") woe_value = col_results.woe_array[bin_num] features[col_idx] = woe_value instances.features = features return instances return data_instances.mapValues(convert) @staticmethod def check_containing_missing_value(data_instances): is_sparse = data_overview.is_sparse_data(data_instances) def _sparse_check(instance): result = set() sparse_data = instance.features.get_all_data() for col_idx, col_value in sparse_data: if np.isnan(col_value): result.add(col_idx) return result if is_sparse: has_missing_value = data_instances.mapValues(_sparse_check).reduce( lambda a, b: a.union(b) ) else: has_missing_value = data_instances.mapValues(lambda x: x.features).reduce(operator.add) has_missing_value = {idx for idx, value in enumerate(has_missing_value) if np.isnan(value)} return has_missing_value
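# --- Editorial sketch, not part of the original FATE source ---
# The arithmetic behind woe_1d, ignoring the zero-count adjustment factor:
# per bin, woe_i = ln(event_rate / non_event_rate) and
# iv_i = (event_rate - non_event_rate) * woe_i; the column IV is sum(iv_i).
if __name__ == "__main__":
    _bins = [(10, 90), (40, 60)]                   # (event_count, non_event_count) per bin
    _event_total = sum(e for e, _ in _bins)        # 50
    _non_event_total = sum(n for _, n in _bins)    # 150
    _iv = 0.0
    for _event, _non_event in _bins:
        _event_rate = _event / _event_total
        _non_event_rate = _non_event / _non_event_total
        _woe = math.log(_event_rate / _non_event_rate)
        _iv += (_event_rate - _non_event_rate) * _woe
    # _iv now matches the BinColResults.iv that woe_1d would report for these counts.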
17,696
38.239468
113
py
FATE
FATE-master/python/federatedml/feature/binning/bin_inner_param.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from federatedml.util import LOGGER class BinInnerParam(object): """ Use to store columns related params for binning process """ def __init__(self): self.bin_indexes = [] self.bin_names = [] self.bin_indexes_added_set = set() self.col_name_maps = {} self.anonymous_col_name_maps = {} self.col_name_anonymous_maps = {} self.header = [] self.anonymous_header = [] self.transform_bin_indexes = [] self.transform_bin_names = [] self.transform_bin_indexes_added_set = set() self.category_indexes = [] self.category_names = [] self.category_indexes_added_set = set() def set_header(self, header, anonymous_header): self.header = copy.deepcopy(header) self.anonymous_header = copy.deepcopy(anonymous_header) for idx, col_name in enumerate(self.header): self.col_name_maps[col_name] = idx self.anonymous_col_name_maps = dict(zip(self.anonymous_header, self.header)) self.col_name_anonymous_maps = dict(zip(self.header, self.anonymous_header)) def set_bin_all(self): """ Called when user set to bin all columns """ self.bin_indexes = [i for i in range(len(self.header))] self.bin_indexes_added_set = set(self.bin_indexes) self.bin_names = copy.deepcopy(self.header) def set_transform_all(self): self.transform_bin_indexes = self.bin_indexes self.transform_bin_names = self.bin_names self.transform_bin_indexes.extend(self.category_indexes) self.transform_bin_names.extend(self.category_names) self.transform_bin_indexes_added_set = set(self.transform_bin_indexes) def add_bin_indexes(self, bin_indexes): if bin_indexes is None: return for idx in bin_indexes: if idx >= len(self.header): # LOGGER.warning("Adding a index that out of header's bound") # continue raise ValueError("Adding a index that out of header's bound") if idx not in self.bin_indexes_added_set: self.bin_indexes.append(idx) self.bin_indexes_added_set.add(idx) self.bin_names.append(self.header[idx]) def add_bin_names(self, bin_names): if bin_names is None: return for bin_name in bin_names: idx = self.col_name_maps.get(bin_name) if idx is None: LOGGER.warning("Adding a col_name that is not exist in header") continue if idx not in self.bin_indexes_added_set: self.bin_indexes.append(idx) self.bin_indexes_added_set.add(idx) self.bin_names.append(self.header[idx]) def add_transform_bin_indexes(self, transform_indexes): if transform_indexes is None: return for idx in transform_indexes: if idx >= len(self.header) or idx < 0: raise ValueError("Adding a index that out of header's bound") # LOGGER.warning("Adding a index that out of header's bound") # continue if idx not in self.transform_bin_indexes_added_set: self.transform_bin_indexes.append(idx) self.transform_bin_indexes_added_set.add(idx) self.transform_bin_names.append(self.header[idx]) def add_transform_bin_names(self, transform_names): if transform_names is None: return for bin_name in transform_names: idx = self.col_name_maps.get(bin_name) if idx is None: raise 
ValueError("Adding a col_name that is not exist in header") if idx not in self.transform_bin_indexes_added_set: self.transform_bin_indexes.append(idx) self.transform_bin_indexes_added_set.add(idx) self.transform_bin_names.append(self.header[idx]) def add_category_indexes(self, category_indexes): if category_indexes == -1: category_indexes = [i for i in range(len(self.header))] elif category_indexes is None: return for idx in category_indexes: if idx >= len(self.header): LOGGER.warning("Adding a index that out of header's bound") continue if idx not in self.category_indexes_added_set: self.category_indexes.append(idx) self.category_indexes_added_set.add(idx) self.category_names.append(self.header[idx]) if idx in self.bin_indexes_added_set: self.bin_indexes_added_set.remove(idx) self._align_bin_index() def add_category_names(self, category_names): if category_names is None: return for bin_name in category_names: idx = self.col_name_maps.get(bin_name) if idx is None: LOGGER.warning("Adding a col_name that is not exist in header") continue if idx not in self.category_indexes_added_set: self.category_indexes.append(idx) self.category_indexes_added_set.add(idx) self.category_names.append(self.header[idx]) if idx in self.bin_indexes_added_set: self.bin_indexes_added_set.remove(idx) self._align_bin_index() def _align_bin_index(self): if len(self.bin_indexes_added_set) != len(self.bin_indexes): new_bin_indexes = [] new_bin_names = [] for idx in self.bin_indexes: if idx in self.bin_indexes_added_set: new_bin_indexes.append(idx) new_bin_names.append(self.header[idx]) self.bin_indexes = new_bin_indexes self.bin_names = new_bin_names def get_need_cal_iv_cols_map(self): names = self.bin_names + self.category_names indexs = self.bin_indexes + self.category_indexes assert len(names) == len(indexs) return dict(zip(names, indexs)) @property def bin_cols_map(self): assert len(self.bin_indexes) == len(self.bin_names) return dict(zip(self.bin_names, self.bin_indexes)) @staticmethod def change_to_anonymous(col_name, v, col_name_anonymous_maps: dict): anonymous_col = col_name_anonymous_maps.get(col_name) return anonymous_col, v def get_anonymous_col_name_list(self, col_name_list: list): result = [] for x in col_name_list: result.append(self.col_name_anonymous_maps[x]) return result def get_col_name_by_anonymous(self, anonymous_col_name: str): return self.anonymous_col_name_maps.get(anonymous_col_name)
7,522
36.994949
84
py
FATE
FATE-master/python/federatedml/feature/binning/quantile_binning.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import functools import uuid from fate_arch.common.versions import get_eggroll_version from federatedml.feature.binning.base_binning import BaseBinning from federatedml.feature.binning.quantile_summaries import quantile_summary_factory from federatedml.param.feature_binning_param import FeatureBinningParam from federatedml.statistic import data_overview from federatedml.util import LOGGER from federatedml.util import consts import numpy as np class QuantileBinning(BaseBinning): """ After quantile binning, the numbers of elements in each binning are equal. The result of this algorithm has the following deterministic bound: If the data_instances has N elements and if we request the quantile at probability `p` up to error `err`, then the algorithm will return a sample `x` from the data so that the *exact* rank of `x` is close to (p * N). More precisely, {{{ floor((p - 2 * err) * N) <= rank(x) <= ceil((p + 2 * err) * N) }}} This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). """ def __init__(self, params: FeatureBinningParam, abnormal_list=None, allow_duplicate=False): super(QuantileBinning, self).__init__(params, abnormal_list) self.summary_dict = None self.allow_duplicate = allow_duplicate def fit_split_points(self, data_instances): """ Apply the binning method Parameters ---------- data_instances : Table The input data Returns ------- split_points : dict. Each value represent for the split points for a feature. The element in each row represent for the corresponding split point. e.g. split_points = {'x1': [0.1, 0.2, 0.3, 0.4 ...], # The first feature 'x2': [1, 2, 3, 4, ...], # The second feature ... 
# Other features } """ header = data_overview.get_header(data_instances) anonymous_header = data_overview.get_anonymous_header(data_instances) LOGGER.debug("Header length: {}".format(len(header))) self._default_setting(header, anonymous_header) # self._init_cols(data_instances) percent_value = 1.0 / self.bin_num # calculate the split points percentile_rate = [i * percent_value for i in range(1, self.bin_num)] percentile_rate.append(1.0) is_sparse = data_overview.is_sparse_data(data_instances) self._fit_split_point(data_instances, is_sparse, percentile_rate) self.fit_category_features(data_instances) return self.bin_results.all_split_points @staticmethod def copy_merge(s1, s2): # new_s1 = copy.deepcopy(s1) return s1.merge(s2) def _fit_split_point(self, data_instances, is_sparse, percentile_rate): if self.summary_dict is None: f = functools.partial(self.feature_summary, params=self.params, abnormal_list=self.abnormal_list, cols_dict=self.bin_inner_param.bin_cols_map, header=self.header, is_sparse=is_sparse) # summary_dict_table = data_instances.mapReducePartitions(f, self.copy_merge) summary_dict_table = data_instances.mapReducePartitions(f, lambda s1, s2: s1.merge(s2)) # summary_dict = dict(summary_dict.collect()) if is_sparse: total_count = data_instances.count() summary_dict_table = summary_dict_table.mapValues(lambda x: x.set_total_count(total_count)) self.summary_dict = summary_dict_table else: summary_dict_table = self.summary_dict f = functools.partial(self._get_split_points, allow_duplicate=self.allow_duplicate, percentile_rate=percentile_rate) summary_dict = dict(summary_dict_table.mapValues(f).collect()) for col_name, split_point in summary_dict.items(): self.bin_results.put_col_split_points(col_name, split_point) @staticmethod def _get_split_points(summary, percentile_rate, allow_duplicate): split_points = summary.query_percentile_rate_list(percentile_rate) if not allow_duplicate: return np.unique(split_points) else: return np.array(split_points) @staticmethod def feature_summary(data_iter, params, cols_dict, abnormal_list, header, is_sparse): summary_dict = {} summary_param = {'compress_thres': params.compress_thres, 'head_size': params.head_size, 'error': params.error, 'abnormal_list': abnormal_list} for col_name, col_index in cols_dict.items(): quantile_summaries = quantile_summary_factory(is_sparse=is_sparse, param_dict=summary_param) summary_dict[col_name] = quantile_summaries _ = str(uuid.uuid1()) for _, instant in data_iter: if not is_sparse: if type(instant).__name__ == 'Instance': features = instant.features else: features = instant for col_name, summary in summary_dict.items(): col_index = cols_dict[col_name] summary.insert(features[col_index]) else: data_generator = instant.features.get_all_data() for col_idx, col_value in data_generator: col_name = header[col_idx] if col_name not in cols_dict: continue summary = summary_dict[col_name] summary.insert(col_value) result = [] for features_name, summary_obj in summary_dict.items(): summary_obj.compress() # result.append(((_, features_name), summary_obj)) result.append((features_name, summary_obj)) return result @staticmethod def _query_split_points(summary, percent_rates): split_point = [] for percent_rate in percent_rates: s_p = summary.query(percent_rate) if s_p not in split_point: split_point.append(s_p) return split_point @staticmethod def approxi_quantile(data_instances, params, cols_dict, abnormal_list, header, is_sparse): """ Calculates each quantile information Parameters ---------- data_instances : Table The input 
data cols_dict: dict Record key, value pairs where key is cols' name, and value is cols' index. params : FeatureBinningParam object, Parameters that user set. abnormal_list: list, default: None Specify which columns are abnormal so that will not static when traveling. header: list, Storing the header information. is_sparse: bool Specify whether data_instance is in sparse type Returns ------- summary_dict: dict {'col_name1': summary1, 'col_name2': summary2, ... } """ summary_dict = {} summary_param = {'compress_thres': params.compress_thres, 'head_size': params.head_size, 'error': params.error, 'abnormal_list': abnormal_list} for col_name, col_index in cols_dict.items(): quantile_summaries = quantile_summary_factory(is_sparse=is_sparse, param_dict=summary_param) summary_dict[col_name] = quantile_summaries QuantileBinning.insert_datas(data_instances, summary_dict, cols_dict, header, is_sparse) for _, summary_obj in summary_dict.items(): summary_obj.compress() return summary_dict @staticmethod def insert_datas(data_instances, summary_dict, cols_dict, header, is_sparse): for iter_key, instant in data_instances: if not is_sparse: if type(instant).__name__ == 'Instance': features = instant.features else: features = instant for col_name, summary in summary_dict.items(): col_index = cols_dict[col_name] summary.insert(features[col_index]) else: data_generator = instant.features.get_all_data() for col_idx, col_value in data_generator: col_name = header[col_idx] summary = summary_dict[col_name] summary.insert(col_value) @staticmethod def merge_summary_dict(s_dict1, s_dict2): if s_dict1 is None and s_dict2 is None: return None if s_dict1 is None: return s_dict2 if s_dict2 is None: return s_dict1 s_dict1 = copy.deepcopy(s_dict1) s_dict2 = copy.deepcopy(s_dict2) new_dict = {} for col_name, summary1 in s_dict1.items(): summary2 = s_dict2.get(col_name) summary1.merge(summary2) new_dict[col_name] = summary1 return new_dict @staticmethod def _query_quantile_points(col_name, summary, quantile_dict): quantile = quantile_dict.get(col_name) if quantile is not None: return col_name, summary.query(quantile) return col_name, quantile def query_quantile_point(self, query_points, col_names=None): if self.summary_dict is None: raise RuntimeError("Bin object should be fit before query quantile points") if col_names is None: col_names = self.bin_inner_param.bin_names summary_dict = self.summary_dict if isinstance(query_points, (int, float)): query_dict = {} for col_name in col_names: query_dict[col_name] = query_points elif isinstance(query_points, dict): query_dict = query_points else: raise ValueError("query_points has wrong type, should be a float, int or dict") f = functools.partial(self._query_quantile_points, quantile_dict=query_dict) result = dict(summary_dict.map(f).collect()) return result class QuantileBinningTool(QuantileBinning): """ Use for quantile binning data directly. """ def __init__(self, bin_nums=consts.G_BIN_NUM, param_obj: FeatureBinningParam = None, abnormal_list=None, allow_duplicate=False): if param_obj is None: param_obj = FeatureBinningParam(bin_num=bin_nums) super().__init__(params=param_obj, abnormal_list=abnormal_list, allow_duplicate=allow_duplicate)
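# --- Editorial sketch, not part of the original FATE source ---
# The Greenwald-Khanna guarantee quoted in the class docstring, made
# concrete: a query at probability p with relative error err returns a
# sample whose exact rank r satisfies
# floor((p - 2*err) * N) <= r <= ceil((p + 2*err) * N).
if __name__ == "__main__":
    import math

    _N, _p, _err = 10000, 0.5, 0.001
    _lo = math.floor((_p - 2 * _err) * _N)
    _hi = math.ceil((_p + 2 * _err) * _N)
    assert (_lo, _hi) == (4980, 5020)  # the median sample's true rank lies in this window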
11,770
36.487261
107
py
FATE
FATE-master/python/federatedml/feature/binning/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
661
35.777778
75
py
FATE
FATE-master/python/federatedml/feature/binning/quantile_summaries.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import math

import numpy as np

from federatedml.util import consts, LOGGER

"""
Structure of compressed object, for memory saving we use tuple (value, g, delta) in fate>=v1.8
"""

"""
class Stats(object):
    def __init__(self, value, g: int, delta: int):
        self.value = value
        self.g = g
        self.delta = delta
"""


class QuantileSummaries(object):
    def __init__(self, compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD,
                 head_size=consts.DEFAULT_HEAD_SIZE,
                 error=consts.DEFAULT_RELATIVE_ERROR,
                 abnormal_list=None):
        self.compress_thres = compress_thres
        self.head_size = head_size
        self.error = error
        self.head_sampled = []
        self.sampled = []  # list of Stats
        self.count = 0  # Total observations appeared
        self.missing_count = 0
        if abnormal_list is None:
            self.abnormal_list = []
        else:
            self.abnormal_list = abnormal_list

    # insert a number
    def insert(self, x):
        """
        Insert an observation of data. First store it in an array buffer. If the buffer is full,
        do a batch insert. If the size of the sampled list reaches compress_thres, compress the list.

        Parameters
        ----------
        x : float
            The observation to insert
        """
        if x in self.abnormal_list or (isinstance(x, float) and np.isnan(x)):
            self.missing_count += 1
            return

        x = float(x)
        self.head_sampled.append(x)
        if len(self.head_sampled) >= self.head_size:
            self._insert_head_buffer()
            if len(self.sampled) >= self.compress_thres:
                self.compress()

    def _insert_head_buffer(self):
        if not len(self.head_sampled):  # If empty
            return
        current_count = self.count
        sorted_head = sorted(self.head_sampled)
        head_len = len(sorted_head)
        sample_len = len(self.sampled)
        new_sampled = []
        sample_idx = 0
        ops_idx = 0
        while ops_idx < head_len:
            current_sample = sorted_head[ops_idx]
            while sample_idx < sample_len and self.sampled[sample_idx][0] <= current_sample:
                new_sampled.append(self.sampled[sample_idx])
                sample_idx += 1

            current_count += 1

            # If it is the first one to insert or if it is the last one
            if not new_sampled or (sample_idx == sample_len and ops_idx == head_len - 1):
                delta = 0
            else:
                # delta = math.floor(2 * self.error * current_count) - 1
                delta = math.floor(2 * self.error * current_count)

            new_sampled.append((current_sample, 1, delta))
            ops_idx += 1

        new_sampled += self.sampled[sample_idx:]
        self.sampled = new_sampled
        self.head_sampled = []
        self.count = current_count

    def compress(self):
        self._insert_head_buffer()
        # merge_threshold = math.floor(2 * self.error * self.count) - 1
        merge_threshold = 2 * self.error * self.count
        compressed = self._compress_immut(merge_threshold)
        self.sampled = compressed

    def merge(self, other):
        """
        merge current summaries with the other one.
        Parameters
        ----------
        other : QuantileSummaries
            The summaries to be merged
        """
        if other.head_sampled:
            # other._insert_head_buffer()
            other.compress()

        if self.head_sampled:
            # self._insert_head_buffer()
            self.compress()

        if other.count == 0:
            return self

        if self.count == 0:
            return other

        # merge two sorted arrays
        new_sample = []
        i, j = 0, 0
        self_sample_len = len(self.sampled)
        other_sample_len = len(other.sampled)
        while i < self_sample_len and j < other_sample_len:
            if self.sampled[i][0] < other.sampled[j][0]:
                new_sample.append(self.sampled[i])
                i += 1
            else:
                new_sample.append(other.sampled[j])
                j += 1

        new_sample += self.sampled[i:]
        new_sample += other.sampled[j:]

        res_summary = self.__class__(compress_thres=self.compress_thres,
                                     head_size=self.head_size,
                                     error=self.error,
                                     abnormal_list=self.abnormal_list)
        res_summary.count = self.count + other.count
        res_summary.missing_count = self.missing_count + other.missing_count
        res_summary.sampled = new_sample
        # self.sampled = new_sample
        # self.count += other.count
        # merge_threshold = math.floor(2 * self.error * self.count) - 1
        merge_threshold = 2 * self.error * res_summary.count
        res_summary.sampled = res_summary._compress_immut(merge_threshold)
        return res_summary

    def query(self, quantile):
        """
        Given the queried quantile, return a result within the approximation guarantee

        Parameters
        ----------
        quantile : float [0.0, 1.0]
            The target quantile

        Returns
        -------
        float, the corresponding value result.
        """
        if self.head_sampled:
            # self._insert_head_buffer()
            self.compress()

        if quantile < 0 or quantile > 1:
            raise ValueError("Quantile should be in range [0.0, 1.0]")

        if self.count == 0:
            return 0

        if quantile <= self.error:
            return self.sampled[0][0]

        if quantile >= 1 - self.error:
            return self.sampled[-1][0]

        rank = math.ceil(quantile * self.count)
        target_error = math.ceil(self.error * self.count)
        min_rank = 0
        i = 1
        while i < len(self.sampled) - 1:
            cur_sample = self.sampled[i]
            min_rank += cur_sample[1]
            max_rank = min_rank + cur_sample[2]
            if max_rank - target_error <= rank <= min_rank + target_error:
                return cur_sample[0]
            i += 1
        return self.sampled[-1][0]

    def query_percentile_rate_list(self, percentile_rate_list):
        if self.head_sampled:
            self.compress()

        if np.min(percentile_rate_list) < 0 or np.max(percentile_rate_list) > 1:
            raise ValueError("Quantile should be in range [0.0, 1.0]")

        if self.count == 0:
            return [0] * len(percentile_rate_list)

        split_points = []
        i, j = 0, len(percentile_rate_list) - 1
        # quantiles within `error` of 0 map to the smallest sample
        while i < len(percentile_rate_list) and percentile_rate_list[i] <= self.error:
            split_points.append(self.sampled[0][0])
            # split_points.append(self.sampled[0].value)
            i += 1
        # quantiles within `error` of 1 map to the largest sample; scan j in from the right
        while j >= 0 and percentile_rate_list[j] >= 1 - self.error:
            j -= 1
        k = 1
        min_rank = 0
        while i <= j:
            quantile = percentile_rate_list[i]
            rank = math.ceil(quantile * self.count)
            target_error = math.ceil(self.error * self.count)
            while k < len(self.sampled) - 1:
                # cur_sample = self.sampled[k]
                # min_rank += cur_sample.g
                # max_rank = min_rank + cur_sample.delta
                cur_sample_value = self.sampled[k][0]
                min_rank += self.sampled[k][1]
                max_rank = min_rank + self.sampled[k][2]
                if max_rank - target_error <= rank <= min_rank + target_error:
                    split_points.append(cur_sample_value)
                    min_rank -= self.sampled[k][1]
                    break
                k += 1
            if k == len(self.sampled) - 1:
                # split_points.append(self.sampled[-1].value)
                split_points.append(self.sampled[-1][0])
            i += 1

        while j + 1 < len(percentile_rate_list):
            j += 1
            split_points.append(self.sampled[-1][0])
        assert len(percentile_rate_list) == len(split_points)
        return split_points

    def value_to_rank(self, value):
        min_rank,
max_rank = 0, 0 for sample in self.sampled: if sample[0] < value: min_rank += sample[1] max_rank = min_rank + sample[2] else: return (min_rank + max_rank) // 2 return (min_rank + max_rank) // 2 def query_value_list(self, values): """ Given a sorted value list, return the rank of each element in this list """ self.compress() res = [] min_rank, max_rank = 0, 0 idx = 0 sample_idx = 0 while sample_idx < len(self.sampled): v = values[idx] sample = self.sampled[sample_idx] if sample[0] <= v: min_rank += sample[1] max_rank = min_rank + sample[2] sample_idx += 1 else: res.append((min_rank + max_rank) // 2) idx += 1 if idx >= len(values): break while idx < len(values): res.append((min_rank + max_rank) // 2) idx += 1 return res def _compress_immut(self, merge_threshold): if not self.sampled: return self.sampled res = [] # Start from the last element head = self.sampled[-1] sum_g_delta = head[1] + head[2] i = len(self.sampled) - 2 # Do not merge the last element while i >= 1: this_sample = self.sampled[i] if this_sample[1] + sum_g_delta < merge_threshold: head = (head[0], head[1] + this_sample[1], head[2]) sum_g_delta += this_sample[1] else: res.append(head) head = this_sample sum_g_delta = head[1] + head[2] i -= 1 res.append(head) # If head of current sample is smaller than this new res's head # Add current head into res current_head = self.sampled[0] if current_head[0] <= head[0] and len(self.sampled) > 1: res.append(current_head) # Python do not support prepend, thus, use reverse instead res.reverse() return res class SparseQuantileSummaries(QuantileSummaries): def __init__(self, compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD, head_size=consts.DEFAULT_HEAD_SIZE, error=consts.DEFAULT_RELATIVE_ERROR, abnormal_list=None): super(SparseQuantileSummaries, self).__init__(compress_thres, head_size, error, abnormal_list) # Compare with the sparse point, static the number of each part. 
self.smaller_num = 0 self.bigger_num = 0 self._total_count = 0 def set_total_count(self, total_count): self._total_count = total_count return self @property def summary_count(self): return self._total_count - self.missing_count def insert(self, x): if x in self.abnormal_list or np.isnan(x): self.missing_count += 1 return if x < consts.FLOAT_ZERO: self.smaller_num += 1 elif x >= consts.FLOAT_ZERO: self.bigger_num += 1 super(SparseQuantileSummaries, self).insert(x) def query(self, quantile): if self.zero_lower_bound < quantile < self.zero_upper_bound: return 0.0 non_zero_quantile = self._convert_query_percentile(quantile) result = super(SparseQuantileSummaries, self).query(non_zero_quantile) return result def query_percentile_rate_list(self, percentile_rate_list): result = [] non_zero_quantile_list = list() for quantile in percentile_rate_list: if self.zero_lower_bound < quantile < self.zero_upper_bound: result.append(0.0) else: non_zero_quantile_list.append(self._convert_query_percentile(quantile)) if non_zero_quantile_list: result += super(SparseQuantileSummaries, self).query_percentile_rate_list(non_zero_quantile_list) return result def value_to_rank(self, value): quantile_rank = super().value_to_rank(value) zeros_count = self.zero_counts if value > 0: return quantile_rank + zeros_count elif value < 0: return quantile_rank else: return quantile_rank + zeros_count // 2 def merge(self, other): assert isinstance(other, SparseQuantileSummaries) res_summary = super(SparseQuantileSummaries, self).merge(other) res_summary.smaller_num = self.smaller_num + other.smaller_num res_summary.bigger_num = self.bigger_num + other.bigger_num return res_summary def _convert_query_percentile(self, quantile): zeros_count = self.zero_counts if zeros_count == 0: return quantile if quantile <= self.zero_lower_bound: return ((self._total_count - self.missing_count) / self.count) * quantile return (quantile - self.zero_upper_bound + self.zero_lower_bound) / ( 1 - self.zero_upper_bound + self.zero_lower_bound) @property def zero_lower_bound(self): if self.smaller_num == 0: return 0.0 return self.smaller_num / (self._total_count - self.missing_count) @property def zero_upper_bound(self): if self.bigger_num == 0: return self._total_count - self.missing_count return (self.smaller_num + self.zero_counts) / (self._total_count - self.missing_count) @property def zero_counts(self): return self._total_count - self.smaller_num - self.bigger_num - self.missing_count def query_value_list(self, values): summary_ranks = super().query_value_list(values) res = [] for v, r in zip(values, summary_ranks): if v == 0: res.append(self.smaller_num) elif v < 0: res.append(r) else: res.append(r + self.zero_counts) return res def quantile_summary_factory(is_sparse, param_dict): if is_sparse: return SparseQuantileSummaries(**param_dict) else: return QuantileSummaries(**param_dict)
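# --- Editorial sketch, not part of the original FATE source ---
# Minimal end-to-end use of QuantileSummaries via the factory above:
# stream values in, compress, then query an approximate quantile. For
# 1000 distinct values, the returned median's rank error stays well
# inside the 2 * error * count bound from the class's guarantee.
if __name__ == "__main__":
    _summary = quantile_summary_factory(
        is_sparse=False,
        param_dict={"compress_thres": 1000, "head_size": 50, "error": 0.01},
    )
    for _v in range(1, 1001):
        _summary.insert(_v)
    _summary.compress()
    _median = _summary.query(0.5)
    assert abs(_median - 500) <= 2 * 0.01 * 1000  # within 20 ranks of the exact median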
15,189
32.68071
109
py
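The GK-style summary above can be exercised on its own, without a FATE computing session. Below is a minimal sketch, assuming the package is importable under the path shown in the file header; the 2 * error rank bound it checks informally is the same guarantee the unit tests later in this directory assert.

import numpy as np

from federatedml.feature.binning.quantile_summaries import QuantileSummaries

data = np.random.randn(10000)
summary = QuantileSummaries(compress_thres=1000, head_size=500, error=0.001)
for x in data:
    summary.insert(x)

# With relative error e, the rank of the returned value should fall within
# [(q - 2e) * n, (q + 2e) * n] of the exact rank for quantile q.
print(summary.query(0.5), np.percentile(data, 50))

Note that the sparse variant defined above additionally requires set_total_count(...) before querying, which is why fit_summary in quantile_tool.py (later in this section) calls it for sparse tables.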
FATE
FATE-master/python/federatedml/feature/binning/bucket_binning.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from federatedml.feature.binning.base_binning import BaseBinning
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.statistic import data_overview


class BucketBinning(BaseBinning):
    """
    For bucket binning, every bin has the same width:

        L = [max(x) - min(x)] / n

    and the k-th split point is min(x) + (k + 1) * L for k = 0 .. n - 2.
    """

    def fit_split_points(self, data_instances):
        """
        Apply the binning method

        Parameters
        ----------
        data_instances : Table
            The input data

        Returns
        -------
        split_points : dict.
            Each value represents the split points for a feature. The element in each
            row represents the corresponding split point, e.g.
            split_points = {'x1': [0.1, 0.2, 0.3, 0.4, ...],  # The first feature
                            'x2': [1, 2, 3, 4, ...],          # The second feature
                            ...}                              # Other features
        """
        header = data_overview.get_header(data_instances)
        anonymous_header = data_overview.get_anonymous_header(data_instances)
        self._default_setting(header, anonymous_header)
        # is_sparse = data_overview.is_sparse_data(data_instances)
        # if is_sparse:
        #     raise RuntimeError("Bucket Binning method has not supported sparse data yet.")

        # self._init_cols(data_instances)
        statistics = MultivariateStatisticalSummary(data_instances,
                                                    self.bin_inner_param.bin_indexes,
                                                    abnormal_list=self.abnormal_list)
        max_dict = statistics.get_max()
        min_dict = statistics.get_min()
        for col_name, max_value in max_dict.items():
            min_value = min_dict.get(col_name)
            split_points = []
            L = (max_value - min_value) / self.bin_num
            for k in range(self.bin_num - 1):
                s_p = min_value + (k + 1) * L
                split_points.append(s_p)
            split_points.append(max_value)
            # final_split_points[col_name] = split_point
            self.bin_results.put_col_split_points(col_name, split_points)
        self.fit_category_features(data_instances)
        return self.bin_results.all_split_points
3,082
37.5375
106
py
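The equal-width rule in BucketBinning is easy to verify in isolation. Here is a small sketch in plain numpy, no FATE table required, that mirrors the split-point loop above; the function name is illustrative and not part of FATE.

import numpy as np

def bucket_split_points(values, bin_num):
    # Mirrors BucketBinning.fit_split_points: width L = (max - min) / bin_num,
    # split point k is min + (k + 1) * L, and the max is appended as the last edge.
    min_v, max_v = float(np.min(values)), float(np.max(values))
    width = (max_v - min_v) / bin_num
    points = [min_v + (k + 1) * width for k in range(bin_num - 1)]
    points.append(max_v)
    return points

print(bucket_split_points(np.arange(1000), 10))
# [99.9, 199.8, ..., 899.1, 999.0]; these are exactly the values the
# bucket binning unit test later in this section expects.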
FATE
FATE-master/python/federatedml/feature/binning/quantile_tool.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools

from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.statistic import data_overview
from federatedml.util import consts, LOGGER


class QuantileBinningTool(QuantileBinning):
    """
    Use for quantile binning data directly.
    """

    def __init__(self, bin_nums=consts.G_BIN_NUM, param_obj: FeatureBinningParam = None,
                 abnormal_list=None, allow_duplicate=False):
        if param_obj is None:
            param_obj = FeatureBinningParam(bin_num=bin_nums)
        super().__init__(params=param_obj, abnormal_list=abnormal_list, allow_duplicate=allow_duplicate)
        self.has_fit = False

    def fit_split_points(self, data_instances):
        res = super(QuantileBinningTool, self).fit_split_points(data_instances)
        self.has_fit = True
        return res

    def fit_summary(self, data_instances, is_sparse=None):
        if is_sparse is None:
            is_sparse = data_overview.is_sparse_data(data_instances)
        LOGGER.debug(f"is_sparse: {is_sparse}")
        f = functools.partial(self.feature_summary,
                              params=self.params,
                              abnormal_list=self.abnormal_list,
                              cols_dict=self.bin_inner_param.bin_cols_map,
                              header=self.header,
                              is_sparse=is_sparse)
        summary_dict_table = data_instances.mapReducePartitions(f, self.copy_merge)
        # summary_dict = dict(summary_dict.collect())

        if is_sparse:
            total_count = data_instances.count()
            summary_dict_table = summary_dict_table.mapValues(lambda x: x.set_total_count(total_count))
        return summary_dict_table

    def get_quantile_point(self, quantile):
        """
        Return the specific quantile point value

        Parameters
        ----------
        quantile : float, 0 <= quantile <= 1
            The quantile rate to query, e.g. 0.5 for the median.

        Returns
        -------
        A dict of quantile points, e.g. quantile_point = {"x1": 3, "x2": 5, ...}
        """
        if not self.has_fit:
            raise RuntimeError("Quantile Binning Tool's split points should be fit before calling"
                               " get quantile points")
        f = functools.partial(self._get_split_points,
                              allow_duplicate=self.allow_duplicate,
                              percentile_rate=[quantile])
        quantile_points = dict(self.summary_dict.mapValues(f).collect())
        quantile_points = {k: v[0] for k, v in quantile_points.items()}
        return quantile_points

    def get_median(self):
        return self.get_quantile_point(0.5)
3,447
38.181818
104
py
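For a runnable end-to-end call of QuantileBinningTool, the setup used by the unit tests later in this directory works as a template. The sketch below assumes a working FATE standalone environment and that fit_split_points populates the internal summary, as the has_fit guard implies; the schema fields follow the tests.

import numpy as np

from fate_arch.session import computing_session as session
from federatedml.feature.binning.quantile_tool import QuantileBinningTool
from federatedml.feature.instance import Instance

session.init("quantile_tool_demo")
data = [(i, Instance(inst_id=i, features=np.array([float(i % 10)]), label=i % 2))
        for i in range(1000)]
table = session.parallelize(data, include_key=True, partition=4)
table.schema = {"header": ["x0"], "anonymous_header": ["guest_9999_x0"]}

tool = QuantileBinningTool(bin_nums=5)
tool.fit_split_points(table)   # must run before any quantile query
print(tool.get_median())       # e.g. {'x0': 4.0} for values cycling 0..9
session.stop()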
FATE
FATE-master/python/federatedml/feature/binning/bin_result.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from federatedml.protobuf.generated import feature_binning_param_pb2 from federatedml.util import LOGGER class BinColResults(object): def __init__(self, woe_array=(), iv_array=(), event_count_array=(), non_event_count_array=(), event_rate_array=(), non_event_rate_array=(), iv=None, optimal_metric_array=()): self.woe_array = list(woe_array) self.iv_array = list(iv_array) self.event_count_array = list(event_count_array) self.non_event_count_array = list(non_event_count_array) self.event_rate_array = list(event_rate_array) self.non_event_rate_array = list(non_event_rate_array) self.split_points = None if iv is None: iv = 0 for idx, woe in enumerate(self.woe_array): non_event_rate = non_event_count_array[idx] event_rate = event_rate_array[idx] iv += (non_event_rate - event_rate) * woe self.iv = iv self._bin_anonymous = None self.optimal_metric_array = list(optimal_metric_array) self.missing = False self.bin_count = None @property def bin_anonymous(self): if self.split_points is None or len(self.split_points) == 0: return [] if self._bin_anonymous is None: return ["bin_" + str(i) for i in range(len(self.split_points))] return self._bin_anonymous @bin_anonymous.setter def bin_anonymous(self, x): self._bin_anonymous = x def set_split_points(self, split_points): self.split_points = split_points def set_optimal_metric(self, metric_array): self.optimal_metric_array = metric_array def set_missing(self, if_missing): self.missing = if_missing def get_split_points(self): return np.array(self.split_points) @property def is_woe_monotonic(self): """ Check the woe is monotonic or not """ woe_array = self.woe_array if len(woe_array) <= 1: return True is_increasing = all(x <= y for x, y in zip(woe_array, woe_array[1:])) is_decreasing = all(x >= y for x, y in zip(woe_array, woe_array[1:])) return is_increasing or is_decreasing @property def bin_nums(self): if self.bin_count is not None: return self.bin_count if self.split_points is not None: bin_num = len(self.split_points) if self.missing: bin_num += 1 return bin_num return len(self.iv_array) def result_dict(self): save_dict = self.__dict__ save_dict['is_woe_monotonic'] = self.is_woe_monotonic save_dict['bin_nums'] = self.bin_nums return save_dict def reconstruct(self, iv_obj): self.woe_array = list(iv_obj.woe_array) self.iv_array = list(iv_obj.iv_array) self.event_count_array = list(iv_obj.event_count_array) self.non_event_count_array = list(iv_obj.non_event_count_array) self.event_rate_array = list(iv_obj.event_rate_array) self.non_event_rate_array = list(iv_obj.non_event_rate_array) self.split_points = list(iv_obj.split_points) self.iv = iv_obj.iv self.bin_count = iv_obj.bin_nums # new attribute since ver 1.10 if hasattr(iv_obj, "optimal_metric_array"): self.optimal_metric_array = list(iv_obj.optimal_metric_array) def generate_pb_dict(self): result = { "woe_array": self.woe_array, "iv_array": 
self.iv_array, "event_count_array": self.event_count_array, "non_event_count_array": self.non_event_count_array, "event_rate_array": self.event_rate_array, "non_event_rate_array": self.non_event_rate_array, "split_points": self.split_points, "iv": self.iv, "is_woe_monotonic": self.is_woe_monotonic, "bin_nums": self.bin_nums, "bin_anonymous": self.bin_anonymous, "optimal_metric_array": self.optimal_metric_array } return result class SplitPointsResult(object): def __init__(self): self.split_results = {} self.optimal_metric = {} def put_col_split_points(self, col_name, split_points): self.split_results[col_name] = split_points def put_col_optimal_metric_array(self, col_name, metric_array): self.optimal_metric[col_name] = metric_array @property def all_split_points(self): return self.split_results @property def all_optimal_metric(self): return self.optimal_metric def get_split_points_array(self, col_names): split_points_result = [] for col_name in col_names: if col_name not in self.split_results: continue split_points_result.append(self.split_results[col_name]) return np.array(split_points_result) def to_json(self): return {k: list(v) for k, v in self.split_results.items()} class BinResults(object): def __init__(self): self.all_cols_results = {} # {col_name: BinColResult} self.role = '' self.party_id = '' def set_role_party(self, role, party_id): self.role = role self.party_id = party_id def put_col_results(self, col_name, col_results: BinColResults): ori_col_results = self.all_cols_results.get(col_name) if ori_col_results is not None: col_results.set_split_points(ori_col_results.get_split_points()) self.all_cols_results[col_name] = col_results def put_col_split_points(self, col_name, split_points): col_results = self.all_cols_results.get(col_name, BinColResults()) col_results.set_split_points(split_points) self.all_cols_results[col_name] = col_results def put_col_missing(self, col_name, if_missing): col_results = self.all_cols_results.get(col_name, BinColResults()) col_results.set_missing(if_missing) self.all_cols_results[col_name] = col_results def query_split_points(self, col_name): col_results = self.all_cols_results.get(col_name) if col_results is None: LOGGER.warning("Querying non-exist split_points") return None return col_results.split_points def put_optimal_metric_array(self, col_name, metric_array): col_results = self.all_cols_results.get(col_name, BinColResults()) col_results.set_optimal_metric(metric_array) self.all_cols_results[col_name] = col_results @property def all_split_points(self): results = {} for col_name, col_result in self.all_cols_results.items(): results[col_name] = col_result.get_split_points() return results @property def all_ivs(self): return [(col_name, x.iv) for col_name, x in self.all_cols_results.items()] @property def all_woes(self): return {col_name: x.woe_array for col_name, x in self.all_cols_results.items()} @property def all_monotonic(self): return {col_name: x.is_woe_monotonic for col_name, x in self.all_cols_results.items()} @property def all_optimal_metric(self): return {col_name: x.optimal_metric_array for col_name, x in self.all_cols_results.items()} def summary(self, split_points=None): if split_points is None: split_points = {} for col_name, x in self.all_cols_results.items(): sp = x.get_split_points().tolist() split_points[col_name] = sp # split_points = {col_name: x.split_points for col_name, x in self.all_cols_results.items()} return {"iv": self.all_ivs, "woe": self.all_woes, "monotonic": self.all_monotonic, "split_points": split_points} def 
generated_pb(self, split_points=None): col_result_dict = {} if split_points is not None: for col_name, sp in split_points.items(): self.put_col_split_points(col_name, sp) for col_name, col_bin_result in self.all_cols_results.items(): bin_res_dict = col_bin_result.generate_pb_dict() # LOGGER.debug(f"col name: {col_name}, bin_res_dict: {bin_res_dict}") col_result_dict[col_name] = feature_binning_param_pb2.IVParam(**bin_res_dict) # LOGGER.debug("In generated_pb, role: {}, party_id: {}".format(self.role, self.party_id)) result_pb = feature_binning_param_pb2.FeatureBinningResult(binning_result=col_result_dict, role=self.role, party_id=str(self.party_id)) return result_pb def reconstruct(self, result_pb): self.role = result_pb.role self.party_id = result_pb.party_id binning_result = dict(result_pb.binning_result) for col_name, col_bin_result in binning_result.items(): col_bin_obj = BinColResults() col_bin_obj.reconstruct(col_bin_result) self.all_cols_results[col_name] = col_bin_obj return self def update_anonymous(self, anonymous_header_dict): all_cols_results = dict() for col_name, col_bin_result in self.all_cols_results.items(): updated_col_name = anonymous_header_dict[col_name] all_cols_results[updated_col_name] = col_bin_result self.all_cols_results = all_cols_results return self class MultiClassBinResult(BinResults): def __init__(self, labels): super().__init__() self.labels = labels if len(self.labels) == 2: self.is_multi_class = False self.bin_results = [BinResults()] else: self.is_multi_class = True self.bin_results = [BinResults() for _ in range(len(self.labels))] def set_role_party(self, role, party_id): self.role = role self.party_id = party_id for br in self.bin_results: br.set_role_party(role, party_id) def put_col_results(self, col_name, col_results: BinColResults, label_idx=0): self.bin_results[label_idx].put_col_results(col_name, col_results) def summary(self, split_points=None): if not self.is_multi_class: return {"result": self.bin_results[0].summary(split_points)} return {label: self.bin_results[label_idx].summary(split_points) for label_idx, label in enumerate(self.labels)} def put_col_split_points(self, col_name, split_points, label_idx=None): if label_idx is None: for br in self.bin_results: br.put_col_split_points(col_name, split_points) else: self.bin_results[label_idx].put_col_split_points(col_name, split_points) def put_col_missing(self, col_name, if_missing, label_idx=None): if label_idx is None: for br in self.bin_results: br.put_col_missing(col_name, if_missing) else: self.bin_results[label_idx].put_col_missing(col_name, if_missing) def put_optimal_metric_array(self, col_name, metric_array, label_idx=None): if label_idx is None: for br in self.bin_results: br.put_optimal_metric_array(col_name, metric_array) else: self.bin_results[label_idx].put_optimal_metric_array(col_name, metric_array) def generated_pb_list(self, split_points=None): res = [] for br in self.bin_results: res.append(br.generated_pb(split_points)) return res @staticmethod def reconstruct(result_pb, labels=None): if not isinstance(result_pb, list): result_pb = [result_pb] if labels is None: if len(result_pb) <= 1: labels = [0, 1] else: labels = list(range(len(result_pb))) result = MultiClassBinResult(labels) for idx, pb in enumerate(result_pb): result.bin_results[idx].reconstruct(pb) return result def update_anonymous(self, anonymous_header_dict): for idx in range(len(self.bin_results)): self.bin_results[idx].update_anonymous(anonymous_header_dict) @property def all_split_points(self): return 
self.bin_results[0].all_split_points
13,139
36.758621
100
py
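BinColResults above stores per-bin WOE plus the aggregated IV, accumulated in its constructor as iv += (non_event_rate - event_rate) * woe. The self-contained sketch below reproduces that bookkeeping from raw counts; the helper name and the 0.5 adjustment for empty bins (borrowed from the IV heap node later in this section) are illustrative assumptions, not FATE API.

import math

def woe_iv_from_counts(event_counts, non_event_counts, adjustment=0.5):
    # Per-bin event/non-event rates are shares of the respective totals.
    event_total = sum(event_counts)
    non_event_total = sum(non_event_counts)
    woe_array, iv = [], 0.0
    for e, ne in zip(event_counts, non_event_counts):
        if e == 0 or ne == 0:          # avoid log(0) on empty cells
            e, ne = e + adjustment, ne + adjustment
        py, pn = e / event_total, ne / non_event_total
        woe = math.log(pn / py)        # sign convention matching the iv sum above
        woe_array.append(woe)
        iv += (pn - py) * woe
    return woe_array, iv

print(woe_iv_from_counts([10, 40, 50], [60, 30, 10]))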
FATE
FATE-master/python/federatedml/feature/binning/test/quantile_binning_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # import unittest import uuid import numpy as np from fate_arch.session import computing_session as session from fate_arch.session import Session from federatedml.feature.binning.quantile_binning import QuantileBinning from federatedml.param.feature_binning_param import FeatureBinningParam from federatedml.feature.instance import Instance from federatedml.feature.sparse_vector import SparseVector from federatedml.util import consts bin_num = 10 TEST_LARGE_DATA = False # job_id = str(uuid.uuid1()) # session.init(job_id, 1) class TestQuantileBinning(unittest.TestCase): def setUp(self): self.job_id = str(uuid.uuid1()) # session = Session.create(0, 0).init_computing("abc").computing session.init(self.job_id) def test_binning_correctness(self): bin_obj = self._bin_obj_generator() small_table = self.gen_data(10000, 50, 2) split_points = bin_obj.fit_split_points(small_table) expect_split_points = list((range(1, bin_num))) expect_split_points = [float(x) for x in expect_split_points] for _, s_ps in split_points.items(): s_ps = s_ps.tolist() self.assertListEqual(s_ps, expect_split_points) def test_large_binning(self): if TEST_LARGE_DATA: bin_obj = self._bin_obj_generator() small_table = self.gen_data(100000, 1000, 48, use_random=True) _ = bin_obj.fit_split_points(small_table) def test_sparse_data(self): feature_num = 50 bin_obj = self._bin_obj_generator() small_table = self.gen_data(10000, feature_num, 2, is_sparse=True) split_points = bin_obj.fit_split_points(small_table) expect_split_points = list((range(1, bin_num))) expect_split_points = [float(x) for x in expect_split_points] for feature_name, s_ps in split_points.items(): if int(feature_name) >= feature_num: continue s_ps = s_ps.tolist() self.assertListEqual(s_ps, expect_split_points) def test_abnormal(self): abnormal_list = [3, 4] bin_obj = self._bin_obj_generator(abnormal_list=abnormal_list, this_bin_num=bin_num - len(abnormal_list)) small_table = self.gen_data(10000, 50, 2) split_points = bin_obj.fit_split_points(small_table) expect_split_points = list((range(1, bin_num))) expect_split_points = [float(x) for x in expect_split_points if x not in abnormal_list] for _, s_ps in split_points.items(): s_ps = s_ps.tolist() self.assertListEqual(s_ps, expect_split_points) def _bin_obj_generator(self, abnormal_list: list = None, this_bin_num=bin_num): bin_param = FeatureBinningParam(method='quantile', compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD, head_size=consts.DEFAULT_HEAD_SIZE, error=consts.DEFAULT_RELATIVE_ERROR, bin_indexes=-1, bin_num=this_bin_num) bin_obj = QuantileBinning(bin_param, abnormal_list=abnormal_list) return bin_obj def gen_data(self, data_num, feature_num, partition, is_sparse=False, use_random=False): data = [] shift_iter = 0 header = [str(i) for i in range(feature_num)] anonymous_header = ["guest_9999_x" + str(i) for i in range(feature_num)] for data_key in range(data_num): value = data_key % bin_num if value == 0: if shift_iter % 
bin_num == 0: value = bin_num - 1 shift_iter += 1 if not is_sparse: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) inst = Instance(inst_id=data_key, features=features, label=data_key % 2) else: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) data_index = [x for x in range(feature_num)] sparse_inst = SparseVector(data_index, data=features, shape=10 * feature_num) inst = Instance(inst_id=data_key, features=sparse_inst, label=data_key % 2) header = [str(i) for i in range(feature_num * 10)] data.append((data_key, inst)) result = session.parallelize(data, include_key=True, partition=partition) result.schema = {'header': header, "anonymous_header": anonymous_header} return result def tearDown(self): session.stop() # try: # session.cleanup("*", self.job_id, True) # except EnvironmentError: # pass # try: # session.cleanup("*", self.job_id, False) # except EnvironmentError: # pass if __name__ == '__main__': unittest.main()
5,697
37.5
113
py
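The correctness test above expects split points 1..9 when the data cycles through 0..9. The same expectation can be checked with the summary class alone, skipping the computing session; the constructor values below mirror the defaults the test passes through FeatureBinningParam.

import numpy as np

from federatedml.feature.binning.quantile_summaries import QuantileSummaries

summary = QuantileSummaries(compress_thres=10000, head_size=5000, error=0.0001)
for v in np.tile(np.arange(10, dtype=float), 1000):
    summary.insert(v)

# Decile queries should land on (or within the error bound of) 1.0 .. 9.0
print([summary.query(q / 10) for q in range(1, 10)])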
FATE
FATE-master/python/federatedml/feature/binning/test/iv_calculator_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # import unittest import uuid import numpy as np from fate_arch.session import computing_session as session from fate_arch.session import Session from federatedml.feature.binning.quantile_binning import QuantileBinning from federatedml.feature.binning.iv_calculator import IvCalculator from federatedml.param.feature_binning_param import FeatureBinningParam from federatedml.feature.instance import Instance from federatedml.feature.sparse_vector import SparseVector from federatedml.util import consts bin_num = 10 class TestIvCalculator(unittest.TestCase): def setUp(self): self.job_id = str(uuid.uuid1()) # session = Session.create(0, 0).init_computing("abc").computing session.init(self.job_id) def test_iv_calculator(self): bin_obj = self._bin_obj_generator() small_table = self.gen_data(10000, 50, 2) split_points = bin_obj.fit_split_points(small_table) iv_calculator = IvCalculator(adjustment_factor=0.5, role="guest", party_id=9999) ivs = iv_calculator.cal_local_iv(small_table, split_points) print(f"iv result: {ivs.summary()}") # def test_sparse_data(self): # feature_num = 50 # bin_obj = self._bin_obj_generator() # small_table = self.gen_data(10000, feature_num, 2, is_sparse=True) # split_points = bin_obj.fit_split_points(small_table) # expect_split_points = list((range(1, bin_num))) # expect_split_points = [float(x) for x in expect_split_points] # # for feature_name, s_ps in split_points.items(): # if int(feature_name) >= feature_num: # continue # s_ps = s_ps.tolist() # self.assertListEqual(s_ps, expect_split_points) # # def test_abnormal(self): # abnormal_list = [3, 4] # bin_obj = self._bin_obj_generator(abnormal_list=abnormal_list, this_bin_num=bin_num - len(abnormal_list)) # small_table = self.gen_data(10000, 50, 2) # split_points = bin_obj.fit_split_points(small_table) # expect_split_points = list((range(1, bin_num))) # expect_split_points = [float(x) for x in expect_split_points if x not in abnormal_list] # # for _, s_ps in split_points.items(): # s_ps = s_ps.tolist() # self.assertListEqual(s_ps, expect_split_points) # def _bin_obj_generator(self, abnormal_list: list = None, this_bin_num=bin_num): bin_param = FeatureBinningParam(method='quantile', compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD, head_size=consts.DEFAULT_HEAD_SIZE, error=consts.DEFAULT_RELATIVE_ERROR, bin_indexes=-1, bin_num=this_bin_num) bin_obj = QuantileBinning(bin_param, abnormal_list=abnormal_list) return bin_obj def gen_data(self, data_num, feature_num, partition, is_sparse=False, use_random=False): data = [] shift_iter = 0 header = [str(i) for i in range(feature_num)] anonymous_header = ["guest_9999_x" + str(i) for i in range(feature_num)] for data_key in range(data_num): value = data_key % bin_num if value == 0: if shift_iter % bin_num == 0: value = bin_num - 1 shift_iter += 1 if not is_sparse: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) inst = 
Instance(inst_id=data_key, features=features, label=data_key % 2) else: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) data_index = [x for x in range(feature_num)] sparse_inst = SparseVector(data_index, data=features, shape=10 * feature_num) inst = Instance(inst_id=data_key, features=sparse_inst, label=data_key % 2) header = [str(i) for i in range(feature_num * 10)] data.append((data_key, inst)) result = session.parallelize(data, include_key=True, partition=partition) result.schema = {'header': header, "anonymous_header": anonymous_header} return result def tearDown(self): session.stop() if __name__ == '__main__': unittest.main()
5,189
39.546875
115
py
FATE
FATE-master/python/federatedml/feature/binning/test/bucket_binning_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import numpy as np from fate_arch.session import computing_session as session session.init("123") from federatedml.feature.binning.bucket_binning import BucketBinning from federatedml.feature.instance import Instance from federatedml.param.feature_binning_param import FeatureBinningParam from federatedml.feature.binning.iv_calculator import IvCalculator class TestBucketBinning(unittest.TestCase): def setUp(self): # eggroll.init("123") self.data_num = 1000 self.feature_num = 200 self.bin_num = 10 final_result = [] numpy_array = [] for i in range(self.data_num): if 100 < i < 500: continue tmp = i * np.ones(self.feature_num) inst = Instance(inst_id=i, features=tmp, label=i % 2) tmp_pair = (str(i), inst) final_result.append(tmp_pair) numpy_array.append(tmp) table = session.parallelize(final_result, include_key=True, partition=10) header = ['x' + str(i) for i in range(self.feature_num)] anonymous_header = ["guest_9999_x" + str(i) for i in range(self.feature_num)] self.table = table self.table.schema = {'header': header, "anonymous_header": anonymous_header} self.numpy_table = np.array(numpy_array) self.cols = [1, 2] def test_bucket_binning(self): bin_param = FeatureBinningParam(bin_num=self.bin_num, bin_indexes=self.cols) bucket_bin = BucketBinning(bin_param) split_points = bucket_bin.fit_split_points(self.table) split_point = list(split_points.values())[0] for kth, s_p in enumerate(split_point): expect_s_p = (self.data_num - 1) / self.bin_num * (kth + 1) self.assertEqual(s_p, expect_s_p) iv_calculator = IvCalculator(0.5, "guest", 9999) iv_res = iv_calculator.cal_local_iv(self.table, split_points=split_points, bin_cols_map={"x1": 1, "x2": 2}) # for col_name, iv_attr in bucket_bin.bin_results.all_cols_results.items(): for col_name, iv_attr in iv_res.bin_results[0].all_cols_results.items(): # print('col_name: {}, iv: {}, woe_array: {}'.format(col_name, iv_attr.iv, iv_attr.woe_array)) assert abs(iv_attr.iv - 0.00364386529386804) < 1e-6 def tearDown(self): # self.table.destroy() session.stop() if __name__ == '__main__': unittest.main()
3,280
37.151163
106
py
FATE
FATE-master/python/federatedml/feature/binning/test/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
661
35.777778
75
py
FATE
FATE-master/python/federatedml/feature/binning/test/quantile_summaries_test.py
import math import unittest import numpy as np from federatedml.feature.binning.quantile_summaries import QuantileSummaries class TestQuantileSummaries(unittest.TestCase): def setUp(self): self.percentile_rate = list(range(0, 100, 1)) self.data_num = 10000 np.random.seed(15) self.table = np.random.randn(self.data_num) compress_thres = 1000 head_size = 500 self.error = 0.00001 self.quantile_summaries = QuantileSummaries(compress_thres=compress_thres, head_size=head_size, error=self.error) def test_correctness(self): for num in self.table: self.quantile_summaries.insert(num) x = sorted(self.table) for q_num in self.percentile_rate: percent = q_num / 100 sk2 = self.quantile_summaries.query(percent) min_rank = math.floor((percent - 2 * self.error) * self.data_num) max_rank = math.ceil((percent + 2 * self.error) * self.data_num) if min_rank < 0: min_rank = 0 if max_rank > len(x) - 1: max_rank = len(x) - 1 min_value, max_value = x[min_rank], x[max_rank] try: self.assertTrue(min_value <= sk2 <= max_value) except AssertionError as e: print(f"min_value: {min_value}, max_value: {max_value}, sk2: {sk2}, percent: {percent}," f"total_max_value: {x[-1]}") raise AssertionError(e) def test_multi(self): for n in range(5): self.table = np.random.randn(self.data_num) compress_thres = 10000 head_size = 5000 self.quantile_summaries = QuantileSummaries(compress_thres=compress_thres, head_size=head_size, error=self.error) self.test_correctness() if __name__ == '__main__': unittest.main()
2,110
35.396552
104
py
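The same error bound also holds after merging partial summaries, which is how fit_summary combines per-partition results via mapReducePartitions. A sketch, assuming QuantileSummaries.merge returns a merged summary as the SparseQuantileSummaries.merge override earlier in this section implies:

import numpy as np

from federatedml.feature.binning.quantile_summaries import QuantileSummaries

def build(part, error=0.001):
    s = QuantileSummaries(compress_thres=1000, head_size=500, error=error)
    for v in part:
        s.insert(v)
    return s

data = np.random.randn(20000)
merged = build(data[:10000]).merge(build(data[10000:]))
print(merged.query(0.25), np.percentile(data, 25))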
FATE
FATE-master/python/federatedml/feature/binning/test/base_binning_test.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from fate_arch.session import computing_session as session session.init("123") from federatedml.feature.instance import Instance from federatedml.statistic.statics import MultivariateStatisticalSummary class TestBaseBinningFunctions(unittest.TestCase): def setUp(self): self.table_list = [] def _gen_data(self, label_histogram: dict, partition=10): label_list = [] data_num = 0 for y, n in label_histogram.items(): data_num += n label_list.extend([y] * n) np.random.shuffle(label_list) data_insts = [] for i in range(data_num): features = np.random.randn(10) inst = Instance(features=features, label=label_list[i]) data_insts.append((i, inst)) result = session.parallelize(data_insts, include_key=True, partition=partition) result.schema = {'header': ['d' + str(x) for x in range(10)]} self.table_list.append(result) return result def test_histogram(self): histograms = [ {0: 100, 1: 100}, {0: 9700, 1: 300}, {0: 2000, 1: 18000}, {0: 8000, 1: 2000} ] partitions = [10, 1, 48, 32] for i, h in enumerate(histograms): data = self._gen_data(h, partitions[i]) summary_obj = MultivariateStatisticalSummary(data_instances=data) label_hist = summary_obj.get_label_histogram() self.assertDictEqual(h, label_hist) def tearDown(self): # for table in self.table_list: # table.destroy() session.stop() if __name__ == '__main__': unittest.main()
2,368
29.371795
87
py
FATE
FATE-master/python/federatedml/feature/binning/test/test_optimal_binning/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
661
35.777778
75
py
FATE
FATE-master/python/federatedml/feature/binning/test/test_optimal_binning/hetero_optimal_feature_binning_test.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numpy as np from fate_arch.common import Party from fate_arch.session import PartiesInfo from fate_arch.session import computing_session as session from fate_arch.session import Session from federatedml.feature.hetero_feature_binning.hetero_binning_guest import HeteroFeatureBinningGuest from federatedml.feature.hetero_feature_binning.hetero_binning_host import HeteroFeatureBinningHost from federatedml.feature.instance import Instance from federatedml.feature.sparse_vector import SparseVector from federatedml.util import consts GUEST = 'guest' HOST = 'host' class TestHeteroFeatureBinning(): def __init__(self, role, guest_id, host_id): self.role = role self.guest_id = guest_id self.host_id = host_id self.model_name = 'HeteroFeatureBinning' self.args = None self.table_list = [] self.binning_obj = None def _gen_data(self, data_num, feature_num, partition, expect_ratio, is_sparse=False, use_random=False): data = [] shift_iter = 0 header = [str(i) for i in range(feature_num)] # bin_num = 3 label_count = {} # expect_ratio = { # 0: (1, 9), # 1: (1, 1), # 2: (9, 1) # } bin_num = len(expect_ratio) for data_key in range(data_num): value = data_key % bin_num if value == 0: if shift_iter % bin_num == 0: value = bin_num - 1 shift_iter += 1 if not is_sparse: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) label = self.__gen_label(value, label_count, expect_ratio) inst = Instance(inst_id=data_key, features=features, label=label) else: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) data_index = [x for x in range(feature_num)] sparse_inst = SparseVector(data_index, data=features, shape=10 * feature_num) label = self.__gen_label(value, label_count, expect_ratio) inst = Instance(inst_id=data_key, features=sparse_inst, label=label) header = [str(i) for i in range(feature_num * 10)] data.append((data_key, inst)) result = session.parallelize(data, include_key=True, partition=partition) result.schema = {'header': header} self.table_list.append(result) return result def __gen_label(self, value, label_count: dict, expect_ratio: dict): """ Generate label according to expect event and non-event ratio """ if value not in expect_ratio: return np.random.randint(0, 2) expect_zero, expect_one = expect_ratio[value] if expect_zero == 0: return 1 if expect_one == 0: return 0 if value not in label_count: label = 1 if expect_one >= expect_zero else 0 label_count[value] = [0, 0] label_count[value][label] += 1 return label curt_zero, curt_one = label_count[value] if curt_zero == 0: label_count[value][0] += 1 return 0 if curt_one == 0: label_count[value][1] += 1 return 1 if curt_zero / curt_one <= expect_zero / expect_one: label_count[value][0] += 1 return 0 else: label_count[value][1] += 1 return 1 def _make_param_dict(self, process_type='fit'): 
guest_componet_param = { "local": { "role": self.role, "party_id": self.guest_id if self.role == GUEST else self.host_id }, "role": { "guest": [ self.guest_id ], "host": [ self.host_id ] }, "FeatureBinningParam": { "method": consts.OPTIMAL, "optimal_binning_param": { "metric_method": "gini" } }, "process_method": process_type, } return guest_componet_param def run_data(self, table_args, run_type='fit'): if self.binning_obj is not None: return self.binning_obj if self.role == GUEST: binning_obj = HeteroFeatureBinningGuest() else: binning_obj = HeteroFeatureBinningHost() # param_obj = FeatureBinningParam(method=consts.QUANTILE) # binning_obj.model_param = param_obj guest_param = self._make_param_dict(run_type) binning_obj.run(guest_param, table_args) print("current binning method: {}, split_points: {}".format(binning_obj.model_param.method, binning_obj.binning_obj.split_points)) self.binning_obj = binning_obj return binning_obj def test_feature_binning(self): data_num = 1000 feature_num = 50 partition = 48 expect_ratio = { 0: (1, 9), 1: (1, 1), 2: (9, 1) } data_inst = self._gen_data(data_num, feature_num, partition, expect_ratio) table_args = {"data": {self.model_name: {"data": data_inst}}} self.args = table_args binning_obj = self.run_data(table_args, 'fit') result_data = binning_obj.save_data() fit_data = result_data.collect() fit_result = {} for k, v in fit_data: fit_result[k] = v.features fit_model = {self.model_name: binning_obj.export_model()} transform_args = { 'data': { self.model_name: { 'data': data_inst } }, 'model': fit_model } # binning_guest = HeteroFeatureBinningGuest() transform_obj = self.run_data(transform_args, 'transform') # guest_param = self._make_param_dict('transform') # binning_guest.run(guest_param, guest_args) result_data = transform_obj.save_data() transformed_data = result_data.collect() print("data in transform") for k, v in transformed_data: fit_v: np.ndarray = fit_result.get(k) # print("k: {}, v: {}, fit_v: {}".format(k, v.features, fit_v)) assert all(fit_v == v.features) return fit_model, transform_obj def tearDown(self): # for table in self.table_list: # table.destroy() print("Finish testing") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-r', '--role', required=False, type=str, help="role", choices=(GUEST, HOST), default=GUEST) parser.add_argument('-gid', '--gid', required=False, type=str, help="guest party id", default='9999') parser.add_argument('-hid', '--hid', required=False, type=str, help="host party id", default='10000') parser.add_argument('-j', '--job_id', required=True, type=str, help="job_id") args = parser.parse_args() job_id = args.job_id guest_id = args.gid host_id = args.hid role = args.role with Session.create(0, 0) as session: session.init_computing(job_id) session.init_federation(federation_session_id=job_id, parties_info=PartiesInfo( local=Party(role, guest_id if role == GUEST else host_id), role_to_parties={"host": [Party("host", host_id)], "guest": [Party("guest", guest_id)]} )) test_obj = TestHeteroFeatureBinning(role, guest_id, host_id) # homo_obj.test_homo_lr() test_obj.test_feature_binning() test_obj.tearDown()
8,829
34.748988
107
py
FATE
FATE-master/python/federatedml/feature/binning/test/test_quantile_binning_module/homo_feature_binning_test.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numpy as np from fate_arch.session import computing_session as session from fate_arch.computing import ComputingType from fate_arch.session import Session from federatedml.feature.homo_feature_binning import homo_split_points from federatedml.feature.instance import Instance from federatedml.feature.sparse_vector import SparseVector from federatedml.util import consts GUEST = 'guest' HOST = 'host' ARBITER = 'arbiter' host_id_list = ['10000', '10001', '10002'] class TestHomoFeatureBinning(): def __init__(self, role, own_id): self.role = role self.party_id = own_id self.model_name = 'HomoFeatureBinning' self.args = None self.table_list = [] def _gen_data(self, data_num, feature_num, partition, expect_split_points, is_sparse=False, use_random=False): data = [] shift_iter = 0 header = [str(i) for i in range(feature_num)] bin_num = len(expect_split_points) for data_key in range(data_num): value = expect_split_points[data_key % bin_num] if value == expect_split_points[-1]: if shift_iter % bin_num == 0: value = expect_split_points[0] shift_iter += 1 if not is_sparse: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) inst = Instance(inst_id=data_key, features=features, label=data_key % 2) else: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) data_index = [x for x in range(feature_num)] sparse_inst = SparseVector(data_index, data=features, shape=feature_num) inst = Instance(inst_id=data_key, features=sparse_inst, label=data_key % 2) header = [str(i) for i in range(feature_num)] data.append((data_key, inst)) result = session.parallelize(data, include_key=True, partition=partition) result.schema = {'header': header} self.table_list.append(result) return result def test_homo_split_points(self, is_sparse=False): # binning_obj = HomoSplitPointCalculator(role=self.role) if self.role == consts.ARBITER: binning_obj = homo_split_points.HomoFeatureBinningServer() else: binning_obj = homo_split_points.HomoFeatureBinningClient() guest_split_points = (1, 2, 3) host_split_points = [(4, 5, 6), (7, 8, 9), (10, 11, 12)] expect_agg_sp = [guest_split_points] expect_agg_sp.extend(host_split_points) expect_agg_sp = np.mean(expect_agg_sp, axis=0) if self.role == GUEST: data_inst = self._gen_data(1000, 10, 48, expect_split_points=guest_split_points, is_sparse=is_sparse) elif self.role == ARBITER: data_inst = None else: host_idx = host_id_list.index(self.party_id) data_inst = self._gen_data(1000, 10, 48, expect_split_points=host_split_points[host_idx], is_sparse=is_sparse) agg_sp = binning_obj.average_run(data_inst, bin_num=3) for col_name, col_agg_sp in agg_sp.items(): # assert np.all(col_agg_sp == expect_agg_sp) assert np.linalg.norm(np.array(col_agg_sp) - np.array(expect_agg_sp)) < consts.FLOAT_ZERO print("is_sparse: {}, split_point detected 
success".format(is_sparse)) transferred_table, split_points_result, bin_sparse = binning_obj.convert_feature_to_bin(data_inst, agg_sp) if self.role == ARBITER: assert transferred_table == split_points_result == bin_sparse is None else: transferred_data = list(transferred_table.collect())[:10] print("transferred_data: {}, split_points_result: {}, bin_sparse: {}".format( [x[1].features for x in transferred_data], split_points_result, bin_sparse )) return def test_query_quantiles(self, is_sparse=False): if self.role == consts.ARBITER: binning_obj = homo_split_points.HomoFeatureBinningServer() else: binning_obj = homo_split_points.HomoFeatureBinningClient() guest_split_points = (1, 2, 3) host_split_points = [(4, 5, 6), (7, 8, 9), (10, 11, 12)] if self.role == GUEST: data_inst = self._gen_data(1000, 10, 16, expect_split_points=guest_split_points, is_sparse=is_sparse, use_random=True) elif self.role == ARBITER: data_inst = None else: host_idx = host_id_list.index(self.party_id) data_inst = self._gen_data(1000, 10, 16, expect_split_points=host_split_points[host_idx], is_sparse=is_sparse, use_random=True) query_points = binning_obj.query_quantile_points(data_inst, 0.2) print(query_points) def tearDown(self): for table in self.table_list: table.destroy() print("Finish testing") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-r', '--role', required=False, type=str, help="role", choices=(GUEST, HOST, ARBITER), default=GUEST) parser.add_argument('-pid', '--pid', required=True, type=str, help="own party id") parser.add_argument('-j', '--job_id', required=True, type=str, help="job_id") args = parser.parse_args() job_id = args.job_id own_party_id = args.pid role = args.role print("args: {}".format(args)) with Session() as session: session.init_computing(job_id, computing_type=ComputingType.STANDALONE) session.init_federation(job_id, runtime_conf={"local": { "role": role, "party_id": own_party_id }, "role": { "host": [str(x) for x in host_id_list], "guest": [ '9999' ], "arbiter": ['9998'] } }) test_obj = TestHomoFeatureBinning(role, own_party_id) # homo_obj.test_homo_lr() test_obj.test_query_quantiles() # test_obj.test_homo_split_points() # test_obj.test_homo_split_points(is_sparse=True) test_obj.tearDown()
7,576
40.404372
114
py
FATE
FATE-master/python/federatedml/feature/binning/test/test_quantile_binning_module/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
661
35.777778
75
py
FATE
FATE-master/python/federatedml/feature/binning/test/test_quantile_binning_module/hetero_feature_binning_test.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numpy as np from fate_arch.session import computing_session as session from fate_arch.computing import ComputingType from fate_arch.session import Session from federatedml.feature.hetero_feature_binning.hetero_binning_guest import HeteroFeatureBinningGuest from federatedml.feature.hetero_feature_binning.hetero_binning_host import HeteroFeatureBinningHost from federatedml.feature.instance import Instance from federatedml.feature.sparse_vector import SparseVector GUEST = 'guest' HOST = 'host' class TestHeteroFeatureBinning(): def __init__(self, role, guest_id, host_id): self.role = role self.guest_id = guest_id self.host_id = host_id # self.data_num = 10 # self.feature_num = 3 # self.header = ['x' + str(i) for i in range(self.feature_num)] self.model_name = 'HeteroFeatureBinning' # self.args = {"data": {self.model_name: {"data": table}}} self.args = None self.table_list = [] self.binning_obj = None def _gen_data(self, data_num, feature_num, partition, expect_ratio, is_sparse=False, use_random=False): data = [] shift_iter = 0 header = [str(i) for i in range(feature_num)] # bin_num = 3 label_count = {} # expect_ratio = { # 0: (1, 9), # 1: (1, 1), # 2: (9, 1) # } bin_num = len(expect_ratio) for data_key in range(data_num): value = data_key % bin_num if value == 0: if shift_iter % bin_num == 0: value = bin_num - 1 shift_iter += 1 if not is_sparse: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) label = self.__gen_label(value, label_count, expect_ratio) inst = Instance(inst_id=data_key, features=features, label=label) else: if not use_random: features = value * np.ones(feature_num) else: features = np.random.random(feature_num) data_index = [x for x in range(feature_num)] sparse_inst = SparseVector(data_index, data=features, shape=10 * feature_num) label = self.__gen_label(value, label_count, expect_ratio) inst = Instance(inst_id=data_key, features=sparse_inst, label=label) header = [str(i) for i in range(feature_num * 10)] data.append((data_key, inst)) result = session.parallelize(data, include_key=True, partition=partition) result.schema = {'header': header} self.table_list.append(result) return result def __gen_label(self, value, label_count: dict, expect_ratio: dict): """ Generate label according to expect event and non-event ratio """ if value not in expect_ratio: return np.random.randint(0, 2) expect_zero, expect_one = expect_ratio[value] if expect_zero == 0: return 1 if expect_one == 0: return 0 if value not in label_count: label = 1 if expect_one >= expect_zero else 0 label_count[value] = [0, 0] label_count[value][label] += 1 return label curt_zero, curt_one = label_count[value] if curt_zero == 0: label_count[value][0] += 1 return 0 if curt_one == 0: label_count[value][1] += 1 return 1 if curt_zero / curt_one <= expect_zero / expect_one: label_count[value][0] += 1 return 0 
else: label_count[value][1] += 1 return 1 def _make_param_dict(self, process_type='fit'): guest_componet_param = { "local": { "role": self.role, "party_id": self.guest_id if self.role == GUEST else self.host_id }, "role": { "guest": [ self.guest_id ], "host": [ self.host_id ] }, "process_method": process_type, } return guest_componet_param def run_data(self, table_args, run_type='fit'): if self.binning_obj is not None: return self.binning_obj if self.role == GUEST: binning_obj = HeteroFeatureBinningGuest() else: binning_obj = HeteroFeatureBinningHost() guest_param = self._make_param_dict(run_type) binning_obj.run(guest_param, table_args) self.binning_obj = binning_obj return binning_obj def test_feature_binning(self): data_num = 1000 feature_num = 50 partition = 48 expect_ratio = { 0: (1, 9), 1: (1, 1), 2: (9, 1) } data_inst = self._gen_data(data_num, feature_num, partition, expect_ratio) table_args = {"data": {self.model_name: {"data": data_inst}}} self.args = table_args binning_obj = self.run_data(table_args, 'fit') result_data = binning_obj.save_data() fit_data = result_data.collect() fit_result = {} for k, v in fit_data: fit_result[k] = v.features fit_model = {self.model_name: binning_obj.export_model()} transform_args = { 'data': { self.model_name: { 'data': data_inst } }, 'model': fit_model } # binning_guest = HeteroFeatureBinningGuest() transform_obj = self.run_data(transform_args, 'transform') # guest_param = self._make_param_dict('transform') # binning_guest.run(guest_param, guest_args) result_data = transform_obj.save_data() transformed_data = result_data.collect() print("data in transform") for k, v in transformed_data: fit_v: np.ndarray = fit_result.get(k) # print("k: {}, v: {}, fit_v: {}".format(k, v.features, fit_v)) assert all(fit_v == v.features) return fit_model, transform_obj def tearDown(self): for table in self.table_list: table.destroy() print("Finish testing") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-r', '--role', required=False, type=str, help="role", choices=(GUEST, HOST), default=GUEST) parser.add_argument('-gid', '--gid', required=False, type=str, help="guest party id", default='9999') parser.add_argument('-hid', '--hid', required=False, type=str, help="host party id", default='10000') parser.add_argument('-j', '--job_id', required=True, type=str, help="job_id") args = parser.parse_args() job_id = args.job_id guest_id = args.gid host_id = args.hid role = args.role with Session() as session: session.init_computing(job_id, computing_type=ComputingType.STANDALONE) session.init_federation(job_id, runtime_conf={"local": { "role": role, "party_id": guest_id if role == GUEST else host_id }, "role": { "host": [ host_id ], "guest": [ guest_id ] } }) test_obj = TestHeteroFeatureBinning(role, guest_id, host_id) # homo_obj.test_homo_lr() test_obj.test_feature_binning() test_obj.tearDown()
8,716
34.434959
107
py
FATE
FATE-master/python/federatedml/feature/binning/optimal_binning/heap.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import numpy as np

from federatedml.feature.binning.optimal_binning.bucket_info import Bucket
from federatedml.param.feature_binning_param import OptimalBinningParam
from federatedml.util import LOGGER


class HeapNode(object):
    def __init__(self):
        self.left_bucket: Bucket = None
        self.right_bucket: Bucket = None

        self.event_count = 0
        self.non_event_count = 0
        self.score = None

    def cal_score(self):
        raise NotImplementedError("Should not call here")

    @property
    def total_count(self):
        return self.event_count + self.non_event_count


class IvHeapNode(HeapNode):
    def __init__(self, adjustment_factor=0.5):
        super().__init__()
        self.adjustment_factor = adjustment_factor
        self.event_total = 0
        self.non_event_total = 0

    def cal_score(self):
        """
        IV = ∑(py_i - pn_i) * WOE_i, where py_i is the event rate, pn_i is the
        non-event rate and, consistent with the code below,
        WOE_i = log(py_i / pn_i)
        """
        self.event_count = self.left_bucket.event_count + self.right_bucket.event_count
        self.non_event_count = self.left_bucket.non_event_count + self.right_bucket.non_event_count
        if self.total_count == 0:
            self.score = -math.inf
            return

        # if self.left_bucket.left_bound != math.inf and self.right_bucket.right_bound != -math.inf:
        #     if (self.left_bucket.left_bound <= self.right_bucket.right_bound):
        #         self.score = -math.inf
        #         return

        self.event_total = self.left_bucket.event_total
        self.non_event_total = self.left_bucket.non_event_total

        if self.event_count == 0 or self.non_event_count == 0:
            event_rate = 1.0 * (self.event_count + self.adjustment_factor) / max(self.event_total, 1)
            non_event_rate = 1.0 * (self.non_event_count + self.adjustment_factor) / max(self.non_event_total, 1)
        else:
            event_rate = 1.0 * self.event_count / max(self.event_total, 1)
            non_event_rate = 1.0 * self.non_event_count / max(self.non_event_total, 1)
        merge_woe = math.log(event_rate / non_event_rate)
        merge_iv = (event_rate - non_event_rate) * merge_woe
        self.score = self.left_bucket.iv + self.right_bucket.iv - merge_iv


class GiniHeapNode(HeapNode):

    def cal_score(self):
        """
        gini = 1 - ∑(p_i^2) = 1 - (event / total)^2 - (non_event / total)^2
        """
        self.event_count = self.left_bucket.event_count + self.right_bucket.event_count
        self.non_event_count = self.left_bucket.non_event_count + self.right_bucket.non_event_count
        if self.total_count == 0:
            self.score = -math.inf
            return
        # if self.total_count == 0 or self.left_bucket.left_bound == self.right_bucket.right_bound:
        #     self.score = -math.inf
        #     return

        merged_gini = 1 - (1.0 * self.event_count / self.total_count) ** 2 - \
                      (1.0 * self.non_event_count / self.total_count) ** 2
        self.score = merged_gini - self.left_bucket.gini - self.right_bucket.gini


class ChiSquareHeapNode(HeapNode):

    def cal_score(self):
        """
        X^2 = ∑∑(A_ij - E_ij)^2 / E_ij, where E_ij = (N_i / N) * C_j. N is the
        total count of the merged bucket, N_i is the total count of the ith
        bucket and C_j is the count of the jth label in the merged bucket.
A_ij is number of jth label in ith bucket. """ self.event_count = self.left_bucket.event_count + self.right_bucket.event_count self.non_event_count = self.left_bucket.non_event_count + self.right_bucket.non_event_count if self.total_count == 0: self.score = -math.inf return c1 = self.left_bucket.event_count + self.right_bucket.event_count c0 = self.left_bucket.non_event_count + self.right_bucket.non_event_count if c1 == 0 or c0 == 0: self.score = - math.inf return e_left_1 = (self.left_bucket.total_count / self.total_count) * c1 e_left_0 = (self.left_bucket.total_count / self.total_count) * c0 e_right_1 = (self.right_bucket.total_count / self.total_count) * c1 e_right_0 = (self.right_bucket.total_count / self.total_count) * c0 chi_square = np.square(self.left_bucket.event_count - e_left_1) / e_left_1 + \ np.square(self.left_bucket.non_event_count - e_left_0) / e_left_0 + \ np.square(self.right_bucket.event_count - e_right_1) / e_right_1 + \ np.square(self.right_bucket.non_event_count - e_right_0) / e_right_0 LOGGER.debug("chi_sqaure: {}".format(chi_square)) self.score = chi_square def heap_node_factory(optimal_param: OptimalBinningParam, left_bucket=None, right_bucket=None): metric_method = optimal_param.metric_method if metric_method == 'iv': node = IvHeapNode(adjustment_factor=optimal_param.adjustment_factor) elif metric_method == 'gini': node = GiniHeapNode() elif metric_method == 'chi_square': node = ChiSquareHeapNode() else: raise ValueError("metric_method: {} cannot recognized".format(metric_method)) if left_bucket is not None: node.left_bucket = left_bucket if right_bucket is not None: node.right_bucket = right_bucket if (left_bucket and right_bucket) is not None: node.cal_score() else: LOGGER.warning("In heap factory, left_bucket is {}, right bucket is {}, not all of them has been assign".format( left_bucket, right_bucket )) return node class MinHeap(object): def __init__(self): self.size = 0 self.node_list = [] @property def is_empty(self): return self.size <= 0 def insert(self, heap_node: HeapNode): self.size += 1 self.node_list.append(heap_node) self._move_up(self.size - 1) def remove_empty_node(self, removed_bucket_id): for n_id, node in enumerate(self.node_list): if node.left_bucket.idx == removed_bucket_id or node.right_bucket.idx == removed_bucket_id: self.delete_index_k(n_id) def delete_index_k(self, k): if k >= self.size: return if k == self.size - 1: self.node_list.pop() self.size -= 1 else: self.node_list[k] = self.node_list[self.size - 1] self.node_list.pop() self.size -= 1 if k == 0: self._move_down(k) else: parent_idx = self._get_parent_index(k) if self.node_list[parent_idx].score < self.node_list[k].score: self._move_down(k) else: self._move_up(k) def pop(self): min_node = self.node_list[0] if not self.is_empty else None if min_node is not None: self.node_list[0] = self.node_list[self.size - 1] self.node_list.pop() self.size -= 1 self._move_down(0) return min_node def _switch_node(self, idx_1, idx_2): if idx_1 >= self.size or idx_2 >= self.size: return self.node_list[idx_1], self.node_list[idx_2] = self.node_list[idx_2], self.node_list[idx_1] @staticmethod def _get_parent_index(index): if index == 0: return None parent_index = (index - 1) / 2 return int(parent_index) if parent_index >= 0 else None def _get_left_child_idx(self, idx): child_index = (2 * idx) + 1 return child_index if child_index < self.size else None def _get_right_child_idx(self, idx): child_index = (2 * idx) + 2 return child_index if child_index < self.size else None def _move_down(self, curt_idx): 
if curt_idx >= self.size: return min_idx = curt_idx while True: left_child_idx = self._get_left_child_idx(curt_idx) right_child_idx = self._get_right_child_idx(curt_idx) if left_child_idx is not None and self.node_list[left_child_idx].score < self.node_list[curt_idx].score: min_idx = left_child_idx if right_child_idx is not None and self.node_list[right_child_idx].score < self.node_list[min_idx].score: min_idx = right_child_idx if min_idx != curt_idx: self._switch_node(curt_idx, min_idx) curt_idx = min_idx else: break def _move_up(self, curt_idx): if curt_idx >= self.size: return while True: parent_idx = self._get_parent_index(curt_idx) if parent_idx is None: break if self.node_list[curt_idx].score < self.node_list[parent_idx].score: self._switch_node(curt_idx, parent_idx) curt_idx = parent_idx else: break
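

# ---------------------------------------------------------------------------
# Illustrative usage sketch: a minimal check of MinHeap's ordering behavior.
# _StubNode is a hypothetical helper, used here only to exercise the heap
# without building real Bucket statistics; it is not part of the FATE API.
class _StubNode(HeapNode):
    """Hypothetical node carrying a fixed score instead of computing one."""

    def __init__(self, score):
        super().__init__()
        self.score = score


if __name__ == "__main__":
    demo_heap = MinHeap()
    for s in [0.42, 0.07, 0.19, 0.88]:
        demo_heap.insert(_StubNode(s))
    # pop() always yields the node with the smallest score first
    print([demo_heap.pop().score for _ in range(4)])  # [0.07, 0.19, 0.42, 0.88]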
9,675
34.837037
120
py
FATE
FATE-master/python/federatedml/feature/binning/optimal_binning/bucket_info.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math class Bucket(object): def __init__(self, idx=-1, adjustment_factor=0.5, right_bound=-math.inf): self.idx = idx self.left_bound = math.inf self.right_bound = right_bound self.left_neighbor_idx = idx - 1 self.right_neighbor_idx = idx + 1 self.event_count = 0 self.non_event_count = 0 self.adjustment_factor = adjustment_factor self.event_total = None self.non_event_total = None def set_left_neighbor(self, left_idx): self.left_neighbor_idx = left_idx def set_right_neighbor(self, right_idx): self.right_neighbor_idx = right_idx @property def is_mixed(self): return self.event_count > 0 and self.non_event_count > 0 @property def total_count(self): return self.event_count + self.non_event_count def merge(self, other): if other is None: return if other.left_bound < self.left_bound: self.left_bound = other.left_bound if other.right_bound > self.right_bound: self.right_bound = other.right_bound self.event_count += other.event_count self.non_event_count += other.non_event_count return self def add(self, label, value): if label == 1: self.event_count += 1 else: self.non_event_count += 1 if value < self.left_bound: self.left_bound = value if value > self.right_bound: self.right_bound = value @property def iv(self): if self.event_total is None or self.non_event_total is None: raise AssertionError("Bucket's event_total or non_event_total has not been assigned") # only have EVENT records or Non-Event records if self.event_count == 0 or self.non_event_count == 0: event_rate = 1.0 * (self.event_count + self.adjustment_factor) / max(self.event_total, 1) non_event_rate = 1.0 * (self.non_event_count + self.adjustment_factor) / max(self.non_event_total, 1) else: event_rate = 1.0 * self.event_count / max(self.event_total, 1) non_event_rate = 1.0 * self.non_event_count / max(self.non_event_total, 1) woe = math.log(non_event_rate / event_rate) return (non_event_rate - event_rate) * woe @property def gini(self): if self.total_count == 0: return 0 return 1 - (1.0 * self.event_count / self.total_count) ** 2 - \ (1.0 * self.non_event_count / self.total_count) ** 2
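

# ---------------------------------------------------------------------------
# Illustrative self-check sketch: fill two adjacent buckets, merge them, and
# read the merged bucket's IV term. The labels, values, and column-wide totals
# below are made-up toy numbers, not taken from any real dataset.
if __name__ == "__main__":
    left, right = Bucket(idx=0), Bucket(idx=1)
    for label, value in [(1, 0.1), (0, 0.2)]:
        left.add(label, value)
    for label, value in [(1, 0.5), (1, 0.6), (0, 0.7)]:
        right.add(label, value)
    merged = left.merge(right)
    # the iv property requires the column-wide totals to be assigned first
    merged.event_total, merged.non_event_total = 10, 10
    print(merged.total_count, merged.is_mixed)  # 5 True
    print(round(merged.iv, 6))                  # (0.2 - 0.3) * log(0.2 / 0.3) ≈ 0.040547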
3,232
33.763441
113
py
FATE
FATE-master/python/federatedml/feature/binning/optimal_binning/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
661
35.777778
75
py
FATE
FATE-master/python/federatedml/feature/binning/optimal_binning/optimal_binning.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import functools import math import operator import numpy as np from fate_arch.session import computing_session as session from federatedml.feature.binning.base_binning import BaseBinning from federatedml.feature.binning.bucket_binning import BucketBinning from federatedml.feature.binning.optimal_binning import bucket_info from federatedml.feature.binning.optimal_binning import heap from federatedml.feature.binning.quantile_tool import QuantileBinningTool from federatedml.param.feature_binning_param import HeteroFeatureBinningParam, OptimalBinningParam from federatedml.statistic import data_overview from federatedml.statistic import statics from federatedml.util import LOGGER from federatedml.util import consts class OptimalBinning(BaseBinning): def __init__(self, params, abnormal_list=None): super().__init__(params, abnormal_list) """The following lines work only in fitting process""" if isinstance(params, HeteroFeatureBinningParam): self.optimal_param = params.optimal_binning_param self.optimal_param.adjustment_factor = params.adjustment_factor self.optimal_param.max_bin = params.bin_num if math.ceil(1.0 / self.optimal_param.max_bin_pct) > self.optimal_param.max_bin: raise ValueError("Arguments logical error, ceil(1.0/max_bin_pct) " "should be smaller or equal than bin_num") self.adjustment_factor = params.adjustment_factor self.event_total = None self.non_event_total = None self.bucket_lists = {} def fit_split_points(self, data_instances): header = data_overview.get_header(data_instances) anonymous_header = data_overview.get_anonymous_header(data_instances) self._default_setting(header, anonymous_header) if (self.event_total and self.non_event_total) is None: self.event_total, self.non_event_total = self.get_histogram(data_instances) # LOGGER.debug("In fit split points, event_total: {}, non_event_total: {}".format(self.event_total, # self.non_event_total)) bucket_table = self.init_bucket(data_instances) sample_count = data_instances.count() self.fit_buckets(bucket_table, sample_count) self.fit_category_features(data_instances) return self.bin_results.all_split_points def fit_buckets(self, bucket_table, sample_count): if self.optimal_param.metric_method in ['iv', 'gini', 'chi_square']: optimal_binning_method = functools.partial(self.merge_optimal_binning, optimal_param=self.optimal_param, sample_count=sample_count) result_bucket = bucket_table.mapValues(optimal_binning_method) for col_name, (min_heap, bucket_list, non_mixture_num, small_size_num) in result_bucket.collect(): split_points = np.unique([bucket.right_bound for bucket in bucket_list]).tolist() self.bin_results.put_col_split_points(col_name, split_points) metric_array = [node.score for node in min_heap.node_list] self.bin_results.put_col_optimal_metric_array(col_name, metric_array) # LOGGER.debug(f"column {col_name}, split_points: {split_points}, metric array: {metric_array}") 
self.bucket_lists[col_name] = bucket_list else: optimal_binning_method = functools.partial(self.split_optimal_binning, optimal_param=self.optimal_param, sample_count=sample_count) result_bucket = bucket_table.mapValues(optimal_binning_method) for col_name, (bucket_list, non_mixture_num, small_size_num, res_ks_array) in result_bucket.collect(): split_points = np.unique([bucket.right_bound for bucket in bucket_list]).tolist() self.bin_results.put_col_split_points(col_name, split_points) self.bin_results.put_col_optimal_metric_array(col_name, res_ks_array) # LOGGER.debug(f"column {col_name}, split_points: {split_points}, metric array: {res_ks_array}") self.bucket_lists[col_name] = bucket_list return result_bucket def init_bucket(self, data_instances): header = data_overview.get_header(data_instances) anonymous_header = data_overview.get_anonymous_header(data_instances) self._default_setting(header, anonymous_header) init_bucket_param = copy.deepcopy(self.params) init_bucket_param.bin_num = self.optimal_param.init_bin_nums if self.optimal_param.init_bucket_method == consts.QUANTILE: init_binning_obj = QuantileBinningTool(param_obj=init_bucket_param, allow_duplicate=False) else: init_binning_obj = BucketBinning(params=init_bucket_param) init_binning_obj.set_bin_inner_param(self.bin_inner_param) init_split_points = init_binning_obj.fit_split_points(data_instances) is_sparse = data_overview.is_sparse_data(data_instances) bucket_dict = dict() for col_name, sps in init_split_points.items(): bucket_list = [] for idx, sp in enumerate(sps): bucket = bucket_info.Bucket(idx, self.adjustment_factor, right_bound=sp) if idx == 0: bucket.left_bound = -math.inf bucket.set_left_neighbor(None) else: bucket.left_bound = sps[idx - 1] bucket.event_total = self.event_total bucket.non_event_total = self.non_event_total bucket_list.append(bucket) bucket_list[-1].set_right_neighbor(None) bucket_dict[col_name] = bucket_list # LOGGER.debug(f"col_name: {col_name}, length of sps: {len(sps)}, " # f"length of list: {len(bucket_list)}") convert_func = functools.partial(self.convert_data_to_bucket, split_points=init_split_points, headers=self.header, bucket_dict=copy.deepcopy(bucket_dict), is_sparse=is_sparse, get_bin_num_func=self.get_bin_num) bucket_table = data_instances.mapReducePartitions(convert_func, self.merge_bucket_list) return bucket_table @staticmethod def get_histogram(data_instances): static_obj = statics.MultivariateStatisticalSummary(data_instances, cols_index=-1) label_historgram = static_obj.get_label_histogram() event_total = label_historgram.get(1, 0) non_event_total = label_historgram.get(0, 0) # if event_total == 0 or non_event_total == 0: # LOGGER.warning(f"event_total or non_event_total might have errors, event_total: {event_total}," # f" non_event_total: {non_event_total}") return event_total, non_event_total @staticmethod def assign_histogram(bucket_list, event_total, non_event_total): for bucket in bucket_list: bucket.event_total = event_total bucket.non_event_total = non_event_total return bucket_list @staticmethod def merge_bucket_list(list1, list2): if len(list1) != len(list2): raise AssertionError("In merge bucket list, len of two lists are not equal") result = [] for idx, b1 in enumerate(list1): b2 = list2[idx] result.append(b1.merge(b2)) return result @staticmethod def convert_data_to_bucket(data_iter, split_points, headers, bucket_dict, is_sparse, get_bin_num_func): for data_key, instance in data_iter: label = instance.label if not is_sparse: if type(instance).__name__ == 'Instance': 
features = instance.features else: features = instance data_generator = enumerate(features) else: data_generator = instance.features.get_all_data() for idx, col_value in data_generator: col_name = headers[idx] if col_name not in split_points: continue col_split_points = split_points[col_name] bin_num = get_bin_num_func(col_value, col_split_points) bucket = bucket_dict[col_name][bin_num] bucket.add(label, col_value) result = [] for col_name, bucket_list in bucket_dict.items(): result.append((col_name, bucket_list)) return result @staticmethod def merge_optimal_binning(bucket_list, optimal_param: OptimalBinningParam, sample_count): max_item_num = math.floor(optimal_param.max_bin_pct * sample_count) min_item_num = math.ceil(optimal_param.min_bin_pct * sample_count) bucket_dict = {idx: bucket for idx, bucket in enumerate(bucket_list)} final_max_bin = optimal_param.max_bin # LOGGER.debug("Get in merge optimal binning, sample_count: {}, max_item_num: {}, min_item_num: {}," # "final_max_bin: {}".format(sample_count, max_item_num, min_item_num, final_max_bin)) min_heap = heap.MinHeap() def _add_heap_nodes(constraint=None): # LOGGER.debug(f"Add heap nodes, constraint: {}, dict_length: {}".format(constraint, len(bucket_dict))) this_non_mixture_num = 0 this_small_size_num = 0 # Make bucket satisfy mixture condition for i in range(len(bucket_dict)): left_bucket = bucket_dict[i] right_bucket = bucket_dict.get(left_bucket.right_neighbor_idx) if left_bucket.right_neighbor_idx == i: raise RuntimeError("left_bucket's right neighbor == itself") if not left_bucket.is_mixed: this_non_mixture_num += 1 if left_bucket.total_count < min_item_num: this_small_size_num += 1 if right_bucket is None: continue # Violate maximum items constraint if left_bucket.total_count + right_bucket.total_count > max_item_num: continue if constraint == 'mixture': if left_bucket.is_mixed or right_bucket.is_mixed: continue elif constraint == 'single_mixture': if left_bucket.is_mixed and right_bucket.is_mixed: continue elif constraint == 'small_size': if left_bucket.total_count >= min_item_num or right_bucket.total_count >= min_item_num: continue elif constraint == 'single_small_size': if left_bucket.total_count >= min_item_num and right_bucket.total_count >= min_item_num: continue heap_node = heap.heap_node_factory(optimal_param, left_bucket=left_bucket, right_bucket=right_bucket) min_heap.insert(heap_node) return min_heap, this_non_mixture_num, this_small_size_num def _update_bucket_info(b_dict): """ update bucket information """ order_dict = dict() for bucket_idx, item in b_dict.items(): order_dict[bucket_idx] = item.left_bound sorted_order_dict = sorted(order_dict.items(), key=operator.itemgetter(1)) start_idx = 0 for item in sorted_order_dict: bucket_idx = item[0] if start_idx == bucket_idx: start_idx += 1 continue b_dict[start_idx] = b_dict[bucket_idx] b_dict[start_idx].idx = start_idx start_idx += 1 del b_dict[bucket_idx] bucket_num = len(b_dict) for i in range(bucket_num): if i == 0: b_dict[i].set_left_neighbor(None) b_dict[i].set_right_neighbor(i + 1) else: b_dict[i].set_left_neighbor(i - 1) b_dict[i].set_right_neighbor(i + 1) b_dict[bucket_num - 1].set_right_neighbor(None) return b_dict def _merge_heap(constraint=None, aim_var=0): next_id = max(bucket_dict.keys()) + 1 while aim_var > 0 and not min_heap.is_empty: min_node = min_heap.pop() left_bucket = min_node.left_bucket right_bucket = min_node.right_bucket # Some buckets may be already merged if left_bucket.idx not in bucket_dict or right_bucket.idx not in bucket_dict: 
continue new_bucket = bucket_info.Bucket(idx=next_id, adjustment_factor=optimal_param.adjustment_factor) new_bucket = _init_new_bucket(new_bucket, min_node) bucket_dict[next_id] = new_bucket del bucket_dict[left_bucket.idx] del bucket_dict[right_bucket.idx] min_heap.remove_empty_node(left_bucket.idx) min_heap.remove_empty_node(right_bucket.idx) aim_var = _aim_vars_decrease(constraint, new_bucket, left_bucket, right_bucket, aim_var) _add_node_from_new_bucket(new_bucket, constraint) next_id += 1 return min_heap, aim_var def _add_node_from_new_bucket(new_bucket: bucket_info.Bucket, constraint): left_bucket = bucket_dict.get(new_bucket.left_neighbor_idx) right_bucket = bucket_dict.get(new_bucket.right_neighbor_idx) if constraint == 'mixture': if left_bucket is not None and left_bucket.total_count + new_bucket.total_count <= max_item_num: if not left_bucket.is_mixed and not new_bucket.is_mixed: heap_node = heap.heap_node_factory(optimal_param, left_bucket=left_bucket, right_bucket=new_bucket) min_heap.insert(heap_node) if right_bucket is not None and right_bucket.total_count + new_bucket.total_count <= max_item_num: if not right_bucket.is_mixed and not new_bucket.is_mixed: heap_node = heap.heap_node_factory(optimal_param, left_bucket=new_bucket, right_bucket=right_bucket) min_heap.insert(heap_node) elif constraint == 'single_mixture': if left_bucket is not None and left_bucket.total_count + new_bucket.total_count <= max_item_num: if not (left_bucket.is_mixed and new_bucket.is_mixed): heap_node = heap.heap_node_factory(optimal_param, left_bucket=left_bucket, right_bucket=new_bucket) min_heap.insert(heap_node) if right_bucket is not None and right_bucket.total_count + new_bucket.total_count <= max_item_num: if not (right_bucket.is_mixed and new_bucket.is_mixed): heap_node = heap.heap_node_factory(optimal_param, left_bucket=new_bucket, right_bucket=right_bucket) min_heap.insert(heap_node) elif constraint == 'small_size': if left_bucket is not None and left_bucket.total_count + new_bucket.total_count <= max_item_num: if left_bucket.total_count < min_item_num and new_bucket.total_count < min_item_num: heap_node = heap.heap_node_factory(optimal_param, left_bucket=left_bucket, right_bucket=new_bucket) min_heap.insert(heap_node) if right_bucket is not None and right_bucket.total_count + new_bucket.total_count <= max_item_num: if right_bucket.total_count < min_item_num and new_bucket.total_count < min_item_num: heap_node = heap.heap_node_factory(optimal_param, left_bucket=new_bucket, right_bucket=right_bucket) min_heap.insert(heap_node) elif constraint == 'single_small_size': if left_bucket is not None and left_bucket.total_count + new_bucket.total_count <= max_item_num: if left_bucket.total_count < min_item_num or new_bucket.total_count < min_item_num: heap_node = heap.heap_node_factory(optimal_param, left_bucket=left_bucket, right_bucket=new_bucket) min_heap.insert(heap_node) if right_bucket is not None and right_bucket.total_count + new_bucket.total_count <= max_item_num: if right_bucket.total_count < min_item_num or new_bucket.total_count < min_item_num: heap_node = heap.heap_node_factory(optimal_param, left_bucket=new_bucket, right_bucket=right_bucket) min_heap.insert(heap_node) else: if left_bucket is not None and left_bucket.total_count + new_bucket.total_count <= max_item_num: heap_node = heap.heap_node_factory(optimal_param, left_bucket=left_bucket, right_bucket=new_bucket) min_heap.insert(heap_node) if right_bucket is not None and right_bucket.total_count + new_bucket.total_count <= 
max_item_num: heap_node = heap.heap_node_factory(optimal_param, left_bucket=new_bucket, right_bucket=right_bucket) min_heap.insert(heap_node) def _init_new_bucket(new_bucket: bucket_info.Bucket, min_node: heap.HeapNode): new_bucket.left_bound = min_node.left_bucket.left_bound new_bucket.right_bound = min_node.right_bucket.right_bound new_bucket.left_neighbor_idx = min_node.left_bucket.left_neighbor_idx new_bucket.right_neighbor_idx = min_node.right_bucket.right_neighbor_idx new_bucket.event_count = min_node.left_bucket.event_count + min_node.right_bucket.event_count new_bucket.non_event_count = min_node.left_bucket.non_event_count + min_node.right_bucket.non_event_count new_bucket.event_total = min_node.left_bucket.event_total new_bucket.non_event_total = min_node.left_bucket.non_event_total left_neighbor_bucket = bucket_dict.get(new_bucket.left_neighbor_idx) if left_neighbor_bucket is not None: left_neighbor_bucket.right_neighbor_idx = new_bucket.idx right_neighbor_bucket = bucket_dict.get(new_bucket.right_neighbor_idx) if right_neighbor_bucket is not None: right_neighbor_bucket.left_neighbor_idx = new_bucket.idx return new_bucket def _aim_vars_decrease(constraint, new_bucket: bucket_info.Bucket, left_bucket, right_bucket, aim_var): if constraint in ['mixture', 'single_mixture']: if not left_bucket.is_mixed: aim_var -= 1 if not right_bucket.is_mixed: aim_var -= 1 if not new_bucket.is_mixed: aim_var += 1 elif constraint in ['small_size', 'single_small_size']: if left_bucket.total_count < min_item_num: aim_var -= 1 if right_bucket.total_count < min_item_num: aim_var -= 1 if new_bucket.total_count < min_item_num: aim_var += 1 else: aim_var = len(bucket_dict) - final_max_bin return aim_var if optimal_param.mixture: LOGGER.debug(f"Before mixture add, dict length: {len(bucket_dict)}") min_heap, non_mixture_num, small_size_num = _add_heap_nodes(constraint='mixture') min_heap, non_mixture_num = _merge_heap(constraint='mixture', aim_var=non_mixture_num) bucket_dict = _update_bucket_info(bucket_dict) min_heap, non_mixture_num, small_size_num = _add_heap_nodes(constraint='single_mixture') min_heap, non_mixture_num = _merge_heap(constraint='single_mixture', aim_var=non_mixture_num) LOGGER.debug(f"After mixture merge, min_heap size: {min_heap.size}, non_mixture_num: {non_mixture_num}") bucket_dict = _update_bucket_info(bucket_dict) LOGGER.debug(f"Before small_size add, dict length: {len(bucket_dict)}") min_heap, non_mixture_num, small_size_num = _add_heap_nodes(constraint='small_size') min_heap, small_size_num = _merge_heap(constraint='small_size', aim_var=small_size_num) bucket_dict = _update_bucket_info(bucket_dict) min_heap, non_mixture_num, small_size_num = _add_heap_nodes(constraint='single_small_size') min_heap, small_size_num = _merge_heap(constraint='single_small_size', aim_var=small_size_num) bucket_dict = _update_bucket_info(bucket_dict) # LOGGER.debug(f"Before add, dict length: {len(bucket_dict)}") min_heap, non_mixture_num, small_size_num = _add_heap_nodes() # LOGGER.debug("After normal add, small_size: {}, min_heap size: {}".format(small_size_num, min_heap.size)) min_heap, total_bucket_num = _merge_heap(aim_var=len(bucket_dict) - final_max_bin) # LOGGER.debug("After normal merge, min_heap size: {}".format(min_heap.size)) non_mixture_num = 0 small_size_num = 0 for i, bucket in bucket_dict.items(): if not bucket.is_mixed: non_mixture_num += 1 if bucket.total_count < min_item_num: small_size_num += 1 bucket_res = list(bucket_dict.values()) bucket_res = sorted(bucket_res, key=lambda bucket: 
bucket.left_bound) # LOGGER.debug("Before return, dict length: {}".format(len(bucket_dict))) # LOGGER.debug(f"Before return, min heap node list length: {len(min_heap.node_list)}") return min_heap, bucket_res, non_mixture_num, small_size_num @staticmethod def split_optimal_binning(bucket_list, optimal_param: OptimalBinningParam, sample_count): min_item_num = math.ceil(optimal_param.min_bin_pct * sample_count) final_max_bin = optimal_param.max_bin def _compute_ks(start_idx, end_idx): acc_event = [] acc_non_event = [] curt_event_total = 0 curt_non_event_total = 0 for bucket in bucket_list[start_idx: end_idx]: acc_event.append(bucket.event_count + curt_event_total) curt_event_total += bucket.event_count acc_non_event.append(bucket.non_event_count + curt_non_event_total) curt_non_event_total += bucket.non_event_count if curt_event_total == 0 or curt_non_event_total == 0: return None, None, None acc_event_rate = [x / curt_event_total for x in acc_event] acc_non_event_rate = [x / curt_non_event_total for x in acc_non_event] ks_list = [math.fabs(eve - non_eve) for eve, non_eve in zip(acc_event_rate, acc_non_event_rate)] if max(ks_list) == 0: best_index = len(ks_list) // 2 else: best_index = ks_list.index(max(ks_list)) left_event = acc_event[best_index] right_event = curt_event_total - left_event left_non_event = acc_non_event[best_index] right_non_event = curt_non_event_total - left_non_event left_total = left_event + left_non_event right_total = right_event + right_non_event if left_total < min_item_num or right_total < min_item_num: best_index = len(ks_list) // 2 left_event = acc_event[best_index] right_event = curt_event_total - left_event left_non_event = acc_non_event[best_index] right_non_event = curt_non_event_total - left_non_event left_total = left_event + left_non_event right_total = right_event + right_non_event best_ks = ks_list[best_index] res_dict = { 'left_event': left_event, 'right_event': right_event, 'left_non_event': left_non_event, 'right_non_event': right_non_event, 'left_total': left_total, 'right_total': right_total, 'left_is_mixed': left_event > 0 and left_non_event > 0, 'right_is_mixed': right_event > 0 and right_non_event > 0 } return best_ks, start + best_index, res_dict def _merge_buckets(start_idx, end_idx, bucket_idx): res_bucket = copy.deepcopy(bucket_list[start_idx]) res_bucket.idx = bucket_idx for bucket in bucket_list[start_idx + 1: end_idx]: res_bucket = res_bucket.merge(bucket) return res_bucket res_split_index = [] res_split_ks = {} to_split_pair = [(0, len(bucket_list))] # iteratively split while len(to_split_pair) > 0: if len(res_split_index) >= final_max_bin - 1: break start, end = to_split_pair.pop(0) if start >= end: continue best_ks, best_index, res_dict = _compute_ks(start, end) if best_ks is None: continue if optimal_param.mixture: if not (res_dict.get('left_is_mixed') and res_dict.get('right_is_mixed')): continue if res_dict.get('left_total') < min_item_num or res_dict.get('right_total') < min_item_num: continue res_split_index.append(best_index + 1) res_split_ks[best_index + 1] = best_ks if res_dict.get('right_total') > res_dict.get('left_total'): to_split_pair.append((best_index + 1, end)) to_split_pair.append((start, best_index + 1)) else: to_split_pair.append((start, best_index + 1)) to_split_pair.append((best_index + 1, end)) # LOGGER.debug("to_split_pair: {}".format(to_split_pair)) if len(res_split_index) == 0: LOGGER.warning("Best ks optimal binning fail to split. 
Take middle split point instead") res_split_index.append(len(bucket_list) // 2) res_split_index = sorted(res_split_index) res_ks = [] if res_split_ks: res_ks = [res_split_ks[idx] for idx in res_split_index] # last bin # res_ks.append(0.0) res_split_index.append(len(bucket_list)) start = 0 bucket_res = [] non_mixture_num = 0 small_size_num = 0 for bucket_idx, end in enumerate(res_split_index): new_bucket = _merge_buckets(start, end, bucket_idx) bucket_res.append(new_bucket) if not new_bucket.is_mixed: non_mixture_num += 1 if new_bucket.total_count < min_item_num: small_size_num += 1 start = end return bucket_res, non_mixture_num, small_size_num, res_ks def bin_sum_to_bucket_list(self, bin_sum, partitions): """ Convert bin sum result, which typically get from host, to bucket list Parameters ---------- bin_sum : dict {'x1': [[event_count, non_event_count], [event_count, non_event_count] ... ], 'x2': [[event_count, non_event_count], [event_count, non_event_count] ... ], ... } partitions: int Indicate partitions for created table. Returns ------- A Table whose keys are feature names and values are bucket lists """ bucket_dict = dict() for col_name, bin_res_list in bin_sum.items(): bucket_list = [] for b_idx in range(len(bin_res_list)): bucket = bucket_info.Bucket(b_idx, self.adjustment_factor) if b_idx == 0: bucket.set_left_neighbor(None) if b_idx == len(bin_res_list) - 1: bucket.set_right_neighbor(None) bucket.event_count = bin_res_list[b_idx][0] bucket.non_event_count = bin_res_list[b_idx][1] bucket.left_bound = b_idx - 1 bucket.right_bound = b_idx bucket.event_total = self.event_total bucket.non_event_total = self.non_event_total bucket_list.append(bucket) bucket_dict[col_name] = bucket_list result = [] for col_name, bucket_list in bucket_dict.items(): result.append((col_name, bucket_list)) result_table = session.parallelize(result, include_key=True, partition=partitions) return result_table
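

# ---------------------------------------------------------------------------
# Illustrative sketch of the KS criterion at the heart of split_optimal_binning,
# rewritten standalone on toy counts: accumulate event/non-event rates over
# candidate buckets and split where |cum_event_rate - cum_non_event_rate|
# peaks. _demo_best_ks_split is a hypothetical helper; the real method above
# additionally enforces min-bin-size and mixture constraints.
def _demo_best_ks_split(event_counts, non_event_counts):
    event_total, non_event_total = sum(event_counts), sum(non_event_counts)
    acc_e = acc_ne = 0
    ks_list = []
    for e, ne in zip(event_counts, non_event_counts):
        acc_e += e
        acc_ne += ne
        ks_list.append(math.fabs(acc_e / event_total - acc_ne / non_event_total))
    best_index = ks_list.index(max(ks_list))
    return best_index, ks_list[best_index]


if __name__ == "__main__":
    # toy distribution: non-events concentrate in low buckets, events in high ones
    idx, ks = _demo_best_ks_split([5, 20, 40, 10], [40, 20, 10, 5])
    print(idx, round(ks, 4))  # 0 0.4667 -> split right after the first bucket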
30,704
49.668317
117
py
FATE
FATE-master/python/federatedml/evaluation/performance_recorder.py
from federatedml.util import consts


class PerformanceRecorder(object):
    """
    Records single-value metrics during the training process and tracks,
    for every metric, how many consecutive rounds have passed without improvement.
    """

    def __init__(self):

        # all of them are single-value metrics
        self.allowed_metric = [consts.AUC,
                               consts.EXPLAINED_VARIANCE,
                               consts.MEAN_ABSOLUTE_ERROR,
                               consts.MEAN_SQUARED_ERROR,
                               consts.MEAN_SQUARED_LOG_ERROR,
                               consts.MEDIAN_ABSOLUTE_ERROR,
                               consts.R2_SCORE,
                               consts.ROOT_MEAN_SQUARED_ERROR,
                               consts.PRECISION,
                               consts.RECALL,
                               consts.ACCURACY,
                               consts.KS
                               ]

        self.larger_is_better = [consts.AUC,
                                 consts.R2_SCORE,
                                 consts.PRECISION,
                                 consts.RECALL,
                                 consts.EXPLAINED_VARIANCE,
                                 consts.ACCURACY,
                                 consts.KS
                                 ]

        self.smaller_is_better = [consts.ROOT_MEAN_SQUARED_ERROR,
                                  consts.MEAN_ABSOLUTE_ERROR,
                                  consts.MEAN_SQUARED_ERROR,
                                  consts.MEAN_SQUARED_LOG_ERROR]

        self.cur_best_performance = {}

        self.no_improvement_round = {}  # records the no-improvement round count of every metric

    def has_improved(self, val: float, metric: str, cur_best: dict):

        if metric not in cur_best:
            return True

        if metric in self.larger_is_better and val > cur_best[metric]:
            return True

        elif metric in self.smaller_is_better and val < cur_best[metric]:
            return True

        return False

    def update(self, eval_dict: dict):
        """
        Update the best performance and the no-improvement round counters.

        Parameters
        ----------
        eval_dict dict, {metric_name: metric_val}, e.g. {'auc': 0.99}

        Returns
        -------
        None. The result of each update is reflected in cur_best_performance
        and no_improvement_round.
        """
        if len(eval_dict) == 0:
            return

        for metric in eval_dict:
            if metric not in self.allowed_metric:
                continue
            if self.has_improved(
                    eval_dict[metric], metric, self.cur_best_performance):
                self.cur_best_performance[metric] = eval_dict[metric]
                self.no_improvement_round[metric] = 0
            else:
                self.no_improvement_round[metric] += 1
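

# ---------------------------------------------------------------------------
# Illustrative usage sketch, assuming consts.AUC is the string "auc" as in
# FATE's consts module: feed per-round eval dicts into the recorder and watch
# the no-improvement counter grow once the metric stalls.
if __name__ == "__main__":
    recorder = PerformanceRecorder()
    for auc in [0.70, 0.75, 0.74, 0.73]:
        recorder.update({"auc": auc})
    print(recorder.cur_best_performance)  # {'auc': 0.75}
    print(recorder.no_improvement_round)  # {'auc': 2} -> two rounds without a new best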
2,787
33
84
py
FATE
FATE-master/python/federatedml/evaluation/metric_interface.py
from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve import numpy as np import logging from federatedml.util import consts from federatedml.evaluation.metrics import classification_metric from federatedml.evaluation.metrics import regression_metric from federatedml.evaluation.metrics import clustering_metric from functools import wraps class MetricInterface(object): def __init__(self, pos_label: int, eval_type: str): self.pos_label = pos_label self.eval_type = eval_type def auc(self, labels, pred_scores): """ Compute AUC for binary classification. Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float The AUC """ if self.eval_type == consts.BINARY: return roc_auc_score(labels, pred_scores) elif self.eval_type == consts.ONE_VS_REST: try: score = roc_auc_score(labels, pred_scores) except BaseException: score = 0 # in case all labels are 0 or 1 logging.warning("all true labels are 0/1 when running ovr AUC") return score else: logging.warning( "auc is just suppose Binary Classification! return None as results") return None @staticmethod def explained_variance(labels, pred_scores): """ Compute explain variance Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float The explain variance """ return regression_metric.ExplainedVariance().compute(labels, pred_scores) @staticmethod def mean_absolute_error(labels, pred_scores): """ Compute mean absolute error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float A non-negative floating point. """ return regression_metric.MAE().compute(labels, pred_scores) @staticmethod def mean_squared_error(labels, pred_scores): """ Compute mean square error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float A non-negative floating point value """ return regression_metric.MSE.compute(labels, pred_scores) @staticmethod def median_absolute_error(labels, pred_scores): """ Compute median absolute error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float A positive floating point value """ return regression_metric.MedianAbsoluteError().compute(labels, pred_scores) @staticmethod def r2_score(labels, pred_scores): """ Compute R^2 (coefficient of determination) score Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float The R^2 score """ return regression_metric.R2Score().compute(labels, pred_scores) @staticmethod def root_mean_squared_error(labels, pred_scores): """ Compute the root of mean square error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. 
Return ---------- float A positive floating point value """ return regression_metric.RMSE.compute(labels, pred_scores) @staticmethod def __to_int_list(array: np.ndarray): return list(map(int, list(array))) @staticmethod def __filt_threshold(thresholds, step): cuts = list(map(float, np.arange(0, 1, step))) size = len(list(thresholds)) thresholds.sort(reverse=True) index_list = [int(size * cut) for cut in cuts] new_thresholds = [thresholds[idx] for idx in index_list] return new_thresholds, cuts def roc(self, labels, pred_scores): if self.eval_type == consts.BINARY: fpr, tpr, thresholds = roc_curve( np.array(labels), np.array(pred_scores), drop_intermediate=1) fpr, tpr, thresholds = list(map(float, fpr)), list( map(float, tpr)), list(map(float, thresholds)) filt_thresholds, cuts = self.__filt_threshold( thresholds=thresholds, step=0.01) new_thresholds = [] new_tpr = [] new_fpr = [] for threshold in filt_thresholds: index = thresholds.index(threshold) new_tpr.append(tpr[index]) new_fpr.append(fpr[index]) new_thresholds.append(threshold) fpr = new_fpr tpr = new_tpr thresholds = new_thresholds return fpr, tpr, thresholds, cuts else: logging.warning( "roc_curve is just suppose Binary Classification! return None as results") fpr, tpr, thresholds, cuts = None, None, None, None return fpr, tpr, thresholds, cuts def ks(self, labels, pred_scores): """ Compute Kolmogorov-Smirnov Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- max_ks_interval: float max value of each tpr - fpt fpr: """ if self.eval_type == consts.ONE_VS_REST: try: rs = classification_metric.KS().compute(labels, pred_scores) except BaseException: rs = [0, [0], [0], [0], [0]] # in case all labels are 0 or 1 logging.warning("all true labels are 0/1 when running ovr KS") return rs else: return classification_metric.KS().compute(labels, pred_scores) def lift(self, labels, pred_scores): """ Compute lift of binary classification. Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. thresholds: value list. This parameter effective only for 'binary'. The predict scores will be 1 if it larger than thresholds, if not, if will be 0. If not only one threshold in it, it will return several results according to the thresholds. default None Returns ---------- float The lift """ if self.eval_type == consts.BINARY: return classification_metric.Lift().compute(labels, pred_scores) else: logging.warning( "lift is just suppose Binary Classification! return None as results") return None def gain(self, labels, pred_scores): """ Compute gain of binary classification. Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. thresholds: value list. This parameter effective only for 'binary'. The predict scores will be 1 if it larger than thresholds, if not, if will be 0. If not only one threshold in it, it will return several results according to the thresholds. default None Returns ---------- float The gain """ if self.eval_type == consts.BINARY: return classification_metric.Gain().compute(labels, pred_scores) else: logging.warning( "gain is just suppose Binary Classification! 
return None as results")
            return None

    def precision(self, labels, pred_scores):
        """
        Compute the precision.

        Parameters
        ----------
        labels: value list. The labels of the data set.
        pred_scores: value list. The predicted results of the model; each score corresponds to a label.

        Returns
        ----------
        dict
            The key is a threshold and the value is another dict, whose keys are the labels
            in parameter labels and whose values are the corresponding precision.
        """
        if self.eval_type == consts.BINARY:
            precision_operator = classification_metric.BiClassPrecision()
            metric_scores, score_threshold, cuts = precision_operator.compute(
                labels, pred_scores)
            return metric_scores, cuts, score_threshold
        elif self.eval_type == consts.MULTY:
            precision_operator = classification_metric.MultiClassPrecision()
            return precision_operator.compute(labels, pred_scores)
        else:
            logging.warning(
                "error: cannot find classification type: {}".format(
                    self.eval_type))

    def recall(self, labels, pred_scores):
        """
        Compute the recall.

        Parameters
        ----------
        labels: value list. The labels of the data set.
        pred_scores: value list. The predicted results of the model; each score corresponds to a label.

        Returns
        ----------
        dict
            The key is a threshold and the value is another dict, whose keys are the labels
            in parameter labels and whose values are the corresponding recall.
        """
        if self.eval_type == consts.BINARY:
            recall_operator = classification_metric.BiClassRecall()
            recall_res, thresholds, cuts = recall_operator.compute(
                labels, pred_scores)
            return recall_res, cuts, thresholds
        elif self.eval_type == consts.MULTY:
            recall_operator = classification_metric.MultiClassRecall()
            return recall_operator.compute(labels, pred_scores)
        else:
            logging.warning(
                "error: cannot find classification type: {}".format(
                    self.eval_type))

    def accuracy(self, labels, pred_scores, normalize=True):
        """
        Compute the accuracy.

        Parameters
        ----------
        labels: value list. The labels of the data set.
        pred_scores: value list. The predicted results of the model; each score corresponds to a label.
        normalize: bool. If true, return the fraction of correctly classified samples;
                   otherwise, return the number of correctly classified samples.

        Returns
        ----------
        dict
            The key is a threshold and the value is the accuracy at this threshold.
        """
        if self.eval_type == consts.BINARY:
            acc_operator = classification_metric.BiClassAccuracy()
            acc_res, thresholds, cuts = acc_operator.compute(
                labels, pred_scores, normalize)
            return acc_res, cuts, thresholds
        elif self.eval_type == consts.MULTY:
            acc_operator = classification_metric.MultiClassAccuracy()
            return acc_operator.compute(labels, pred_scores, normalize)
        else:
            logging.warning(
                "error: cannot find classification type: {}".format(
                    self.eval_type))

    def f1_score(self, labels, pred_scores):
        """
        Compute the f1 score for a binary classification result.
        """
        if self.eval_type == consts.BINARY:
            f1_scores, score_threshold, cuts = classification_metric.FScore().compute(labels, pred_scores)
            return list(f1_scores), list(cuts), list(score_threshold)
        else:
            logging.warning(
                'error: the f-score metric is for binary classification only')

    def confusion_mat(self, labels, pred_scores):
        """
        Compute the confusion matrix.
        """
        if self.eval_type == consts.BINARY:

            sorted_labels, sorted_scores = classification_metric.sort_score_and_label(
                labels, pred_scores)
            _, cuts = classification_metric.ThresholdCutter.cut_by_step(
                sorted_scores, steps=0.01)
            fixed_interval_threshold = classification_metric.ThresholdCutter.fixed_interval_threshold()
            confusion_mat = classification_metric.ConfusionMatrix.compute(
                sorted_labels, sorted_scores, fixed_interval_threshold, ret=[
                    'tp', 'fp', 'fn', 'tn'])

            confusion_mat['tp'] = self.__to_int_list(confusion_mat['tp'])
            confusion_mat['fp'] = self.__to_int_list(confusion_mat['fp'])
            confusion_mat['fn'] = self.__to_int_list(confusion_mat['fn'])
            confusion_mat['tn'] = self.__to_int_list(confusion_mat['tn'])

            return confusion_mat, cuts, fixed_interval_threshold
        else:
            logging.warning(
                'error: the confusion matrix is for binary classification only')

    def psi(
            self,
            train_scores,
            validate_scores,
            train_labels,
            validate_labels,
            debug=False):
        """
        Compute the PSI index.

        Parameters
        ----------
        train_scores: the predicted results on the train data
        validate_scores: the predicted results on the validate data
        train_labels: labels of the train set
        validate_labels: labels of the validate set
        debug: print additional info
        """
        if self.eval_type == consts.BINARY:
            psi_computer = classification_metric.PSI()
            psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \
                train_pos_perc, validate_pos_perc, intervals = psi_computer.compute(train_scores,
                                                                                    validate_scores, debug=debug,
                                                                                    str_intervals=True, round_num=6,
                                                                                    train_labels=train_labels,
                                                                                    validate_labels=validate_labels)

            len_list = np.array([len(psi_scores),
                                 len(expected_interval),
                                 len(expected_percentage),
                                 len(actual_interval),
                                 len(actual_percentage),
                                 len(intervals)])

            assert (len_list == len(psi_scores)).all()

            return list(psi_scores), total_psi, self.__to_int_list(expected_interval), list(expected_percentage), \
                self.__to_int_list(actual_interval), list(actual_percentage), list(train_pos_perc), \
                list(validate_pos_perc), intervals
        else:
            logging.warning(
                'error: the psi metric is for binary classification only')

    def quantile_pr(self, labels, pred_scores):
        if self.eval_type == consts.BINARY:
            p = classification_metric.BiClassPrecision(
                cut_method='quantile', remove_duplicate=False)
            r = classification_metric.BiClassRecall(
                cut_method='quantile', remove_duplicate=False)
            p_scores, score_threshold, cuts = p.compute(labels, pred_scores)
            r_scores, score_threshold, cuts = r.compute(labels, pred_scores)
            p_scores = list(map(list, np.flip(p_scores, axis=0)))
            r_scores = list(map(list, np.flip(r_scores, axis=0)))
            score_threshold = list(np.flip(score_threshold))
            return p_scores, r_scores, 
score_threshold else: logging.warning( 'error: pr quantile is for binary classification only') @staticmethod def jaccard_similarity_score(labels, pred_labels): """ Compute the Jaccard similarity score Parameters ---------- labels: value list. The labels of data set. pred_labels: value list. The predict results of model. It should be corresponding to labels each data. Return ---------- float A positive floating point value """ return clustering_metric.JaccardSimilarityScore().compute(labels, pred_labels) @staticmethod def fowlkes_mallows_score(labels, pred_labels): """ Compute the Fowlkes Mallows score Parameters ---------- labels: value list. The labels of data set. pred_labels: value list. The predict results of model. It should be corresponding to labels each data. Return ---------- float A positive floating point value """ return clustering_metric.FowlkesMallowsScore().compute(labels, pred_labels) @staticmethod def adjusted_rand_score(labels, pred_labels): """ Compute the adjusted-rand score Parameters ---------- labels: value list. The labels of data set. pred_labels: value list. The predict results of model. It should be corresponding to labels each data. Return ---------- float A positive floating point value """ return clustering_metric.AdjustedRandScore().compute(labels, pred_labels) @staticmethod def davies_bouldin_index(cluster_avg_intra_dist, cluster_inter_dist): """ Compute the davies_bouldin_index Parameters """ # process data from evaluation return clustering_metric.DaviesBouldinIndex().compute( cluster_avg_intra_dist, cluster_inter_dist) @staticmethod def contingency_matrix(labels, pred_labels): """ """ return clustering_metric.ContengincyMatrix().compute(labels, pred_labels) @staticmethod def distance_measure( cluster_avg_intra_dist, cluster_inter_dist, max_radius): """ """ return clustering_metric.DistanceMeasure().compute( cluster_avg_intra_dist, cluster_inter_dist, max_radius)
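

# ---------------------------------------------------------------------------
# Worked sketch of the PSI formula that MetricInterface.psi reports:
#     PSI = sum((actual% - expected%) * ln(actual% / expected%))
# _demo_psi is a hypothetical helper and the percentages below are toy bin
# frequencies; the real implementation in classification_metric.PSI first bins
# the train/validate score distributions before applying this formula.
def _demo_psi(expected_pct, actual_pct):
    import math
    return sum((a - e) * math.log(a / e) for e, a in zip(expected_pct, actual_pct))


if __name__ == "__main__":
    expected = [0.25, 0.25, 0.25, 0.25]  # train-score share per bin
    actual = [0.10, 0.20, 0.30, 0.40]    # validate-score share per bin
    print(round(_demo_psi(expected, actual), 4))  # 0.2282 -> notable shift (> 0.2 rule of thumb)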
19,476
37.72167
156
py
FATE
FATE-master/python/federatedml/evaluation/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # from federatedml.evaluation.evaluation import Evaluation # # __all__ = ["Evaluation"]
705
34.3
75
py
FATE
FATE-master/python/federatedml/evaluation/evaluation.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import defaultdict import math from federatedml.util import LOGGER from federatedml.model_base import Metric, MetricMeta from federatedml.param import EvaluateParam from federatedml.util import consts from federatedml.model_base import ModelBase from federatedml.evaluation.metric_interface import MetricInterface from federatedml.statistic.data_overview import predict_detail_str_to_dict import numpy as np class Evaluation(ModelBase): def __init__(self): super().__init__() self.model_param = EvaluateParam() self.eval_results = defaultdict(list) self.save_single_value_metric_list = [consts.AUC, consts.EXPLAINED_VARIANCE, consts.MEAN_ABSOLUTE_ERROR, consts.MEAN_SQUARED_ERROR, consts.MEAN_SQUARED_LOG_ERROR, consts.MEDIAN_ABSOLUTE_ERROR, consts.R2_SCORE, consts.ROOT_MEAN_SQUARED_ERROR, consts.JACCARD_SIMILARITY_SCORE, consts.ADJUSTED_RAND_SCORE, consts.FOWLKES_MALLOWS_SCORE, consts.DAVIES_BOULDIN_INDEX ] self.special_metric_list = [consts.PSI] self.clustering_intra_metric_list = [ consts.DAVIES_BOULDIN_INDEX, consts.DISTANCE_MEASURE] self.metrics = None self.round_num = 6 self.eval_type = None # where to call metric computations self.metric_interface: MetricInterface = None self.psi_train_scores, self.psi_validate_scores = None, None self.psi_train_labels, self.psi_validate_labels = None, None # multi unfold setting self.need_unfold_multi_result = False # summaries self.metric_summaries = {} def _init_model(self, model): self.model_param = model self.eval_type = self.model_param.eval_type self.pos_label = self.model_param.pos_label self.need_unfold_multi_result = self.model_param.unfold_multi_result self.metrics = model.metrics self.metric_interface = MetricInterface( pos_label=self.pos_label, eval_type=self.eval_type, ) def _run_data(self, data_sets=None, stage=None): if not self.need_run: return data = {} for data_key in data_sets: if data_sets[data_key].get("data", None): data[data_key] = data_sets[data_key]["data"] if stage == "fit": self.data_output = self.fit(data) else: LOGGER.warning("Evaluation has not transform, return") def split_data_with_type(self, data: list) -> dict: split_result = defaultdict(list) for value in data: mode = value[1][-1] split_result[mode].append(value) return split_result def _classification_and_regression_extract(self, data): """ extract labels and predict results from data in classification/regression type format """ labels = [] pred_scores = [] pred_labels = [] for d in data: labels.append(d[1][0]) pred_labels.append(d[1][1]) pred_scores.append(d[1][2]) if self.eval_type == consts.BINARY or self.eval_type == consts.REGRESSION: if self.pos_label and self.eval_type == consts.BINARY: labels_arr = np.array(labels) labels_arr[labels_arr == self.pos_label] = 1 labels_arr[labels_arr != self.pos_label] = 0 labels = list(labels_arr) pred_results = pred_scores else: pred_results = pred_labels return labels, pred_results def _clustering_extract(self, data): """ 
extract data according to data format """ true_cluster_index, predicted_cluster_index = [], [] intra_cluster_data, inter_cluster_dist = { 'avg_dist': [], 'max_radius': []}, [] run_intra_metrics = False # run intra metrics or outer metrics ? if len(data[0][1]) == 3: # [int int] -> [true_label, predicted label] -> outer metric # [int np.array] - > [predicted label, distance] -> need no metric computation if not ( isinstance( data[0][1][0], int) and isinstance( data[0][1][1], int)): return None, None, run_intra_metrics if len(data[0][1]) == 5: # the input format is for intra metrics run_intra_metrics = True cluster_index_list = [] for d in data: if run_intra_metrics: cluster_index_list.append(d[0]) intra_cluster_data['avg_dist'].append(d[1][1]) intra_cluster_data['max_radius'].append(d[1][2]) if len(inter_cluster_dist) == 0: inter_cluster_dist += d[1][3] else: true_cluster_index.append(d[1][0]) predicted_cluster_index.append(d[1][1]) # if cluster related data exists, sort by cluster index if len(cluster_index_list) != 0: to_sort = list(zip(cluster_index_list, intra_cluster_data['avg_dist'], intra_cluster_data['max_radius'])) sort_rs = sorted(to_sort, key=lambda x: x[0]) # cluster index intra_cluster_data['avg_dist'] = [i[1] for i in sort_rs] intra_cluster_data['max_radius'] = [i[2] for i in sort_rs] return ( true_cluster_index, predicted_cluster_index, run_intra_metrics) if not run_intra_metrics else ( intra_cluster_data, inter_cluster_dist, run_intra_metrics) def _evaluate_classification_and_regression_metrics(self, mode, data): labels, pred_results = self._classification_and_regression_extract( data) eval_result = defaultdict(list) for eval_metric in self.metrics: if eval_metric not in self.special_metric_list: res = getattr( self.metric_interface, eval_metric)( labels, pred_results) if res is not None: try: if math.isinf(res): res = float(-9999999) LOGGER.info("res is inf, set to {}".format(res)) except BaseException: pass eval_result[eval_metric].append(mode) eval_result[eval_metric].append(res) elif eval_metric == consts.PSI: if mode == 'train': self.psi_train_scores = pred_results self.psi_train_labels = labels elif mode == 'validate': self.psi_validate_scores = pred_results self.psi_validate_labels = labels if self.psi_train_scores is not None and self.psi_validate_scores is not None: res = self.metric_interface.psi( self.psi_train_scores, self.psi_validate_scores, self.psi_train_labels, self.psi_validate_labels) eval_result[eval_metric].append(mode) eval_result[eval_metric].append(res) # delete saved scores after computing a psi pair self.psi_train_scores, self.psi_validate_scores = None, None return eval_result def _evaluate_clustering_metrics(self, mode, data): eval_result = defaultdict(list) rs0, rs1, run_outer_metric = self._clustering_extract(data) if rs0 is None and rs1 is None: # skip evaluation computation if get this input format LOGGER.debug( 'skip computing, this clustering format is not for metric computation') return eval_result if not run_outer_metric: no_label = set(rs0) == {None} if no_label: LOGGER.debug( 'no label found in clustering result, skip metric computation') return eval_result for eval_metric in self.metrics: # if input format and required metrics matches ? 
XNOR if not ((not (eval_metric in self.clustering_intra_metric_list) and not run_outer_metric) + ((eval_metric in self.clustering_intra_metric_list) and run_outer_metric)): LOGGER.warning( 'input data format does not match current clustering metric: {}'.format(eval_metric)) continue LOGGER.debug('clustering_metrics is {}'.format(eval_metric)) if run_outer_metric: if eval_metric == consts.DISTANCE_MEASURE: res = getattr( self.metric_interface, eval_metric)( rs0['avg_dist'], rs1, rs0['max_radius']) else: res = getattr( self.metric_interface, eval_metric)( rs0['avg_dist'], rs1) else: res = getattr(self.metric_interface, eval_metric)(rs0, rs1) eval_result[eval_metric].append(mode) eval_result[eval_metric].append(res) return eval_result @staticmethod def _check_clustering_input(data): # one evaluation component is only available for one kmeans component # in current version input_num = len(data.items()) if input_num > 1: raise ValueError( 'multiple input detected, ' 'one evaluation component is only available ' 'for one clustering(kmean) component in current version') @staticmethod def _unfold_multi_result(score_list): """ one-vs-rest transformation: multi classification result to several binary classification results """ binary_result = {} for key, multi_result in score_list: true_label = multi_result[0] predicted_label = multi_result[1] multi_score = predict_detail_str_to_dict(multi_result[3]) data_type = multi_result[-1] # to binary predict result format for multi_label in multi_score: bin_label = 1 if str(multi_label) == str(true_label) else 0 bin_predicted_label = 1 if str( multi_label) == str(predicted_label) else 0 bin_score = multi_score[multi_label] neg_bin_score = 1 - bin_score result_list = [ bin_label, bin_predicted_label, bin_score, { 1: bin_score, 0: neg_bin_score}, data_type] if multi_label not in binary_result: binary_result[multi_label] = [] binary_result[multi_label].append((key, result_list)) return binary_result def evaluate_metrics(self, mode: str, data: list) -> dict: eval_result = None if self.eval_type != consts.CLUSTERING: eval_result = self._evaluate_classification_and_regression_metrics( mode, data) elif self.eval_type == consts.CLUSTERING: LOGGER.debug('running clustering') eval_result = self._evaluate_clustering_metrics(mode, data) return eval_result def obtain_data(self, data_list): return data_list def check_data(self, data): if len(data) <= 0: return if self.eval_type == consts.CLUSTERING: self._check_clustering_input(data) else: for key, eval_data in data.items(): if eval_data is None: continue sample = eval_data.take(1)[0] # label, predict_type, predict_score, predict_detail, type if not isinstance( sample[1].features, list) or len( sample[1].features) != 5: raise ValueError( 'length of table header mismatch, expected length is 5, got:{},' 'please check the input of the Evaluation Module, result of ' 'cross validation is not supported.'.format(sample)) def fit(self, data, return_result=False): self.check_data(data) LOGGER.debug(f'running eval, data: {data}') self.eval_results.clear() for (key, eval_data) in data.items(): if eval_data is None: LOGGER.debug( 'data with {} is None, skip metric computation'.format(key)) continue collected_data = list(eval_data.collect()) if len(collected_data) == 0: continue eval_data_local = [] for k, v in collected_data: eval_data_local.append((k, v.features)) split_data_with_label = self.split_data_with_type(eval_data_local) for mode, data in split_data_with_label.items(): eval_result = self.evaluate_metrics(mode, data) 
self.eval_results[key].append(eval_result) if self.need_unfold_multi_result and self.eval_type == consts.MULTY: unfold_binary_eval_result = defaultdict(list) # set work mode to binary evaluation self.eval_type = consts.BINARY self.metric_interface.eval_type = consts.ONE_VS_REST back_up_metric = self.metrics self.metrics = [consts.AUC, consts.KS] for mode, data in split_data_with_label.items(): unfold_multi_data = self._unfold_multi_result( eval_data_local) for multi_label, marginal_bin_result in unfold_multi_data.items(): eval_result = self.evaluate_metrics( mode, marginal_bin_result) new_key = key + '_class_{}'.format(multi_label) unfold_binary_eval_result[new_key].append(eval_result) self.callback_ovr_metric_data(unfold_binary_eval_result) # recover work mode self.eval_type = consts.MULTY self.metric_interface.eval_type = consts.MULTY self.metrics = back_up_metric return self.callback_metric_data( self.eval_results, return_single_val_metrics=return_result) def __save_single_value( self, result, metric_name, metric_namespace, eval_name): metric_type = 'EVALUATION_SUMMARY' if eval_name in consts.ALL_CLUSTER_METRICS: metric_type = 'CLUSTERING_EVALUATION_SUMMARY' self.tracker.log_metric_data( metric_namespace, metric_name, [ Metric( eval_name, np.round( result, self.round_num))]) self.tracker.set_metric_meta( metric_namespace, metric_name, MetricMeta( name=metric_name, metric_type=metric_type)) def __save_curve_data( self, x_axis_list, y_axis_list, metric_name, metric_namespace): points = [] for i, value in enumerate(x_axis_list): if isinstance(value, float): value = np.round(value, self.round_num) points.append((value, np.round(y_axis_list[i], self.round_num))) points.sort(key=lambda x: x[0]) metric_points = [Metric(point[0], point[1]) for point in points] self.tracker.log_metric_data( metric_namespace, metric_name, metric_points) def __save_curve_meta( self, metric_name, metric_namespace, metric_type, unit_name=None, ordinate_name=None, curve_name=None, best=None, pair_type=None, thresholds=None): extra_metas = {} metric_type = "_".join([metric_type, "EVALUATION"]) key_list = [ "unit_name", "ordinate_name", "curve_name", "best", "pair_type", "thresholds"] for key in key_list: value = locals()[key] if value: if key == "thresholds": value = np.round(value, self.round_num).tolist() extra_metas[key] = value self.tracker.set_metric_meta(metric_namespace, metric_name, MetricMeta( name=metric_name, metric_type=metric_type, extra_metas=extra_metas)) @staticmethod def __multi_class_label_padding(metrics, label_indices): # in case some labels don't appear when running homo-multi-class algo label_num = np.max(label_indices) + 1 index_result_mapping = dict(zip(label_indices, metrics)) new_metrics, new_label_indices = [], [] for i in range(label_num): if i in index_result_mapping: new_metrics.append(index_result_mapping[i]) else: new_metrics.append(0.0) new_label_indices.append(i) return new_metrics, new_label_indices @staticmethod def __filt_override_unit_ordinate_coordinate(x_sets, y_sets): max_y_dict = {} for idx, x_value in enumerate(x_sets): if x_value not in max_y_dict: max_y_dict[x_value] = {"max_y": y_sets[idx], "idx": idx} else: max_y = max_y_dict[x_value]["max_y"] if max_y < y_sets[idx]: max_y_dict[x_value] = {"max_y": y_sets[idx], "idx": idx} x = [] y = [] idx_list = [] for key, value in max_y_dict.items(): x.append(key) y.append(value["max_y"]) idx_list.append(value["idx"]) return x, y, idx_list def __process_single_value_data(self, metric, metric_res): single_val_metric = None if metric in 
self.save_single_value_metric_list or \ (metric == consts.ACCURACY and self.eval_type == consts.MULTY): single_val_metric = metric_res[1] elif metric == consts.KS: best_ks, fpr, tpr, thresholds, cuts = metric_res[1] single_val_metric = best_ks elif metric in [consts.RECALL, consts.PRECISION] and self.eval_type == consts.MULTY: pos_score = metric_res[1][0] single_val_metric = float(np.array(pos_score).mean()) return single_val_metric @staticmethod def __filter_duplicate_roc_data_point(fpr, tpr, thresholds): data_point_set = set() new_fpr, new_tpr, new_threshold = [], [], [] for fpr_, tpr_, thres in zip(fpr, tpr, thresholds): if (fpr_, tpr_, thres) not in data_point_set: data_point_set.add((fpr_, tpr_, thres)) new_fpr.append(fpr_) new_tpr.append(tpr_) new_threshold.append(thres) return new_fpr, new_tpr, new_threshold def __save_roc_curve( self, data_name, metric_name, metric_namespace, metric_res): fpr, tpr, thresholds, _ = metric_res fpr, tpr, thresholds = self.__filter_duplicate_roc_data_point( fpr, tpr, thresholds) # set roc edge value fpr.append(1.0) tpr.append(1.0) thresholds.append(1.0) self.__save_curve_data(fpr, tpr, metric_name, metric_namespace) self.__save_curve_meta( metric_name=metric_name, metric_namespace=metric_namespace, metric_type="ROC", unit_name="fpr", ordinate_name="tpr", curve_name=data_name, thresholds=thresholds) def __save_ks_curve( self, metric, metric_res, metric_name, metric_namespace, data_name): best_ks, fpr, tpr, thresholds, cuts = metric_res[1] for curve_name, curve_data in zip(["fpr", "tpr"], [fpr, tpr]): metric_name_fpr = '_'.join([metric_name, curve_name]) curve_name_fpr = "_".join([data_name, curve_name]) self.__save_curve_data( cuts, curve_data, metric_name_fpr, metric_namespace) self.__save_curve_meta( metric_name=metric_name_fpr, metric_namespace=metric_namespace, metric_type=metric.upper(), unit_name="", curve_name=curve_name_fpr, pair_type=data_name, thresholds=thresholds) def __save_lift_gain_curve( self, metric, metric_res, metric_name, metric_namespace, data_name): score, cuts, thresholds = metric_res[1] score = [float(s[1]) for s in score] cuts = [float(c[1]) for c in cuts] cuts, score, idx_list = self.__filt_override_unit_ordinate_coordinate( cuts, score) thresholds = [thresholds[idx] for idx in idx_list] score.append(1.0) cuts.append(1.0) thresholds.append(0.0) self.__save_curve_data(cuts, score, metric_name, metric_namespace) self.__save_curve_meta( metric_name=metric_name, metric_namespace=metric_namespace, metric_type=metric.upper(), unit_name="", curve_name=data_name, thresholds=thresholds) def __save_accuracy_curve( self, metric, metric_res, metric_name, metric_namespace, data_name): if self.eval_type == consts.MULTY: return score, cuts, thresholds = metric_res[1] self.__save_curve_data(cuts, score, metric_name, metric_namespace) self.__save_curve_meta( metric_name=metric_name, metric_namespace=metric_namespace, metric_type=metric.upper(), unit_name="", curve_name=data_name, thresholds=thresholds) def __save_pr_curve(self, precision_and_recall, data_name): precision_res = precision_and_recall[consts.PRECISION] recall_res = precision_and_recall[consts.RECALL] if precision_res[0] != recall_res[0]: LOGGER.warning( "precision mode:{} is not equal to recall mode:{}".format( precision_res[0], recall_res[0])) return metric_namespace = precision_res[0] metric_name_precision = '_'.join([data_name, "precision"]) metric_name_recall = '_'.join([data_name, "recall"]) pos_precision_score = precision_res[1][0] precision_cuts = precision_res[1][1] if 
len(precision_res[1]) >= 3: precision_thresholds = precision_res[1][2] else: precision_thresholds = None pos_recall_score = recall_res[1][0] recall_cuts = recall_res[1][1] if len(recall_res[1]) >= 3: recall_thresholds = recall_res[1][2] else: recall_thresholds = None precision_curve_name = data_name recall_curve_name = data_name if self.eval_type == consts.BINARY: pos_precision_score = [score[1] for score in pos_precision_score] pos_recall_score = [score[1] for score in pos_recall_score] pos_recall_score, pos_precision_score, idx_list = self.__filt_override_unit_ordinate_coordinate( pos_recall_score, pos_precision_score) precision_cuts = [precision_cuts[idx] for idx in idx_list] recall_cuts = [recall_cuts[idx] for idx in idx_list] edge_idx = idx_list[-1] if edge_idx == len(precision_thresholds) - 1: idx_list = idx_list[:-1] precision_thresholds = [ precision_thresholds[idx] for idx in idx_list] recall_thresholds = [recall_thresholds[idx] for idx in idx_list] elif self.eval_type == consts.MULTY: pos_recall_score, recall_cuts = self.__multi_class_label_padding( pos_recall_score, recall_cuts) pos_precision_score, precision_cuts = self.__multi_class_label_padding( pos_precision_score, precision_cuts) self.__save_curve_data( precision_cuts, pos_precision_score, metric_name_precision, metric_namespace) self.__save_curve_meta(metric_name_precision, metric_namespace, "_".join([consts.PRECISION.upper(), self.eval_type.upper()]), unit_name="", ordinate_name="Precision", curve_name=precision_curve_name, pair_type=data_name, thresholds=precision_thresholds) self.__save_curve_data( recall_cuts, pos_recall_score, metric_name_recall, metric_namespace) self.__save_curve_meta(metric_name_recall, metric_namespace, "_".join([consts.RECALL.upper(), self.eval_type.upper()]), unit_name="", ordinate_name="Recall", curve_name=recall_curve_name, pair_type=data_name, thresholds=recall_thresholds) def __save_confusion_mat_table( self, metric, confusion_mat, thresholds, metric_name, metric_namespace): extra_metas = { 'tp': list( confusion_mat['tp']), 'tn': list( confusion_mat['tn']), 'fp': list( confusion_mat['fp']), 'fn': list( confusion_mat['fn']), 'thresholds': list( np.round( thresholds, self.round_num))} self.tracker.set_metric_meta( metric_namespace, metric_name, MetricMeta( name=metric_name, metric_type=metric.upper(), extra_metas=extra_metas)) def __save_f1_score_table( self, metric, f1_scores, thresholds, metric_name, metric_namespace): extra_metas = { 'f1_scores': list( np.round( f1_scores, self.round_num)), 'thresholds': list( np.round( thresholds, self.round_num))} self.tracker.set_metric_meta( metric_namespace, metric_name, MetricMeta( name=metric_name, metric_type=metric.upper(), extra_metas=extra_metas)) def __save_psi_table( self, metric, metric_res, metric_name, metric_namespace): psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \ train_pos_perc, validate_pos_perc, intervals = metric_res[1] extra_metas = { 'psi_scores': list( np.round( psi_scores, self.round_num)), 'total_psi': round( total_psi, self.round_num), 'expected_interval': list(expected_interval), 'expected_percentage': list(expected_percentage), 'actual_interval': list(actual_interval), 'actual_percentage': list(actual_percentage), 'intervals': list(intervals), 'train_pos_perc': train_pos_perc, 'validate_pos_perc': validate_pos_perc} self.tracker.set_metric_meta( metric_namespace, metric_name, MetricMeta( name=metric_name, metric_type=metric.upper(), extra_metas=extra_metas)) def 
__save_pr_table(self, metric, metric_res, metric_name, metric_namespace):

        p_scores, r_scores, score_threshold = metric_res

        extra_metas = {'p_scores': list(map(list, np.round(p_scores, self.round_num))),
                       'r_scores': list(map(list, np.round(r_scores, self.round_num))),
                       'thresholds': list(np.round(score_threshold, self.round_num))}

        self.tracker.set_metric_meta(metric_namespace, metric_name,
                                     MetricMeta(name=metric_name, metric_type=metric.upper(),
                                                extra_metas=extra_metas))

    def __save_contingency_matrix(self, metric, metric_res, metric_name, metric_namespace):

        result_array, unique_predicted_label, unique_true_label = metric_res
        true_labels = list(map(int, unique_true_label))
        predicted_label = list(map(int, unique_predicted_label))
        result_table = []
        for l_ in result_array:
            result_table.append(list(map(int, l_)))

        extra_metas = {'true_labels': true_labels,
                       'predicted_labels': predicted_label,
                       'result_table': result_table}

        self.tracker.set_metric_meta(metric_namespace, metric_name,
                                     MetricMeta(name=metric_name, metric_type=metric.upper(),
                                                extra_metas=extra_metas))

    def __save_distance_measure(self, metric, metric_res: dict, metric_name, metric_namespace):

        extra_metas = {}
        cluster_index = [k for k in metric_res.keys()]
        radius, nearest_idx = [], []
        for k in metric_res:
            radius.append(metric_res[k][0])
            nearest_idx.append(metric_res[k][1])

        extra_metas['cluster_index'] = cluster_index
        extra_metas['radius'] = radius
        extra_metas['nearest_idx'] = nearest_idx

        self.tracker.set_metric_meta(metric_namespace, metric_name,
                                     MetricMeta(name=metric_name, metric_type=metric.upper(),
                                                extra_metas=extra_metas))

    def __update_summary(self, data_type, namespace, metric, metric_val):
        if data_type not in self.metric_summaries:
            self.metric_summaries[data_type] = {}
        if namespace not in self.metric_summaries[data_type]:
            self.metric_summaries[data_type][namespace] = {}
        self.metric_summaries[data_type][namespace][metric] = metric_val

    def __save_summary(self):
        LOGGER.info('eval summary is {}'.format(self.metric_summaries))
        self.set_summary(self.metric_summaries)

    def callback_ovr_metric_data(self, eval_results):

        for model_name, eval_rs in eval_results.items():

            train_callback_meta = defaultdict(dict)
            validate_callback_meta = defaultdict(dict)
            split_list = model_name.split('_')
            label = split_list[-1]
            # strip the trailing '_class_{label}' suffix to recover the origin model name
            origin_model_name_list = split_list[:-2]
            origin_model_name = ''
            for s in origin_model_name_list:
                origin_model_name += (s + '_')
            origin_model_name = origin_model_name[:-1]

            for rs_dict in eval_rs:
                for metric_name, metric_rs in rs_dict.items():
                    if metric_name == consts.KS:
                        # keep the ks value only, curve data is not needed
                        metric_rs = [metric_rs[0], metric_rs[1][0]]
                    metric_namespace = metric_rs[0]
                    if metric_namespace == 'train':
                        callback_meta = train_callback_meta
                    else:
                        callback_meta = validate_callback_meta
                    callback_meta[label][metric_name] = metric_rs[1]

            self.tracker.set_metric_meta("train", model_name + '_' + 'ovr',
                                         MetricMeta(name=origin_model_name, metric_type='ovr',
                                                    extra_metas=train_callback_meta))
            self.tracker.set_metric_meta("validate", model_name + '_' + 'ovr',
                                         MetricMeta(name=origin_model_name, metric_type='ovr',
                                                    extra_metas=validate_callback_meta))

            LOGGER.debug('callback data {} {}'.format(train_callback_meta, validate_callback_meta))

    def callback_metric_data(self, eval_results, return_single_val_metrics=False):

        # collect single value metrics for the validation strategy
        validate_metric = {}
        train_metric = {}
        collect_dict = {}

        for (data_type, eval_res_list) in eval_results.items():
precision_recall = {} for eval_res in eval_res_list: for (metric, metric_res) in eval_res.items(): metric_namespace = metric_res[0] if metric_namespace == 'validate': collect_dict = validate_metric elif metric_namespace == 'train': collect_dict = train_metric metric_name = '_'.join([data_type, metric]) single_val_metric = self.__process_single_value_data( metric, metric_res) if single_val_metric is not None: self.__save_single_value( single_val_metric, metric_name=data_type, metric_namespace=metric_namespace, eval_name=metric) collect_dict[metric] = single_val_metric # update pipeline summary self.__update_summary( data_type, metric_namespace, metric, single_val_metric) if metric == consts.KS: self.__save_ks_curve( metric, metric_res, metric_name, metric_namespace, data_type) elif metric == consts.ROC: self.__save_roc_curve( data_type, metric_name, metric_namespace, metric_res[1]) elif metric == consts.ACCURACY: self.__save_accuracy_curve( metric, metric_res, metric_name, metric_namespace, data_type) elif metric in [consts.GAIN, consts.LIFT]: self.__save_lift_gain_curve( metric, metric_res, metric_name, metric_namespace, data_type) elif metric in [consts.PRECISION, consts.RECALL]: precision_recall[metric] = metric_res if len(precision_recall) < 2: continue self.__save_pr_curve(precision_recall, data_type) precision_recall = {} # reset cached dict elif metric == consts.PSI: self.__save_psi_table( metric, metric_res, metric_name, metric_namespace) elif metric == consts.CONFUSION_MAT: confusion_mat, cuts, score_threshold = metric_res[1] self.__save_confusion_mat_table( metric, confusion_mat, score_threshold, metric_name, metric_namespace) elif metric == consts.F1_SCORE: f1_scores, cuts, score_threshold = metric_res[1] self.__save_f1_score_table( metric, f1_scores, score_threshold, metric_name, metric_namespace) elif metric == consts.QUANTILE_PR: self.__save_pr_table( metric, metric_res[1], metric_name, metric_namespace) elif metric == consts.CONTINGENCY_MATRIX: self.__save_contingency_matrix( metric, metric_res[1], metric_name, metric_namespace) elif metric == consts.DISTANCE_MEASURE: self.__save_distance_measure( metric, metric_res[1], metric_name, metric_namespace) self.__save_summary() if return_single_val_metrics: if len(validate_metric) != 0: LOGGER.debug("return validate metric") LOGGER.debug('validate metric is {}'.format(validate_metric)) return validate_metric else: LOGGER.debug("validate metric is empty, return train metric") LOGGER.debug('train metric is {}'.format(train_metric)) return train_metric else: return None @staticmethod def extract_data(data: dict): result = {} for k, v in data.items(): result[".".join(k.split(".")[:1])] = v return result
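# --- illustrative sketch (not part of the original module) ---
# A minimal, self-contained demo of the one-vs-rest unfolding performed by
# _unfold_multi_result above: one multi-class prediction record
# [true_label, predicted_label, score, predict_detail, data_type] is split
# into one binary record per class. The sample record is hypothetical, and a
# plain dict stands in for the predict_detail string parsed in the real code.
if __name__ == "__main__":
    multi_record = [2, 1, 0.5, {0: 0.1, 1: 0.5, 2: 0.4}, "train"]
    true_label, predicted_label, _, multi_score, data_type = multi_record
    for class_label, bin_score in multi_score.items():
        bin_label = 1 if class_label == true_label else 0
        bin_predicted_label = 1 if class_label == predicted_label else 0
        # per-class binary record, mirroring the structure built above
        print(class_label, [bin_label, bin_predicted_label, bin_score,
                            {1: bin_score, 0: 1 - bin_score}, data_type])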
39,206
36.269011
108
py
FATE
FATE-master/python/federatedml/evaluation/test/test_evaluation_module.py
import unittest import numpy as np from federatedml.util import consts from federatedml.evaluation.metrics import classification_metric, clustering_metric, regression_metric from federatedml.evaluation.metric_interface import MetricInterface class TestEvaluation(unittest.TestCase): def setUp(self): self.bin_score = np.random.random(100) self.bin_label = (self.bin_score > 0.5) + 0 self.reg_score = np.random.random(100) * 10 self.reg_label = np.random.random(100) * 10 self.multi_score = np.random.randint([4 for i in range(50)]) self.multi_label = np.random.randint([4 for i in range(50)]) self.clustering_score = np.random.randint([4 for i in range(50)]) self.clustering_label = np.random.randint([3 for i in range(50)]) self.psi_train_score = np.random.random(10000) self.psi_train_label = (self.psi_train_score > 0.5) + 0 self.psi_val_score = np.random.random(1000) self.psi_val_label = (self.psi_val_score > 0.5) + 0 def test_regression(self): print('testing regression metric') regression_metric.R2Score().compute(self.reg_score, self.reg_label) regression_metric.MSE().compute(self.reg_score, self.reg_label) regression_metric.RMSE().compute(self.reg_score, self.reg_label) regression_metric.ExplainedVariance().compute(self.reg_score, self.reg_label) regression_metric.Describe().compute(self.reg_score) def test_binary(self): print('testing binary') interface = MetricInterface(pos_label=1, eval_type=consts.BINARY) interface.auc(self.bin_label, self.bin_score) interface.confusion_mat(self.bin_label, self.bin_score) interface.ks(self.bin_label, self.bin_score) interface.accuracy(self.bin_label, self.bin_score) interface.f1_score(self.bin_label, self.bin_score) interface.gain(self.bin_label, self.bin_score) interface.lift(self.bin_label, self.bin_score) interface.quantile_pr(self.bin_label, self.bin_score) interface.precision(self.bin_label, self.bin_score) interface.recall(self.bin_label, self.bin_score) interface.roc(self.bin_label, self.bin_score) def test_psi(self): interface = MetricInterface(pos_label=1, eval_type=consts.BINARY) interface.psi( self.psi_train_score, self.psi_val_score, train_labels=self.psi_train_label, validate_labels=self.psi_val_label) def test_multi(self): print('testing multi') interface = MetricInterface(eval_type=consts.MULTY, pos_label=1) interface.precision(self.multi_label, self.multi_score) interface.recall(self.multi_label, self.multi_score) interface.accuracy(self.multi_label, self.multi_score) def test_clustering(self): print('testing clustering') interface = MetricInterface(eval_type=consts.CLUSTERING, pos_label=1) interface.confusion_mat(self.clustering_label, self.clustering_score) def test_newly_added(self): print('testing newly added') binary_data = list( zip([i for i in range(len(self.psi_train_score))], self.psi_train_score)) classification_metric.Distribution().compute(binary_data, binary_data) multi_data = list( zip([i for i in range(len(self.multi_score))], self.multi_score)) classification_metric.Distribution().compute(multi_data, multi_data) classification_metric.KSTest().compute(self.multi_score, self.multi_score) classification_metric.KSTest().compute( self.psi_train_score, self.psi_val_score) classification_metric.AveragePrecisionScore().compute( self.psi_train_score, self.psi_val_score, self.psi_train_label, self.psi_val_label) if __name__ == '__main__': unittest.main()
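# --- illustrative sketch (not part of the original test file) ---
# A standalone sanity check for ThresholdCutter; it sits after the unittest
# entry point on purpose, so it only runs when called explicitly. It assumes
# cut_by_index returns one threshold per percentile cut (100 in total).
def _threshold_cutter_demo():
    from federatedml.evaluation.metrics.classification_metric import ThresholdCutter
    scores = sorted(np.random.random(1000), reverse=True)
    thresholds, cuts = ThresholdCutter.cut_by_index(scores)
    assert len(thresholds) == len(cuts) == 100
    # thresholds follow the descending order of the input scores
    assert all(a >= b for a, b in zip(thresholds, thresholds[1:]))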
3,943
40.957447
102
py
FATE
FATE-master/python/federatedml/evaluation/test/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #
616
37.5625
75
py
FATE
FATE-master/python/federatedml/evaluation/metrics/regression_metric.py
from scipy import stats
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score

import numpy as np


class RMSE(object):

    @staticmethod
    def compute(labels, pred_scores):
        return np.sqrt(mean_squared_error(labels, pred_scores))


class MAE(object):

    @staticmethod
    def compute(labels, pred_scores):
        return mean_absolute_error(labels, pred_scores)


class R2Score(object):

    @staticmethod
    def compute(labels, pred_scores):
        return r2_score(labels, pred_scores)


class MSE(object):

    @staticmethod
    def compute(labels, pred_scores):
        return mean_squared_error(labels, pred_scores)


class ExplainedVariance(object):

    @staticmethod
    def compute(labels, pred_scores):
        return explained_variance_score(labels, pred_scores)


class MedianAbsoluteError(object):

    @staticmethod
    def compute(labels, pred_scores):
        return median_absolute_error(labels, pred_scores)


class IC(object):
    """
    Compute the Information Criterion with a given dTable and loss.
        When k = 2, the result is the genuine AIC;
        when k = log(n), the result is the BIC, also called SBC, SIC or SBIC.
    """

    def compute(self, k, n, dfe, loss):
        aic_score = k * dfe + 2 * n * loss
        return aic_score


class IC_Approx(object):
    """
    Compute the Information Criterion value with a given dTable and loss.
        When k = 2, the result is the genuine AIC;
        when k = log(n), the result is the BIC, also called SBC, SIC or SBIC.
        Note that this formula for linear regression dismisses the constant term n * np.log(2 * np.pi)
        for the sake of simplicity, so the absolute value of the result will be small.
    """

    def compute(self, k, n, dfe, loss):
        aic_score = k * dfe + n * np.log(loss * 2)
        return aic_score


class Describe(object):

    @staticmethod
    def compute(pred_scores):
        describe = stats.describe(pred_scores)
        metrics = {"min": describe.minmax[0],
                   "max": describe.minmax[1],
                   "mean": describe.mean,
                   "variance": describe.variance,
                   "skewness": describe.skewness,
                   "kurtosis": describe.kurtosis}
        return metrics
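# --- illustrative sketch (not part of the original module) ---
# How the IC helper above is meant to be parameterised: k = 2 yields AIC,
# k = log(n) yields BIC. The sample size, degrees of freedom and loss value
# below are hypothetical.
if __name__ == "__main__":
    n, dfe, loss = 1000, 12, 0.35
    aic = IC().compute(k=2, n=n, dfe=dfe, loss=loss)
    bic = IC().compute(k=np.log(n), n=n, dfe=dfe, loss=loss)
    print({"aic": aic, "bic": bic})
    print("rmse on a toy pair:", RMSE.compute([1.0, 2.0], [1.1, 1.9]))  # 0.1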
2,315
25.930233
173
py
FATE
FATE-master/python/federatedml/evaluation/metrics/clustering_metric.py
import numpy as np

from sklearn.metrics import jaccard_score as jaccard_similarity_score
from sklearn.metrics import fowlkes_mallows_score
from sklearn.metrics import adjusted_rand_score


class JaccardSimilarityScore(object):
    """
    Compute the weighted jaccard_similarity_score
    """

    def compute(self, labels, pred_scores):
        return jaccard_similarity_score(labels, pred_scores, average="weighted")


class FowlkesMallowsScore(object):
    """
    Compute the Fowlkes-Mallows score (FMI)
    """

    def compute(self, labels, pred_scores):
        return fowlkes_mallows_score(labels, pred_scores)


class AdjustedRandScore(object):
    """
    Compute the adjusted Rand score (RI)
    """

    def compute(self, labels, pred_scores):
        return adjusted_rand_score(labels, pred_scores)


class ContengincyMatrix(object):
    """
    Compute the contingency matrix
    """

    def compute(self, labels, pred_scores):
        label_predict = list(zip(labels, pred_scores))
        predicted_label = list(range(0, max(pred_scores) + 1))
        unique_true_label = np.unique(labels)
        result_array = np.zeros([len(unique_true_label), max(pred_scores) + 1])
        for v1, v2 in label_predict:
            result_array[v1][v2] += 1
        return result_array, predicted_label, unique_true_label


class DistanceMeasure(object):
    """
    Compute the distance measure: for every cluster, its max radius and the index of its nearest cluster
    """

    def compute(self, dist_table, inter_cluster_dist, max_radius):

        max_radius_result = max_radius
        cluster_nearest_result = []

        if len(dist_table) == 1:
            cluster_nearest_result.append(0)
        else:
            for j in range(0, len(dist_table)):
                arr = inter_cluster_dist[j * (len(dist_table) - 1): (j + 1) * (len(dist_table) - 1)]
                smallest_index = list(arr).index(min(arr))
                if smallest_index > j:
                    smallest_index += 1
                cluster_nearest_result.append(smallest_index)

        distance_measure_result = dict()
        for n in range(0, len(dist_table)):
            distance_measure_result[n] = [max_radius_result[n], cluster_nearest_result[n]]

        return distance_measure_result


class DaviesBouldinIndex(object):
    """
    Compute the Davies-Bouldin Index (DBI)
    """

    def compute(self, dist_table, cluster_dist):

        if len(dist_table) == 1:
            return np.nan

        max_dij_list = []
        d = 0
        for i in range(0, len(dist_table)):
            dij_list = []
            for j in range(0, len(dist_table)):
                if j != i:
                    dij_list.append((dist_table[i] + dist_table[j]) / (cluster_dist[d] ** 0.5))
                    d += 1
            max_dij = max(dij_list)
            max_dij_list.append(max_dij)

        return np.sum(max_dij_list) / len(dist_table)
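# --- illustrative sketch (not part of the original module) ---
# The input layout DaviesBouldinIndex.compute expects, as read from the code
# above: dist_table holds one intra-cluster dispersion value per cluster, and
# cluster_dist holds squared inter-cluster distances flattened over ordered
# pairs (i, j) with i != j. All numbers below are hypothetical.
if __name__ == "__main__":
    dist_table = [1.0, 2.0]    # per-cluster average intra-cluster distance
    cluster_dist = [9.0, 9.0]  # squared distance between clusters 0 and 1, both directions
    dbi = DaviesBouldinIndex().compute(dist_table, cluster_dist)
    print("DBI:", dbi)         # (1.0 + 2.0) / sqrt(9.0) = 1.0 for both clusters -> DBI = 1.0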
2,852
30.01087
100
py
FATE
FATE-master/python/federatedml/evaluation/metrics/classification_metric.py
import copy
import sys

import numpy as np
import pandas as pd

from scipy import stats
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import average_precision_score

ROUND_NUM = 6


def neg_pos_count(labels: np.ndarray, pos_label: int):
    pos_num = ((labels == pos_label) + 0).sum()
    neg_num = len(labels) - pos_num
    return pos_num, neg_num


def sort_score_and_label(labels: np.ndarray, pred_scores: np.ndarray):
    labels = np.array(labels)
    pred_scores = np.array(pred_scores)

    sort_idx = np.flip(pred_scores.argsort())
    sorted_labels = labels[sort_idx]
    sorted_scores = pred_scores[sort_idx]

    return sorted_labels, sorted_scores


class ConfusionMatrix(object):

    @staticmethod
    def compute(sorted_labels: list, sorted_pred_scores: list, score_thresholds: list, ret: list, pos_label=1):

        for ret_type in ret:
            assert ret_type in ['tp', 'tn', 'fp', 'fn']

        sorted_labels = np.array(sorted_labels)
        sorted_scores = np.array(sorted_pred_scores)
        sorted_labels[sorted_labels != pos_label] = 0
        sorted_labels[sorted_labels == pos_label] = 1
        score_thresholds = np.array([score_thresholds]).transpose()
        pred_labels = (sorted_scores > score_thresholds) + 0

        ret_dict = {}
        if 'tp' in ret or 'tn' in ret:
            match_arr = (pred_labels + sorted_labels)
            if 'tp' in ret:
                tp_num = (match_arr == 2).sum(axis=-1)
                ret_dict['tp'] = tp_num
            if 'tn' in ret:
                tn_num = (match_arr == 0).sum(axis=-1)
                ret_dict['tn'] = tn_num

        if 'fp' in ret or 'fn' in ret:
            match_arr = (sorted_labels - pred_labels)
            if 'fp' in ret:
                fp_num = (match_arr == -1).sum(axis=-1)
                ret_dict['fp'] = fp_num
            if 'fn' in ret:
                fn_num = (match_arr == 1).sum(axis=-1)
                ret_dict['fn'] = fn_num

        return ret_dict


class ThresholdCutter(object):

    @staticmethod
    def cut_by_step(sorted_scores, steps=0.01):
        assert isinstance(steps, float) and (0 < steps < 1)
        thresholds = list(set(sorted_scores))
        thresholds, cuts = ThresholdCutter.__filt_threshold(thresholds, steps)
        score_threshold = thresholds

        return score_threshold, cuts

    @staticmethod
    def fixed_interval_threshold(steps=0.01):
        intervals = np.array([i for i in range(0, 100)])
        intervals = intervals * steps
        return intervals

    @staticmethod
    def cut_by_index(sorted_scores):
        cuts = np.array([c / 100 for c in range(100)])
        data_size = len(sorted_scores)
        indexs = [int(data_size * cut) for cut in cuts]
        score_threshold = [sorted_scores[idx] for idx in indexs]
        return score_threshold, cuts

    @staticmethod
    def __filt_threshold(thresholds, step):
        cuts = list(map(float, np.arange(0, 1, step)))
        size = len(list(thresholds))
        thresholds.sort(reverse=True)
        index_list = [int(size * cut) for cut in cuts]
        new_thresholds = [thresholds[idx] for idx in index_list]

        return new_thresholds, cuts

    @staticmethod
    def cut_by_quantile(scores, quantile_list=None, interpolation='nearest', remove_duplicate=True):

        if quantile_list is None:  # default is 20 intervals
            quantile_list = [round(i * 0.05, 3) for i in range(20)] + [1.0]
        quantile_val = np.quantile(scores, quantile_list, interpolation=interpolation)
        if remove_duplicate:
            quantile_val = sorted(list(set(quantile_val)))
        else:
            quantile_val = sorted(list(quantile_val))

        if len(quantile_val) == 1:
            quantile_val = [np.min(scores), np.max(scores)]

        return quantile_val


class KS(object):

    @staticmethod
    def compute(labels, pred_scores, pos_label=1, fixed_interval_threshold=True):

        sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores)

        threshold, cuts = ThresholdCutter.cut_by_index(sorted_scores)
        confusion_mat = ConfusionMatrix.compute(sorted_labels,
sorted_scores, threshold, ret=['tp', 'fp'], pos_label=pos_label) pos_num, neg_num = neg_pos_count(sorted_labels, pos_label=pos_label) assert pos_num > 0 and neg_num > 0, "error when computing KS metric, pos sample number and neg sample number" \ "must be larger than 0" tpr_arr = confusion_mat['tp'] / pos_num fpr_arr = confusion_mat['fp'] / neg_num tpr = np.append(tpr_arr, np.array([1.0])) fpr = np.append(fpr_arr, np.array([1.0])) cuts = np.append(cuts, np.array([1.0])) ks_curve = tpr[:-1] - fpr[:-1] ks_val = np.max(ks_curve) return ks_val, fpr, tpr, threshold, cuts class BiClassMetric(object): def __init__(self, cut_method='step', remove_duplicate=False, pos_label=1): assert cut_method in ['step', 'quantile'] self.cut_method = cut_method self.remove_duplicate = remove_duplicate # available when cut_method is quantile self.pos_label = pos_label def prepare_confusion_mat(self, labels, scores, add_to_end=True, ): sorted_labels, sorted_scores = sort_score_and_label(labels, scores) score_threshold, cuts = None, None if self.cut_method == 'step': score_threshold, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01) if add_to_end: score_threshold.append(min(score_threshold) - 0.001) cuts.append(1) elif self.cut_method == 'quantile': score_threshold = ThresholdCutter.cut_by_quantile(sorted_scores, remove_duplicate=self.remove_duplicate) score_threshold = list(np.flip(score_threshold)) confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores, score_threshold, ret=['tp', 'fp', 'fn', 'tn'], pos_label=self.pos_label) return confusion_mat, score_threshold, cuts def compute(self, labels, scores, ): confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores, ) metric_scores = self.compute_metric_from_confusion_mat(confusion_mat) return list(metric_scores), score_threshold, cuts def compute_metric_from_confusion_mat(self, *args): raise NotImplementedError() class Lift(BiClassMetric): """ Compute lift """ @staticmethod def _lift_helper(val): tp, fp, fn, tn, labels_num = val[0], val[1], val[2], val[3], val[4] lift_x_type, lift_y_type = [], [] for label_type in ['1', '0']: if label_type == '0': tp, tn = tn, tp fp, fn = fn, fp if labels_num == 0: lift_x = 1 denominator = 1 else: lift_x = (tp + fp) / labels_num denominator = (tp + fn) / labels_num if tp + fp == 0: numerator = 1 else: numerator = tp / (tp + fp) if denominator == 0: lift_y = sys.float_info.max else: lift_y = numerator / denominator lift_x_type.insert(0, lift_x) lift_y_type.insert(0, lift_y) return lift_x_type, lift_y_type def compute(self, labels, pred_scores, pos_label=1): confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, ) lifts_y, lifts_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels), ) return lifts_y, lifts_x, list(score_threshold) def compute_metric_from_confusion_mat(self, confusion_mat, labels_len, ): labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len rs = map(self._lift_helper, zip(confusion_mat['tp'], confusion_mat['fp'], confusion_mat['fn'], confusion_mat['tn'], labels_nums)) rs = list(rs) lifts_x, lifts_y = [i[0] for i in rs], [i[1] for i in rs] return lifts_y, lifts_x class Gain(BiClassMetric): """ Compute Gain """ @staticmethod def _gain_helper(val): tp, fp, fn, tn, num_label = val[0], val[1], val[2], val[3], val[4] gain_x_type, gain_y_type = [], [] for pos_label in ['1', '0']: if pos_label == '0': tp, tn = tn, tp fp, fn = fn, fp if num_label == 0: gain_x = 1 else: gain_x = float((tp + fp) / num_label) 
num_positives = tp + fn if num_positives == 0: gain_y = 1 else: gain_y = float(tp / num_positives) gain_x_type.insert(0, gain_x) gain_y_type.insert(0, gain_y) return gain_x_type, gain_y_type def compute(self, labels, pred_scores, pos_label=1): confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, ) gain_y, gain_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels)) return gain_y, gain_x, list(score_threshold) def compute_metric_from_confusion_mat(self, confusion_mat, labels_len): labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len rs = map(self._gain_helper, zip(confusion_mat['tp'], confusion_mat['fp'], confusion_mat['fn'], confusion_mat['tn'], labels_nums)) rs = list(rs) gain_x, gain_y = [i[0] for i in rs], [i[1] for i in rs] return gain_y, gain_x class BiClassPrecision(BiClassMetric): """ Compute binary classification precision """ def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True, impute_val=1.0): numerator = confusion_mat['tp'] denominator = (confusion_mat['tp'] + confusion_mat['fp']) zero_indexes = (denominator == 0) denominator[zero_indexes] = 1 precision_scores = numerator / denominator precision_scores[zero_indexes] = impute_val # impute_val is for prettifying when drawing pr curves if formatted: score_formatted = [[0, i] for i in precision_scores] return score_formatted else: return precision_scores class MultiClassPrecision(object): """ Compute multi-classification precision """ def compute(self, labels, pred_scores): all_labels = sorted(set(labels).union(set(pred_scores))) return precision_score(labels, pred_scores, average=None), all_labels class BiClassRecall(BiClassMetric): """ Compute binary classification recall """ def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True): recall_scores = confusion_mat['tp'] / (confusion_mat['tp'] + confusion_mat['fn']) if formatted: score_formatted = [[0, i] for i in recall_scores] return score_formatted else: return recall_scores class MultiClassRecall(object): """ Compute multi-classification recall """ def compute(self, labels, pred_scores): all_labels = sorted(set(labels).union(set(pred_scores))) return recall_score(labels, pred_scores, average=None), all_labels class BiClassAccuracy(BiClassMetric): """ Compute binary classification accuracy """ def compute(self, labels, scores, normalize=True): confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores) metric_scores = self.compute_metric_from_confusion_mat(confusion_mat, normalize=normalize) return list(metric_scores), score_threshold[: len(metric_scores)], cuts[: len(metric_scores)] def compute_metric_from_confusion_mat(self, confusion_mat, normalize=True): rs = (confusion_mat['tp'] + confusion_mat['tn']) / \ (confusion_mat['tp'] + confusion_mat['tn'] + confusion_mat['fn'] + confusion_mat['fp']) if normalize \ else (confusion_mat['tp'] + confusion_mat['tn']) return rs[:-1] class MultiClassAccuracy(object): """ Compute multi-classification accuracy """ def compute(self, labels, pred_scores, normalize=True): return accuracy_score(labels, pred_scores, normalize=normalize) class FScore(object): """ Compute F score from bi-class confusion mat """ @staticmethod def compute(labels, pred_scores, beta=1, pos_label=1): sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores) _, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01) fixed_interval_threshold = ThresholdCutter.fixed_interval_threshold() confusion_mat = 
ConfusionMatrix.compute(sorted_labels, sorted_scores, fixed_interval_threshold,
                                                ret=['tp', 'fp', 'fn', 'tn'], pos_label=pos_label)

        precision_computer = BiClassPrecision()
        recall_computer = BiClassRecall()
        p_score = precision_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)
        r_score = recall_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)

        beta_2 = beta * beta
        denominator = (beta_2 * p_score + r_score)
        denominator[denominator == 0] = 1e-6  # in case denominator is 0
        numerator = (1 + beta_2) * (p_score * r_score)
        f_score = numerator / denominator

        return f_score, fixed_interval_threshold, cuts


class PSI(object):

    def compute(self, train_scores: list, validate_scores: list, train_labels=None, validate_labels=None,
                debug=False, str_intervals=False, round_num=3, pos_label=1):
        """
        train/validate scores: predicted scores on the train/validate set
        train/validate labels: true labels
        debug: print debug messages
        if train & validate labels are not None, count the positive sample percentage in every interval
        pos_label: positive label
        round_num: rounding precision
        str_intervals: return intervals as strings
        """

        train_scores = np.array(train_scores)
        validate_scores = np.array(validate_scores)
        quantile_points = ThresholdCutter().cut_by_quantile(train_scores)

        train_count = self.quantile_binning_and_count(train_scores, quantile_points)
        validate_count = self.quantile_binning_and_count(validate_scores, quantile_points)

        train_pos_perc, validate_pos_perc = None, None

        if train_labels is not None and validate_labels is not None:
            assert len(train_labels) == len(train_scores) and len(validate_labels) == len(validate_scores)
            train_labels, validate_labels = np.array(train_labels), np.array(validate_labels)
            train_pos_count = self.quantile_binning_and_count(train_scores[train_labels == pos_label],
                                                              quantile_points)
            validate_pos_count = self.quantile_binning_and_count(validate_scores[validate_labels == pos_label],
                                                                 quantile_points)

            train_pos_perc = np.array(train_pos_count['count']) / np.array(train_count['count'])
            validate_pos_perc = np.array(validate_pos_count['count']) / np.array(validate_count['count'])

            # handle special cases
            train_pos_perc[train_pos_perc == np.inf] = -1
            validate_pos_perc[validate_pos_perc == np.inf] = -1
            train_pos_perc[np.isnan(train_pos_perc)] = 0
            validate_pos_perc[np.isnan(validate_pos_perc)] = 0

        if debug:
            print(train_count)
            print(validate_count)

        assert (train_count['interval'] == validate_count['interval']), 'train count interval is not equal to ' \
                                                                        'validate count interval'

        expected_interval = np.array(train_count['count'])
        actual_interval = np.array(validate_count['count'])

        expected_interval = expected_interval.astype(float)
        actual_interval = actual_interval.astype(float)

        psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage \
            = self.psi_score(expected_interval, actual_interval, len(train_scores), len(validate_scores))

        intervals = train_count['interval'] if not str_intervals else PSI.intervals_to_str(train_count['interval'],
                                                                                           round_num=round_num)

        if train_labels is None and validate_labels is None:
            return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, \
                actual_percentage, intervals
        else:
            return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, \
                actual_percentage, train_pos_perc, validate_pos_perc, intervals

    @staticmethod
    def quantile_binning_and_count(scores, quantile_points):
        """
        left edge and right edge of the last interval are closed
        """

        assert len(quantile_points) >=
2 left_bounds = copy.deepcopy(quantile_points[:-1]) right_bounds = copy.deepcopy(quantile_points[1:]) last_interval_left = left_bounds.pop() last_interval_right = right_bounds.pop() bin_result_1, bin_result_2 = None, None if len(left_bounds) != 0 and len(right_bounds) != 0: bin_result_1 = pd.cut(scores, pd.IntervalIndex.from_arrays(left_bounds, right_bounds, closed='left')) bin_result_2 = pd.cut(scores, pd.IntervalIndex.from_arrays([last_interval_left], [last_interval_right], closed='both')) count1 = None if bin_result_1 is None else bin_result_1.value_counts().reset_index() count2 = bin_result_2.value_counts().reset_index() # if predict scores are the same, count1 will be None, only one interval exists final_interval = list(count1['index']) + list(count2['index']) if count1 is not None else list(count2['index']) final_count = list(count1[0]) + list(count2[0]) if count1 is not None else list(count2[0]) rs = {'interval': final_interval, 'count': final_count} return rs @staticmethod def interval_psi_score(val): expected, actual = val[0], val[1] return (actual - expected) * np.log(actual / expected) @staticmethod def intervals_to_str(intervals, round_num=3): str_intervals = [] for interval in intervals: left_bound, right_bound = '[', ']' if interval.closed == 'left': right_bound = ')' elif interval.closed == 'right': left_bound = '(' str_intervals.append("{}{}, {}{}".format(left_bound, round(interval.left, round_num), round(interval.right, round_num), right_bound)) return str_intervals @staticmethod def psi_score(expected_interval: np.ndarray, actual_interval: np.ndarray, expect_total_num, actual_total_num, debug=False): expected_interval[expected_interval == 0] = 1e-6 # in case no overlap samples actual_interval[actual_interval == 0] = 1e-6 # in case no overlap samples expected_percentage = expected_interval / expect_total_num actual_percentage = actual_interval / actual_total_num if debug: print(expected_interval) print(actual_interval) print(expected_percentage) print(actual_percentage) psi_scores = list(map(PSI.interval_psi_score, zip(expected_percentage, actual_percentage))) psi_scores = np.array(psi_scores) total_psi = psi_scores.sum() return psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage class KSTest(object): @staticmethod def compute(train_scores, validate_scores): """ train/validate scores: predicted scores on train/validate set """ return stats.ks_2samp(train_scores, validate_scores).pvalue class AveragePrecisionScore(object): @staticmethod def compute(train_scores, validate_scores, train_labels, validate_labels): """ train/validate scores: predicted scores on train/validate set train/validate labels: true labels """ train_mAP = average_precision_score(train_labels, train_scores) validate_mAP = average_precision_score(validate_labels, validate_scores) return abs(train_mAP - validate_mAP) class Distribution(object): @staticmethod def compute(train_scores: list, validate_scores: list): """ train/validate scores: predicted scores on train/validate set """ train_scores = np.array(train_scores) validate_scores = np.array(validate_scores) validate_scores = dict(validate_scores) count = 0 for key, value in train_scores: if key in validate_scores.keys() and value != validate_scores.get(key): count += 1 return count / len(train_scores)
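# --- illustrative sketch (not part of the original module) ---
# Minimal usage of the KS computation above on a toy, perfectly separable
# binary problem; the expected KS value is 1.0 because the tpr reaches 1.0
# while the fpr is still 0.0. The labels and scores are hypothetical.
if __name__ == "__main__":
    labels = [1, 1, 0, 0]
    scores = [0.9, 0.8, 0.3, 0.1]
    ks_val, fpr, tpr, thresholds, cuts = KS.compute(labels, scores)
    print("KS:", ks_val)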
21,650
35.758913
119
py
FATE
FATE-master/python/federatedml/evaluation/metrics/__init__.py
0
0
0
py
FATE
FATE-master/python/federatedml/callbacks/callback_list.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from federatedml.callbacks.validation_strategy import ValidationStrategy from federatedml.callbacks.model_checkpoint import ModelCheckpoint from federatedml.param.callback_param import CallbackParam from federatedml.util import LOGGER class CallbackList(object): def __init__(self, role, mode, model): self.role = role self.mode = mode self.model = model self.callback_list = [] def init_callback_list(self, callback_param: CallbackParam): LOGGER.debug(f"self_model: {self.model}") if "EarlyStopping" in callback_param.callbacks or \ "PerformanceEvaluate" in callback_param.callbacks: has_arbiter = self.model.component_properties.has_arbiter validation_strategy = ValidationStrategy(self.role, self.mode, callback_param.validation_freqs, callback_param.early_stopping_rounds, callback_param.use_first_metric_only, arbiter_comm=has_arbiter) self.callback_list.append(validation_strategy) if "ModelCheckpoint" in callback_param.callbacks: model_checkpoint = ModelCheckpoint(model=self.model, save_freq=callback_param.save_freq) self.callback_list.append(model_checkpoint) def get_validation_strategy(self): for callback_func in self.callback_list: if isinstance(callback_func, ValidationStrategy): return callback_func return None def on_train_begin(self, train_data=None, validate_data=None): for callback_func in self.callback_list: callback_func.on_train_begin(train_data, validate_data) def on_epoch_end(self, epoch): for callback_func in self.callback_list: callback_func.on_epoch_end(self.model, epoch) def on_epoch_begin(self, epoch): for callback_func in self.callback_list: callback_func.on_epoch_begin(self.model, epoch) def on_train_end(self): for callback_func in self.callback_list: callback_func.on_train_end(self.model)
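# --- illustrative sketch (not part of the original module) ---
# How the dispatch above fans out to callbacks: any object implementing the
# CallbackBase interface can be appended to callback_list directly. The
# _EchoCallback class and the stub model object below are hypothetical.
if __name__ == "__main__":
    from federatedml.callbacks.callback_base import CallbackBase

    class _EchoCallback(CallbackBase):
        def on_epoch_end(self, model, epoch):
            LOGGER.info(f"epoch {epoch} finished for model {model}")

    callback_list = CallbackList(role="guest", mode="hetero", model=object())
    callback_list.callback_list.append(_EchoCallback())
    for epoch in range(3):
        callback_list.on_epoch_end(epoch)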
2,901
42.313433
90
py
FATE
FATE-master/python/federatedml/callbacks/validation_strategy.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ################################################################################ # # ################################################################################ import copy from federatedml.util import LOGGER from federatedml.util import consts from federatedml.param.evaluation_param import EvaluateParam from federatedml.evaluation.performance_recorder import PerformanceRecorder from federatedml.transfer_variable.transfer_class.validation_strategy_transfer_variable import \ ValidationStrategyVariable from federatedml.callbacks.callback_base import CallbackBase from federatedml.feature.instance import Instance class ValidationStrategy(CallbackBase): """ This module is used for evaluating the performance of model during training process. it will be called only in fit process of models. Attributes ---------- validation_freqs: None or positive integer or container object in python. Do validation in training process or Not. if equals None, will not do validation in train process; if equals positive integer, will validate data every validation_freqs epochs passes; if container object in python, will validate data if epochs belong to this container. e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15. 
Default: None train_data: None or Table, if train_data not equal to None, and judge need to validate data according to validation_freqs, training data will be used for evaluating validate_data: None or Table, if validate_data not equal to None, and judge need to validate data according to validation_freqs, validate data will be used for evaluating """ def __init__(self, role=None, mode=None, validation_freqs=None, early_stopping_rounds=None, use_first_metric_only=False, arbiter_comm=True): self.validation_freqs = validation_freqs self.role = role self.mode = mode self.flowid = '' self.train_data = None self.validate_data = None # early stopping related vars self.arbiter_comm = arbiter_comm self.sync_status = False self.early_stopping_rounds = early_stopping_rounds self.use_first_metric_only = use_first_metric_only self.first_metric = None self._evaluation_summary = {} # precompute scores self.cached_train_scores = None self.cached_validate_scores = None self.use_precompute_train_scores = False self.use_precompute_validate_scores = False if early_stopping_rounds is not None: if early_stopping_rounds <= 0: raise ValueError('early stopping error should be larger than 0') if self.mode == consts.HOMO: raise ValueError('early stopping is not supported for homo algorithms') self.sync_status = True LOGGER.debug("early stopping round is {}".format(self.early_stopping_rounds)) self.cur_best_model = None self.best_iteration = -1 self.metric_best_model = {} # best model of a certain metric self.metric_best_iter = {} # best iter of a certain metric self.performance_recorder = PerformanceRecorder() # recorder to record performances self.transfer_inst = ValidationStrategyVariable() def set_train_data(self, train_data): self.train_data = train_data def set_validate_data(self, validate_data): self.validate_data = validate_data if self.early_stopping_rounds and self.validate_data is None: raise ValueError('validate data is needed when early stopping is enabled') def set_flowid(self, flowid): self.flowid = flowid def need_run_validation(self, epoch): LOGGER.debug("validation_freqs is {}".format(self.validation_freqs)) if not self.validation_freqs: return False if isinstance(self.validation_freqs, int): return (epoch + 1) % self.validation_freqs == 0 return epoch in self.validation_freqs @staticmethod def generate_flowid(prefix, epoch, keywords="iteration", data_type="train"): return "_".join([prefix, keywords, str(epoch), data_type]) @staticmethod def make_data_set_name(need_cv, need_run_ovr, model_flowid, epoch): data_iteration_name = "_".join(["iteration", str(epoch)]) if not need_cv and not need_run_ovr: return data_iteration_name if need_cv: if not need_run_ovr: prefix = "_".join(["fold", model_flowid.split(".", -1)[-1]]) else: prefix = "_".join(["fold", model_flowid.split(".", -1)[-2]]) prefix = ".".join([prefix, model_flowid.split(".", -1)[-1]]) else: prefix = model_flowid.split(".", -1)[-1] return ".".join([prefix, data_iteration_name]) @staticmethod def extract_best_model(model): best_model = model.export_model() return {'model': {'best_model': best_model}} if best_model is not None else None def is_best_performance_updated(self, use_first_metric_only=False): if len(self.performance_recorder.no_improvement_round.items()) == 0: return False for metric, no_improve_val in self.performance_recorder.no_improvement_round.items(): if no_improve_val != 0: return False if use_first_metric_only: break return True def update_early_stopping_status(self, iteration, model): first_metric = True if self.role == 
consts.GUEST:
            LOGGER.info('showing early stopping status, {} shows cur best performances: {}'.format(
                self.role, self.performance_recorder.cur_best_performance))

            LOGGER.info('showing early stopping status, {} shows early stopping no improve rounds: {}'.format(
                self.role, self.performance_recorder.no_improvement_round))

        for metric, no_improve_round in self.performance_recorder.no_improvement_round.items():
            if no_improve_round == 0:
                self.metric_best_iter[metric] = iteration
                self.metric_best_model[metric] = self.extract_best_model(model)
                LOGGER.info('best model of metric {} is now updated to {}'.format(metric, iteration))
                # if early stopping is not triggered, return the best model of the first metric by default
                if first_metric:
                    LOGGER.info('default best model: metric {}, iter {}'.format(metric, iteration))
                    self.cur_best_model = self.metric_best_model[metric]
                    self.best_iteration = iteration
            first_metric = False

    def check_early_stopping(self):
        """
        check whether the early stopping condition (early_stopping_rounds) is satisfied

        Returns bool
        """
        LOGGER.info('checking early stopping')

        no_improvement_dict = self.performance_recorder.no_improvement_round
        for metric in no_improvement_dict:
            if no_improvement_dict[metric] >= self.early_stopping_rounds:
                self.best_iteration = self.metric_best_iter[metric]
                self.cur_best_model = self.metric_best_model[metric]
                LOGGER.info('early stopping triggered, model of iter {} is chosen because metric {} satisfied '
                            'the stop condition'.format(self.best_iteration, metric))
                return True

        return False

    def sync_performance_recorder(self, epoch):
        """
        synchronize self.performance_recorder with the other parties
        """
        if self.mode == consts.HETERO and self.role == consts.GUEST:
            recorder_to_send = copy.deepcopy(self.performance_recorder)
            recorder_to_send.cur_best_performance = None
            if self.arbiter_comm:
                self.transfer_inst.validation_status.remote(recorder_to_send, idx=-1, suffix=(epoch,))
            else:
                self.transfer_inst.validation_status.remote(recorder_to_send, idx=-1, suffix=(epoch,),
                                                            role=consts.HOST)

        elif self.mode == consts.HETERO:
            self.performance_recorder = self.transfer_inst.validation_status.get(idx=-1, suffix=(epoch,))[0]

        else:
            return

    def need_stop(self):
        return False if not self.early_stopping_rounds else self.check_early_stopping()

    def has_saved_best_model(self):
        return (self.early_stopping_rounds is not None) and (self.cur_best_model is not None)

    def export_best_model(self):
        if self.has_saved_best_model():
            return self.cur_best_model
        else:
            return None

    def summary(self):
        return self._evaluation_summary

    def update_metric_summary(self, metric_dict):

        iter_name = list(metric_dict.keys())[0]
        metric_dict = metric_dict[iter_name]

        if len(self._evaluation_summary) == 0:
            self._evaluation_summary = {namespace: {} for namespace in metric_dict}

        for namespace in metric_dict:
            for metric_name in metric_dict[namespace]:
                epoch_metric = metric_dict[namespace][metric_name]
                if metric_name not in self._evaluation_summary[namespace]:
                    self._evaluation_summary[namespace][metric_name] = []
                self._evaluation_summary[namespace][metric_name].append(epoch_metric)

    def evaluate(self, predicts, model, epoch):

        evaluate_param: EvaluateParam = model.get_metrics_param()
        evaluate_param.check_single_value_default_metric()

        from federatedml.evaluation.evaluation import Evaluation
        eval_obj = Evaluation()
        eval_type = evaluate_param.eval_type

        metric_list = evaluate_param.metrics
        if self.early_stopping_rounds and self.use_first_metric_only and len(metric_list) != 0:

            single_metric_list = None
            if eval_type == consts.BINARY:
                single_metric_list = consts.BINARY_SINGLE_VALUE_METRIC
            elif eval_type
== consts.REGRESSION: single_metric_list = consts.REGRESSION_SINGLE_VALUE_METRICS elif eval_type == consts.MULTY: single_metric_list = consts.MULTI_SINGLE_VALUE_METRIC for metric in metric_list: if metric in single_metric_list: self.first_metric = metric LOGGER.debug('use {} as first metric'.format(self.first_metric)) break eval_obj._init_model(evaluate_param) eval_obj.set_tracker(model.tracker) data_set_name = self.make_data_set_name(model.need_cv, model.callback_one_vs_rest, model.flowid, epoch) eval_data = {data_set_name: predicts} eval_result_dict = eval_obj.fit(eval_data, return_result=True) epoch_summary = eval_obj.summary() self.update_metric_summary(epoch_summary) eval_obj.save_data() LOGGER.debug("end of eval") return eval_result_dict @staticmethod def _add_data_type_map_func(value, data_type): new_pred_rs = Instance(features=value.features + [data_type], inst_id=value.inst_id) return new_pred_rs @staticmethod def add_data_type(predicts, data_type: str): """ predict data add data_type """ predicts = predicts.mapValues(lambda value: ValidationStrategy._add_data_type_map_func(value, data_type)) return predicts def handle_precompute_scores(self, precompute_scores, data_type): if self.mode == consts.HETERO and self.role == consts.HOST: return None if self.role == consts.ARBITER: return None LOGGER.debug('using precompute scores') return self.add_data_type(precompute_scores, data_type) def get_predict_result(self, model, epoch, data, data_type: str): if not data: return LOGGER.debug("start to evaluate data {}".format(data_type)) model_flowid = model.flowid # model_flowid = ".".join(model.flowid.split(".", -1)[1:]) flowid = self.generate_flowid(model_flowid, epoch, "iteration", data_type) model.set_flowid(flowid) predicts = model.predict(data) model.set_flowid(model_flowid) if self.mode == consts.HOMO and self.role == consts.ARBITER: pass elif self.mode == consts.HETERO and self.role == consts.HOST: pass else: predicts = self.add_data_type(predicts, data_type) return predicts def set_precomputed_train_scores(self, train_scores): self.use_precompute_train_scores = True self.cached_train_scores = train_scores def set_precomputed_validate_scores(self, validate_scores): self.use_precompute_validate_scores = True self.cached_validate_scores = validate_scores def validate(self, model, epoch): """ :param model: model instance, which has predict function :param epoch: int, epoch idx for generating flow id """ LOGGER.debug( "begin to check validate status, need_run_validation is {}".format( self.need_run_validation(epoch))) if not self.need_run_validation(epoch): return if self.mode == consts.HOMO and self.role == consts.ARBITER: return if not self.use_precompute_train_scores: # call model.predict() train_predicts = self.get_predict_result(model, epoch, self.train_data, "train") else: # use precomputed scores train_predicts = self.handle_precompute_scores(self.cached_train_scores, 'train') if not self.use_precompute_validate_scores: # call model.predict() validate_predicts = self.get_predict_result(model, epoch, self.validate_data, "validate") else: # use precomputed scores validate_predicts = self.handle_precompute_scores(self.cached_validate_scores, 'validate') if train_predicts is not None or validate_predicts is not None: predicts = train_predicts if validate_predicts: predicts = predicts.union(validate_predicts) # running evaluation eval_result_dict = self.evaluate(predicts, model, epoch) LOGGER.debug('showing eval_result_dict here') LOGGER.debug(eval_result_dict) if 
self.early_stopping_rounds: if len(eval_result_dict) == 0: raise ValueError( "eval_result len is 0, no single value metric detected for early stopping checking") if self.use_first_metric_only: if self.first_metric: eval_result_dict = {self.first_metric: eval_result_dict[self.first_metric]} else: LOGGER.warning('use first metric only but no single metric found in metric list') self.performance_recorder.update(eval_result_dict) if self.sync_status: self.sync_performance_recorder(epoch) if self.early_stopping_rounds and self.mode == consts.HETERO: self.update_early_stopping_status(epoch, model) def on_train_begin(self, train_data=None, validate_data=None): if self.role != consts.ARBITER: self.set_train_data(train_data) self.set_validate_data(validate_data) def on_epoch_end(self, model, epoch): LOGGER.debug('running validation') self.validate(model, epoch) if self.need_stop(): LOGGER.debug('early stopping triggered') model.callback_variables.stop_training = True def on_train_end(self, model): if self.has_saved_best_model(): model.load_model(self.cur_best_model) model.callback_variables.best_iteration = self.best_iteration model.callback_variables.validation_summary = self.summary()
16,887
39.891041
119
py
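The early-stopping logic above rests on two pieces of bookkeeping inside the performance recorder: the best value seen so far per metric, and a per-metric counter of rounds without improvement. A minimal standalone sketch of that same bookkeeping follows; the `Recorder` class and the toy AUC series are hypothetical stand-ins for FATE's PerformanceRecorder and real evaluation output.

# Minimal sketch of the early-stopping bookkeeping used by ValidationStrategy.
# `Recorder` is a hypothetical stand-in for FATE's PerformanceRecorder.
class Recorder:
    def __init__(self):
        self.cur_best_performance = {}   # metric -> best value seen so far
        self.no_improvement_round = {}   # metric -> rounds since last improvement

    def update(self, metrics, larger_is_better=True):
        for name, val in metrics.items():
            best = self.cur_best_performance.get(name)
            improved = best is None or (val > best if larger_is_better else val < best)
            if improved:
                self.cur_best_performance[name] = val
                self.no_improvement_round[name] = 0
            else:
                self.no_improvement_round[name] = self.no_improvement_round.get(name, 0) + 1

def should_stop(recorder, early_stopping_rounds):
    # mirrors check_early_stopping: stop once any metric stalls long enough
    return any(r >= early_stopping_rounds for r in recorder.no_improvement_round.values())

rec = Recorder()
for epoch, auc in enumerate([0.70, 0.75, 0.76, 0.76, 0.76, 0.76]):
    rec.update({"auc": auc})
    if should_stop(rec, early_stopping_rounds=3):
        print(f"early stopping triggered at epoch {epoch}")  # fires at epoch 5
        break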
FATE
FATE-master/python/federatedml/callbacks/model_checkpoint.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from federatedml.callbacks.callback_base import CallbackBase from federatedml.util import LOGGER class ModelCheckpoint(CallbackBase): def __init__(self, model, save_freq): self.model = model if save_freq == "epoch": save_freq = 1 self.save_freq = save_freq self.save_count = 0 def add_checkpoint(self, step_index, step_name=None, to_save_model=None): step_name = step_name if step_name is not None else self.model.step_name to_save_model = to_save_model if to_save_model is not None else self.model.export_serialized_models() _checkpoint = self.model.checkpoint_manager.new_checkpoint(step_index=step_index, step_name=step_name) _checkpoint.save(to_save_model) LOGGER.debug(f"current checkpoint num: {self.model.checkpoint_manager.checkpoints_number}") return _checkpoint def on_epoch_end(self, model, epoch): if epoch % self.save_freq == 0: self.add_checkpoint(step_index=epoch) self.save_count += 1
1,657
39.439024
110
py
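The callback above writes a checkpoint only when `epoch % save_freq == 0` (with `save_freq == "epoch"` collapsing to every epoch). A small sketch of just that gating, using hypothetical stub objects in place of the model's checkpoint_manager:

# Sketch of the save-frequency gating in ModelCheckpoint.on_epoch_end.
# StubCheckpoint/StubManager are hypothetical; they only mimic the
# interface the callback touches.
class StubCheckpoint:
    def __init__(self, step_index, step_name):
        self.step_index, self.step_name = step_index, step_name
    def save(self, model_bytes):
        print(f"saved checkpoint at step {self.step_index} ({self.step_name})")

class StubManager:
    def __init__(self):
        self.checkpoints_number = 0
    def new_checkpoint(self, step_index, step_name):
        self.checkpoints_number += 1
        return StubCheckpoint(step_index, step_name)

save_freq = 2  # save every 2nd epoch
manager = StubManager()
for epoch in range(1, 7):
    if epoch % save_freq == 0:
        manager.new_checkpoint(step_index=epoch, step_name="train").save(b"...")
print("total checkpoints:", manager.checkpoints_number)  # -> 3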
FATE
FATE-master/python/federatedml/callbacks/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
614
40
75
py
FATE
FATE-master/python/federatedml/callbacks/callback_base.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class CallbackBase(object): def on_train_begin(self, train_data=None, validate_data=None): pass def on_epoch_begin(self, model, epoch): pass def on_epoch_end(self, model, epoch): pass def on_train_end(self, model): pass
887
29.62069
75
py
FATE
FATE-master/python/federatedml/model_selection/k_fold.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import functools import numpy as np from sklearn.model_selection import KFold as sk_KFold from fate_arch.session import computing_session as session from federatedml.evaluation.evaluation import Evaluation from federatedml.model_selection.cross_validate import BaseCrossValidator from federatedml.model_selection.indices import collect_index from federatedml.transfer_variable.transfer_class.cross_validation_transfer_variable import \ CrossValidationTransferVariable from federatedml.util import LOGGER from federatedml.util import consts class KFold(BaseCrossValidator): def __init__(self): super(KFold, self).__init__() self.model_param = None self.n_splits = 1 self.shuffle = True self.random_seed = 1 self.fold_history = None def _init_model(self, param): self.model_param = param self.n_splits = param.n_splits self.mode = param.mode self.role = param.role self.shuffle = param.shuffle self.random_seed = param.random_seed self.output_fold_history = param.output_fold_history self.history_value_type = param.history_value_type # self.evaluate_param = param.evaluate_param # np.random.seed(self.random_seed) def split(self, data_inst): # header = data_inst.schema.get('header') schema = data_inst.schema data_sids_iter, data_size = collect_index(data_inst) data_sids = [] key_type = None for sid, _ in data_sids_iter: if key_type is None: key_type = type(sid) data_sids.append(sid) data_sids = np.array(data_sids) # if self.shuffle: # np.random.shuffle(data_sids) random_state = self.random_seed if self.shuffle else None kf = sk_KFold(n_splits=self.n_splits, shuffle=self.shuffle, random_state=random_state) n = 0 for train, test in kf.split(data_sids): train_sids = data_sids[train] test_sids = data_sids[test] n += 1 train_sids_table = [(key_type(x), 1) for x in train_sids] test_sids_table = [(key_type(x), 1) for x in test_sids] train_table = session.parallelize(train_sids_table, include_key=True, partition=data_inst.partitions) train_data = data_inst.join(train_table, lambda x, y: x) test_table = session.parallelize(test_sids_table, include_key=True, partition=data_inst.partitions) test_data = data_inst.join(test_table, lambda x, y: x) train_data.schema = schema test_data.schema = schema yield train_data, test_data @staticmethod def generate_new_id(id, fold_num, data_type): return f"{id}#fold{fold_num}#{data_type}" def transform_history_data(self, data, predict_data, fold_num, data_type): if self.history_value_type == "score": if predict_data is not None: history_data = predict_data.map(lambda k, v: (KFold.generate_new_id(k, fold_num, data_type), v)) history_data.schema = copy.deepcopy(predict_data.schema) else: history_data = data.map(lambda k, v: (KFold.generate_new_id(k, fold_num, data_type), fold_num)) schema = copy.deepcopy(data.schema) schema["header"] = ["fold_num"] history_data.schema = schema elif self.history_value_type == "instance": history_data = data.map(lambda k, v: (KFold.generate_new_id(k, 
fold_num, data_type), v)) history_data.schema = copy.deepcopy(data.schema) else: raise ValueError(f"unknown history value type") return history_data @staticmethod def _append_name(instance, name): new_inst = copy.deepcopy(instance) new_inst.features.append(name) return new_inst def run(self, component_parameters, data_inst, original_model, host_do_evaluate): self._init_model(component_parameters) if data_inst is None: self._arbiter_run(original_model) return total_data_count = data_inst.count() LOGGER.debug(f"data_inst count: {total_data_count}") if self.output_fold_history: if total_data_count * self.n_splits > consts.MAX_SAMPLE_OUTPUT_LIMIT: LOGGER.warning( f"max sample output limit {consts.MAX_SAMPLE_OUTPUT_LIMIT} exceeded with n_splits ({self.n_splits}) * instance_count ({total_data_count})") if self.mode == consts.HOMO or self.role == consts.GUEST: data_generator = self.split(data_inst) else: data_generator = [(data_inst, data_inst)] * self.n_splits fold_num = 0 summary_res = {} for train_data, test_data in data_generator: model = copy.deepcopy(original_model) LOGGER.debug("In CV, set_flowid flowid is : {}".format(fold_num)) model.set_flowid(fold_num) model.set_cv_fold(fold_num) LOGGER.info("KFold fold_num is: {}".format(fold_num)) if self.mode == consts.HETERO: train_data = self._align_data_index(train_data, model.flowid, consts.TRAIN_DATA) LOGGER.info("Train data Synchronized") test_data = self._align_data_index(test_data, model.flowid, consts.TEST_DATA) LOGGER.info("Test data Synchronized") train_data_count = train_data.count() test_data_count = test_data.count() LOGGER.debug(f"train_data count: {train_data_count}") if train_data_count + test_data_count != total_data_count: raise EnvironmentError("In cv fold: {}, train count: {}, test count: {}, original data count: {}." "Thus, 'train count + test count = total count' condition is not satisfied" .format(fold_num, train_data_count, test_data_count, total_data_count)) this_flowid = 'train.' + str(fold_num) LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid)) model.set_flowid(this_flowid) model.fit(train_data, test_data) this_flowid = 'predict_train.' + str(fold_num) LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid)) model.set_flowid(this_flowid) train_pred_res = model.predict(train_data) # if train_pred_res is not None: if self.role == consts.GUEST or host_do_evaluate: fold_name = "_".join(['train', 'fold', str(fold_num)]) f = functools.partial(self._append_name, name='train') train_pred_res = train_pred_res.mapValues(f) train_pred_res = model.set_predict_data_schema(train_pred_res, train_data.schema) # LOGGER.debug(f"train_pred_res schema: {train_pred_res.schema}") self.evaluate(train_pred_res, fold_name, model) this_flowid = 'predict_validate.' 
+ str(fold_num) LOGGER.debug("In CV, set_flowid flowid is : {}".format(this_flowid)) model.set_flowid(this_flowid) test_pred_res = model.predict(test_data) # if pred_res is not None: if self.role == consts.GUEST or host_do_evaluate: fold_name = "_".join(['validate', 'fold', str(fold_num)]) f = functools.partial(self._append_name, name='validate') test_pred_res = test_pred_res.mapValues(f) test_pred_res = model.set_predict_data_schema(test_pred_res, test_data.schema) # LOGGER.debug(f"train_pred_res schema: {test_pred_res.schema}") self.evaluate(test_pred_res, fold_name, model) LOGGER.debug("Finish fold: {}".format(fold_num)) if self.output_fold_history: LOGGER.debug(f"generating fold history for fold {fold_num}") fold_train_data = self.transform_history_data(train_data, train_pred_res, fold_num, "train") fold_validate_data = self.transform_history_data(test_data, test_pred_res, fold_num, "validate") fold_history_data = fold_train_data.union(fold_validate_data) fold_history_data.schema = fold_train_data.schema if self.fold_history is None: self.fold_history = fold_history_data else: new_fold_history = self.fold_history.union(fold_history_data) new_fold_history.schema = fold_history_data.schema self.fold_history = new_fold_history summary_res[f"fold_{fold_num}"] = model.summary() fold_num += 1 summary_res['fold_num'] = fold_num LOGGER.debug("Finish all fold running") original_model.set_summary(summary_res) if self.output_fold_history: LOGGER.debug(f"output data schema: {self.fold_history.schema}") # LOGGER.debug(f"output data: {list(self.fold_history.collect())}") # LOGGER.debug(f"output data is: {self.fold_history}") return self.fold_history else: return data_inst def _arbiter_run(self, original_model): for fold_num in range(self.n_splits): LOGGER.info("KFold flowid is: {}".format(fold_num)) model = copy.deepcopy(original_model) this_flowid = 'train.' + str(fold_num) model.set_flowid(this_flowid) model.set_cv_fold(fold_num) model.fit(None) this_flowid = 'predict_train.' + str(fold_num) model.set_flowid(this_flowid) model.predict(None) this_flowid = 'predict_validate.' 
+ str(fold_num) model.set_flowid(this_flowid) model.predict(None) def _align_data_index(self, data_instance, flowid, data_application=None): schema = data_instance.schema if data_application is None: raise ValueError("In _align_data_index, data_application should be provided.") transfer_variable = CrossValidationTransferVariable() if data_application == consts.TRAIN_DATA: transfer_id = transfer_variable.train_sid elif data_application == consts.TEST_DATA: transfer_id = transfer_variable.test_sid else: raise ValueError("In _align_data_index, data_application must be TRAIN_DATA or TEST_DATA, got {}.".format(data_application)) if self.role == consts.GUEST: data_sid = data_instance.mapValues(lambda v: 1) transfer_id.remote(data_sid, role=consts.HOST, idx=-1, suffix=(flowid,)) LOGGER.info("remote {} to host".format(data_application)) return data_instance elif self.role == consts.HOST: data_sid = transfer_id.get(idx=0, suffix=(flowid,)) LOGGER.info("get {} from guest".format(data_application)) join_data_insts = data_sid.join(data_instance, lambda s, d: d) join_data_insts.schema = schema return join_data_insts def evaluate(self, validate_data, fold_name, model): if validate_data is None: return eval_obj = Evaluation() eval_param = model.get_metrics_param() eval_param.check_single_value_default_metric() eval_obj._init_model(eval_param) eval_obj.set_tracker(model.tracker) validate_data = {fold_name: validate_data} eval_obj.fit(validate_data) eval_obj.save_data()
12,789
43.72028
159
py
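KFold.split above folds over sample ids rather than rows: it collects and sorts the ids, lets sklearn's KFold partition the id array, then joins each id subset back against the distributed table. The same id-level mechanics can be seen standalone; here a plain dict lookup stands in for the distributed join, which is an assumption made purely for illustration.

# Standalone view of the id-level splitting performed by KFold.split.
import numpy as np
from sklearn.model_selection import KFold as sk_KFold

data = {str(i): i * 10 for i in range(10)}      # toy table: sid -> record
data_sids = np.array(sorted(data.keys()))       # collect_index sorts by sid

kf = sk_KFold(n_splits=5, shuffle=True, random_state=32)
for fold, (train_idx, test_idx) in enumerate(kf.split(data_sids)):
    train_sids, test_sids = data_sids[train_idx], data_sids[test_idx]
    train_fold = {sid: data[sid] for sid in train_sids}  # stands in for the join
    test_fold = {sid: data[sid] for sid in test_sids}
    print(f"fold {fold}: {len(train_fold)} train / {len(test_fold)} test")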
FATE
FATE-master/python/federatedml/model_selection/mini_batch.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import random from fate_arch.session import computing_session as session from federatedml.model_selection import indices from federatedml.util import LOGGER class MiniBatch: def __init__(self, data_inst, batch_size=320, shuffle=False, batch_strategy="full", masked_rate=0): self.batch_data_sids = None self.batch_nums = 0 self.data_inst = data_inst self.all_batch_data = None self.all_index_data = None self.data_sids_iter = None self.batch_data_generator = None self.batch_mutable = False self.batch_masked = False if batch_size == -1: self.batch_size = data_inst.count() else: self.batch_size = batch_size self.__init_mini_batch_data_separator(data_inst, self.batch_size, batch_strategy, masked_rate, shuffle) def mini_batch_data_generator(self, result='data'): """ Generate mini-batch data or index Parameters ---------- result : str, 'data' or 'index', default: 'data' Specify whether to yield batch data or batch index. Returns ------- A generator yielding batch data tables, batch index tables, or (data, index) pairs, depending on `result`. """ LOGGER.debug("Currently, batch_num is: {}".format(self.batch_nums)) if result == 'index': for index_table in self.all_index_data: yield index_table elif result == "data": for batch_data in self.all_batch_data: yield batch_data else: for batch_data, index_table in zip(self.all_batch_data, self.all_index_data): yield batch_data, index_table # if self.batch_mutable: # self.__generate_batch_data() def __init_mini_batch_data_separator(self, data_insts, batch_size, batch_strategy, masked_rate, shuffle): self.data_sids_iter, data_size = indices.collect_index(data_insts) self.batch_data_generator = get_batch_generator( data_size, batch_size, batch_strategy, masked_rate, shuffle=shuffle) self.batch_nums = self.batch_data_generator.batch_nums self.batch_mutable = self.batch_data_generator.batch_mutable() self.masked_batch_size = self.batch_data_generator.masked_batch_size if self.batch_mutable is False: self.__generate_batch_data() def generate_batch_data(self): if self.batch_mutable: self.__generate_batch_data() def __generate_batch_data(self): self.all_index_data, self.all_batch_data = self.batch_data_generator.generate_data( self.data_inst, self.data_sids_iter) def get_batch_generator(data_size, batch_size, batch_strategy, masked_rate, shuffle): if batch_size >= data_size: LOGGER.warning("As batch_size >= data size, the batch strategy is disabled and a single full batch is used") return FullBatchDataGenerator(data_size, data_size, shuffle=False) # if round((masked_rate + 1) * batch_size) >= data_size: # LOGGER.warning("Masked dataset's batch_size >= data size, batch shuffle will be disabled") # return FullBatchDataGenerator(data_size, data_size, shuffle=False, masked_rate=masked_rate) if batch_strategy == "full": if masked_rate > 0: LOGGER.warning("If using full batch strategy and masked rate > 0, shuffle will always be true") shuffle = True return FullBatchDataGenerator(data_size, batch_size, shuffle=shuffle, masked_rate=masked_rate) else: if shuffle:
LOGGER.warning("if use random select batch strategy, shuffle will not work") return RandomBatchDataGenerator(data_size, batch_size, masked_rate) class BatchDataGenerator(object): def __init__(self, data_size, batch_size, shuffle=False, masked_rate=0): self.batch_nums = None self.masked_batch_size = min(data_size, round((1 + masked_rate) * batch_size)) self.batch_size = batch_size self.shuffle = shuffle def batch_mutable(self): return True @staticmethod def _generate_batch_data_with_batch_ids(data_insts, batch_ids, masked_ids=None): batch_index_table = session.parallelize(batch_ids, include_key=True, partition=data_insts.partitions) batch_data_table = batch_index_table.join(data_insts, lambda x, y: y) if masked_ids: masked_index_table = session.parallelize(masked_ids, include_key=True, partition=data_insts.partitions) return masked_index_table, batch_data_table else: return batch_index_table, batch_data_table class FullBatchDataGenerator(BatchDataGenerator): def __init__(self, data_size, batch_size, shuffle=False, masked_rate=0): super(FullBatchDataGenerator, self).__init__(data_size, batch_size, shuffle, masked_rate=masked_rate) self.batch_nums = (data_size + batch_size - 1) // batch_size LOGGER.debug(f"Init Full Batch Data Generator, batch_nums: {self.batch_nums}, batch_size: {self.batch_size}, " f"masked_batch_size: {self.masked_batch_size}, shuffle: {self.shuffle}") def generate_data(self, data_insts, data_sids): if self.shuffle: random.SystemRandom().shuffle(data_sids) index_table = [] batch_data = [] if self.batch_size != self.masked_batch_size: for bid in range(self.batch_nums): batch_ids = data_sids[bid * self.batch_size:(bid + 1) * self.batch_size] masked_ids_set = set() for sid, _ in batch_ids: masked_ids_set.add(sid) possible_ids = random.SystemRandom().sample(data_sids, self.masked_batch_size) for pid, _ in possible_ids: if pid not in masked_ids_set: masked_ids_set.add(pid) if len(masked_ids_set) == self.masked_batch_size: break masked_ids = zip(list(masked_ids_set), [None] * len(masked_ids_set)) masked_index_table, batch_data_table = self._generate_batch_data_with_batch_ids(data_insts, batch_ids, masked_ids) index_table.append(masked_index_table) batch_data.append(batch_data_table) else: for bid in range(self.batch_nums): batch_ids = data_sids[bid * self.batch_size: (bid + 1) * self.batch_size] batch_index_table, batch_data_table = self._generate_batch_data_with_batch_ids(data_insts, batch_ids) index_table.append(batch_index_table) batch_data.append(batch_data_table) return index_table, batch_data def batch_mutable(self): return self.masked_batch_size > self.batch_size or self.shuffle class RandomBatchDataGenerator(BatchDataGenerator): def __init__(self, data_size, batch_size, masked_rate=0): super(RandomBatchDataGenerator, self).__init__(data_size, batch_size, shuffle=False, masked_rate=masked_rate) self.batch_nums = 1 LOGGER.debug(f"Init Random Batch Data Generator, batch_nums: {self.batch_nums}, batch_size: {self.batch_size}, " f"masked_batch_size: {self.masked_batch_size}") def generate_data(self, data_insts, data_sids): if self.masked_batch_size == self.batch_size: batch_ids = random.SystemRandom().sample(data_sids, self.batch_size) batch_index_table, batch_data_table = self._generate_batch_data_with_batch_ids(data_insts, batch_ids) batch_data_table = batch_index_table.join(data_insts, lambda x, y: y) return [batch_index_table], [batch_data_table] else: masked_ids = random.SystemRandom().sample(data_sids, self.masked_batch_size) batch_ids = masked_ids[: self.batch_size] 
masked_index_table, batch_data_table = self._generate_batch_data_with_batch_ids(data_insts, batch_ids, masked_ids) return [masked_index_table], [batch_data_table]
9,230
44.69802
120
py
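The batch generators above derive two numbers from their parameters: masked_batch_size = min(data_size, round((1 + masked_rate) * batch_size)), and, for the full strategy, batch_nums via ceiling division; batches must be regenerated each epoch whenever masking or shuffling is active. A short sketch of just that arithmetic (note that in the real code, batch_size >= data_size is caught earlier and collapsed to a single full batch):

# Sketch of the sizing rules shared by the batch generators above.
def batch_plan(data_size, batch_size, masked_rate=0.0, shuffle=False):
    masked_batch_size = min(data_size, round((1 + masked_rate) * batch_size))
    batch_nums = (data_size + batch_size - 1) // batch_size  # ceiling division
    # batches are regenerated each epoch if masking or shuffling is active
    batch_mutable = masked_batch_size > batch_size or shuffle
    return masked_batch_size, batch_nums, batch_mutable

print(batch_plan(1000, 320))                    # (320, 4, False)
print(batch_plan(1000, 320, masked_rate=0.5))   # (480, 4, True)
print(batch_plan(1000, 320, shuffle=True))      # (320, 4, True)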
FATE
FATE-master/python/federatedml/model_selection/indices.py
""" This module provide some utilized methods that operate the index of distributed data """ # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def collect_index(data_insts): data_sids = data_insts.mapValues(lambda data_inst: None) # data_size = data_sids.count() # Record data nums that left data_sids_iter = data_sids.collect() data_sids_iter = sorted(data_sids_iter, key=lambda x: x[0]) data_size = len(data_sids_iter) return data_sids_iter, data_size
1,049
34
84
py
FATE
FATE-master/python/federatedml/model_selection/start_cross_validation.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from federatedml.model_selection.k_fold import KFold from federatedml.util import LOGGER def _get_cv_param(model): model.model_param.cv_param.role = model.role model.model_param.cv_param.mode = model.mode return model.model_param.cv_param def run(model, data_instances, host_do_evaluate=False): if not model.need_run: return data_instances kfold_obj = KFold() cv_param = _get_cv_param(model) output_data = kfold_obj.run(cv_param, data_instances, model, host_do_evaluate) LOGGER.info("Finish KFold run") return output_data
1,233
31.473684
82
py
FATE
FATE-master/python/federatedml/model_selection/cross_validate.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np from federatedml.util import LOGGER from federatedml.util import consts class BaseCrossValidator(object): def __init__(self): self.mode = None self.role = None def split(self, data_inst): pass def display_cv_result(self, cv_results): LOGGER.debug("cv_result: {}".format(cv_results)) if self.role == consts.GUEST or (self.role == consts.HOST and self.mode == consts.HOMO): format_cv_result = {} for eval_result in cv_results: for eval_name, eval_r in eval_result.items(): if not isinstance(eval_r, list): if eval_name not in format_cv_result: format_cv_result[eval_name] = [] format_cv_result[eval_name].append(eval_r) else: for e_r in eval_r: e_name = "{}_thres_{}".format(eval_name, e_r[0]) if e_name not in format_cv_result: format_cv_result[e_name] = [] format_cv_result[e_name].append(e_r[1]) for eval_name, eva_result_list in format_cv_result.items(): mean_value = np.around(np.mean(eva_result_list), 4) std_value = np.around(np.std(eva_result_list), 4) LOGGER.info("{}, evaluate name: {}, mean: {}, std: {}".format(self.role, eval_name, mean_value, std_value))
2,194
39.648148
111
py
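display_cv_result above reduces each fold's metric values to a mean and standard deviation, expanding threshold-indexed metrics (lists of (threshold, value) pairs) into separate keys. The same reduction on toy fold results, with made-up metric values purely for illustration:

# Standalone version of the aggregation inside display_cv_result.
import numpy as np

cv_results = [
    {"auc": 0.91, "ks": [(0.3, 0.52), (0.5, 0.48)]},
    {"auc": 0.89, "ks": [(0.3, 0.50), (0.5, 0.47)]},
]
fmt = {}
for fold in cv_results:
    for name, val in fold.items():
        if isinstance(val, list):                       # threshold-indexed metric
            for thres, v in val:
                fmt.setdefault(f"{name}_thres_{thres}", []).append(v)
        else:
            fmt.setdefault(name, []).append(val)
for name, vals in fmt.items():
    print(name, np.around(np.mean(vals), 4), np.around(np.std(vals), 4))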
FATE
FATE-master/python/federatedml/model_selection/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from federatedml.model_selection.k_fold import KFold from federatedml.model_selection.mini_batch import MiniBatch __all__ = ['MiniBatch', "KFold"]
765
35.47619
75
py
FATE
FATE-master/python/federatedml/model_selection/test/mini_batch_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import time import unittest import numpy as np from fate_arch.session import computing_session as session from federatedml.feature.instance import Instance from federatedml.model_selection import MiniBatch from federatedml.model_selection import indices session.init("123") class TestMiniBatch(unittest.TestCase): def prepare_data(self, data_num, feature_num): final_result = [] for i in range(data_num): tmp = i * np.ones(feature_num) inst = Instance(inst_id=i, features=tmp, label=0) tmp = (i, inst) final_result.append(tmp) table = session.parallelize(final_result, include_key=True, partition=3) return table def test_mini_batch_data_generator(self, data_num=100, batch_size=320): t0 = time.time() feature_num = 20 expect_batches = data_num // batch_size # print("expect_batches: {}".format(expect_batches)) data_instances = self.prepare_data(data_num=data_num, feature_num=feature_num) # print("Prepare data time: {}".format(time.time() - t0)) mini_batch_obj = MiniBatch(data_inst=data_instances, batch_size=batch_size) batch_data_generator = mini_batch_obj.mini_batch_data_generator() batch_id = 0 pre_time = time.time() - t0 # print("Prepare mini batch time: {}".format(pre_time)) total_num = 0 for batch_data in batch_data_generator: batch_num = batch_data.count() if batch_id < expect_batches - 1: # print("In mini batch test, batch_num: {}, batch_size:{}".format( # batch_num, batch_size # )) self.assertEqual(batch_num, batch_size) batch_id += 1 total_num += batch_num # curt_time = time.time() # print("One batch time: {}".format(curt_time - pre_time)) # pre_time = curt_time self.assertEqual(total_num, data_num) def test_collect_index(self): data_num = 100 feature_num = 20 data_instances = self.prepare_data(data_num=data_num, feature_num=feature_num) # res = data_instances.mapValues(lambda x: x) data_sids_iter, data_size = indices.collect_index(data_instances) self.assertEqual(data_num, data_size) real_index_num = 0 for sid, _ in data_sids_iter: real_index_num += 1 self.assertEqual(data_num, real_index_num) def test_data_features(self): data_num = 100 feature_num = 20 data_instances = self.prepare_data(data_num=data_num, feature_num=feature_num) local_data = data_instances.collect() idx, data = local_data.__next__() features = data.features self.assertEqual(len(features), feature_num) def test_different_datasize_batch(self): data_nums = [10, 100] batch_size = [1, 2, 10, 32] for d_n in data_nums: for b_s in batch_size: # print("data_nums: {}, batch_size: {}".format(d_n, b_s)) self.test_mini_batch_data_generator(data_num=d_n, batch_size=b_s) if __name__ == '__main__': unittest.main()
3,894
36.095238
86
py
FATE
FATE-master/python/federatedml/model_selection/test/KFold_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import numpy as np from fate_arch.session import computing_session as session from federatedml.feature.instance import Instance from federatedml.model_selection import KFold from federatedml.param.cross_validation_param import CrossValidationParam class TestKFold(unittest.TestCase): def setUp(self): session.init("123") self.data_num = 1000 self.feature_num = 200 final_result = [] for i in range(self.data_num): tmp = i * np.ones(self.feature_num) inst = Instance(inst_id=i, features=tmp, label=0) tmp = (str(i), inst) final_result.append(tmp) table = session.parallelize(final_result, include_key=True, partition=3) self.table = table def test_split(self): kfold_obj = KFold() kfold_obj.n_splits = 10 kfold_obj.random_seed = 32 data_generator = kfold_obj.split(self.table) expect_test_data_num = self.data_num / 10 expect_train_data_num = self.data_num - expect_test_data_num key_list = [] for train_data, test_data in data_generator: train_num = train_data.count() test_num = test_data.count() self.assertTrue(0.9 * expect_train_data_num < train_num < 1.1 * expect_train_data_num) self.assertTrue(0.9 * expect_test_data_num < test_num < 1.1 * expect_test_data_num) first_key = train_data.first()[0] key_list.append(first_key) # Test that the same random seed reproduces identical splits kfold_obj2 = KFold() kfold_obj2.n_splits = 10 kfold_obj2.random_seed = 32 data_generator = kfold_obj2.split(self.table) n = 0 for train_data, test_data in data_generator: second_key = train_data.first()[0] first_key = key_list[n] self.assertTrue(first_key == second_key) n += 1 if __name__ == '__main__': unittest.main()
2,773
34.113924
98
py
FATE
FATE-master/python/federatedml/model_selection/test/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #
616
37.5625
75
py
FATE
FATE-master/python/federatedml/model_selection/stepwise/start_stepwise.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from federatedml.model_selection.stepwise.hetero_stepwise import HeteroStepwise from federatedml.util import LOGGER from federatedml.util import consts def _get_stepwise_param(model): model.model_param.stepwise_param.role = model.role model.model_param.stepwise_param.mode = model.mode return model.model_param.stepwise_param def run(model, train_data, validate_data=None): if not model.need_run: return train_data if model.mode == consts.HETERO: step_obj = HeteroStepwise() else: raise ValueError("stepwise currently only support Hetero mode.") stepwise_param = _get_stepwise_param(model) step_obj.run(stepwise_param, train_data, validate_data, model) pred_result = HeteroStepwise.predict(train_data, model) LOGGER.info("Finish running Stepwise") return pred_result
1,501
34.761905
79
py
FATE
FATE-master/python/federatedml/model_selection/stepwise/step.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import numpy as np from federatedml.statistic.data_overview import get_header, get_anonymous_header from federatedml.util import consts from federatedml.util import LOGGER from federatedml.util.data_transform import set_schema class Step(object): def __init__(self): self.feature_list = [] self.step_direction = "" self.n_step = 0 self.n_model = 0 def set_step_info(self, step_info): n_step, n_model = step_info self.n_step = n_step self.n_model = n_model def get_flowid(self): flowid = "train.step{}.model{}".format(self.n_step, self.n_model) return flowid @staticmethod def slice_data_instance(data_instance, feature_mask): """ return data_instance with features at given indices Parameters ---------- data_instance: data Instance object, input data feature_mask: mask to filter data_instance """ data_instance.features = data_instance.features[feature_mask] return data_instance @staticmethod def get_new_schema(original_data, feature_mask): schema = copy.deepcopy(original_data.schema) old_header = get_header(original_data) new_header = [old_header[i] for i in np.where(feature_mask > 0)[0]] schema["header"] = new_header old_anonymous_header = get_anonymous_header(original_data) if old_anonymous_header: new_anonymous_header = [old_anonymous_header[i] for i in np.where(feature_mask > 0)[0]] schema["anonymous_header"] = new_anonymous_header LOGGER.debug(f"given feature_mask: {feature_mask}, new anonymous header is: {new_anonymous_header}") return schema def run(self, original_model, train_data, validate_data, feature_mask): model = copy.deepcopy(original_model) current_flowid = self.get_flowid() model.set_flowid(current_flowid) if original_model.role != consts.ARBITER: curr_train_data = train_data.mapValues(lambda v: Step.slice_data_instance(v, feature_mask)) new_schema = Step.get_new_schema(train_data, feature_mask) # LOGGER.debug("new schema is: {}".format(new_schema)) set_schema(curr_train_data, new_schema) model.header = new_schema.get("header") else: curr_train_data = train_data model.fit(curr_train_data) return model
3,082
35.270588
112
py
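Step.run above trains each candidate model on a column-sliced copy of the data: a boolean feature_mask picks columns out of every instance's feature vector, and the header is filtered with the same mask. The slice itself is plain numpy boolean indexing, shown standalone here with toy values:

# Numpy view of the feature_mask slicing done by Step.slice_data_instance
# and Step.get_new_schema.
import numpy as np

features = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
header = ["x1", "x2", "x3", "x4", "x5"]
feature_mask = np.array([1, 0, 1, 1, 0], dtype=bool)

sliced = features[feature_mask]                                  # -> [1., 3., 4.]
new_header = [header[i] for i in np.where(feature_mask > 0)[0]]  # -> ['x1', 'x3', 'x4']
print(sliced, new_header)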
FATE
FATE-master/python/federatedml/model_selection/stepwise/hetero_stepwise.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import itertools import uuid import numpy as np from google.protobuf.json_format import MessageToDict from sklearn import metrics from sklearn.linear_model import LogisticRegression, LinearRegression from federatedml.model_base import Metric, MetricMeta from federatedml.evaluation.metrics.regression_metric import IC, IC_Approx from federatedml.model_selection.stepwise.step import Step from federatedml.statistic import data_overview from federatedml.transfer_variable.transfer_class.stepwise_transfer_variable import StepwiseTransferVariable from federatedml.util import consts from federatedml.util import LOGGER class ModelInfo(object): def __init__(self, n_step, n_model, score, loss, direction): self.score = score self.n_step = n_step self.n_model = n_model self.direction = direction self.loss = loss self.uid = str(uuid.uuid1()) def get_score(self): return self.score def get_loss(self): return self.loss def get_key(self): return self.uid class HeteroStepwise(object): def __init__(self): self.mode = None self.role = None self.forward = False self.backward = False self.n_step = 0 self.has_test = False self.n_count = 0 self.stop_stepwise = False self.models = None self.metric_namespace = "train" self.metric_type = "STEPWISE" self.intercept = None self.models = {} self.models_trained = {} self.IC_computer = None self.step_direction = None self.anonymous_header_guest = None self.anonymous_header_host = None def _init_model(self, param): self.model_param = param self.mode = param.mode self.role = param.role self.score_name = param.score_name self.direction = param.direction self.max_step = param.max_step self.nvmin = param.nvmin self.nvmax = param.nvmax self.transfer_variable = StepwiseTransferVariable() self._get_direction() def _get_direction(self): if self.direction == "forward": self.forward = True elif self.direction == "backward": self.backward = True elif self.direction == "both": self.forward = True self.backward = True else: raise ValueError("Wrong stepwise direction given.") def _put_model(self, key, model): """ wrapper to put key, model dict pair into models dict """ model_dict = {'model': {'stepwise': model.export_model()}} self.models[key] = model_dict def _get_model(self, key): """ wrapper to get value of a given model key from models dict """ value = self.models.get(key) return value def _set_k(self): """ Helper function, get the penalty coefficient for AIC/BIC calculation. """ if self.score_name == "aic": self.k = 2 elif self.score_name == "bic": self.k = np.log(self.n_count) else: raise ValueError("Wrong score name given: {}. 
Only 'aic' or 'bic' acceptable.".format(self.score_name)) @staticmethod def get_dfe(model, str_mask): dfe = sum(HeteroStepwise.string2mask(str_mask)) if model.fit_intercept: dfe += 1 LOGGER.debug("fit_intercept detected, 1 is added to dfe") return dfe def get_step_best(self, step_models): best_score = None best_model = "" for model in step_models: model_info = self.models_trained[model] score = model_info.get_score() if score is None: continue if best_score is None or score < best_score: best_score = score best_model = model LOGGER.info(f"step {self.n_step}, best model {best_model}") return best_model @staticmethod def drop_one(mask_to_drop): for i in np.nonzero(mask_to_drop)[0]: new_mask = np.copy(mask_to_drop) new_mask[i] = 0 if sum(new_mask) > 0: yield new_mask @staticmethod def add_one(mask_to_add): for i in np.where(mask_to_add < 1)[0]: new_mask = np.copy(mask_to_add) new_mask[i] = 1 yield new_mask def check_stop(self, new_host_mask, new_guest_mask, host_mask, guest_mask): # initial step if self.n_step == 0: return False # if model not updated if np.array_equal(new_host_mask, host_mask) and np.array_equal(new_guest_mask, guest_mask): LOGGER.debug("masks not changed, check_stop returns True") return True # if full model is the best if sum(new_host_mask < 1) == 0 and sum(new_guest_mask < 1) == 0 and self.n_step > 0: LOGGER.debug("masks are full model, check_stop returns True") return True # if new best reach variable count lower limit new_total_nv = sum(new_host_mask) + sum(new_guest_mask) total_nv = sum(host_mask) + sum(guest_mask) if new_total_nv == self.nvmin and total_nv >= self.nvmin: LOGGER.debug("variable count min reached, check_stop returns True") return True # if new best reach variable count upper limit if self.nvmax is not None: if new_total_nv == self.nvmax and total_nv <= self.nvmax: LOGGER.debug("variable count max reached, check_stop returns True") return True # if reach max step if self.n_step >= self.max_step: LOGGER.debug("max step reached, check_stop returns True") return True return False def get_intercept_loss(self, model, data): y = np.array([x[1] for x in data.mapValues(lambda v: v.label).collect()]) X = np.ones((len(y), 1)) if model.model_name == 'HeteroLinearRegression' or model.model_name == 'HeteroPoissonRegression': intercept_model = LinearRegression(fit_intercept=False) trained_model = intercept_model.fit(X, y) pred = trained_model.predict(X) loss = metrics.mean_squared_error(y, pred) / 2 elif model.model_name == 'HeteroLogisticRegression': intercept_model = LogisticRegression(penalty='l1', C=1e8, fit_intercept=False, solver='liblinear') trained_model = intercept_model.fit(X, y) pred = trained_model.predict(X) loss = metrics.log_loss(y, pred) else: raise ValueError("Unknown model received. Stepwise stopped.") self.intercept = intercept_model.intercept_ return loss def get_ic_val(self, model, model_mask): if self.role != consts.ARBITER: return None, None if len(model.loss_history) == 0: raise ValueError("Arbiter has no loss history. Stepwise does not support model without total loss.") # get final loss from loss history for criteria calculation loss = model.loss_history[-1] dfe = HeteroStepwise.get_dfe(model, model_mask) ic_val = self.IC_computer.compute(self.k, self.n_count, dfe, loss) if np.isinf(ic_val): raise ValueError("Loss value of infinity obtained. 
Stepwise stopped.") return loss, ic_val def get_ic_val_guest(self, model, train_data): if not model.fit_intercept: return None, None loss = self.get_intercept_loss(model, train_data) # intercept only model has dfe = 1 dfe = 1 ic_val = self.IC_computer.compute(self.k, self.n_count, dfe, loss) return loss, ic_val def _run_step(self, model, train_data, validate_data, feature_mask, n_model, model_mask): if self.direction == 'forward' and self.n_step == 0: if self.role == consts.GUEST: loss, ic_val = self.get_ic_val_guest(model, train_data) LOGGER.info("step {} n_model {}".format(self.n_step, n_model)) model_info = ModelInfo(self.n_step, n_model, ic_val, loss, self.step_direction) self.models_trained[model_mask] = model_info model_key = model_info.get_key() self._put_model(model_key, model) else: model_info = ModelInfo(self.n_step, n_model, None, None, self.step_direction) self.models_trained[model_mask] = model_info model_key = model_info.get_key() self._put_model(model_key, model) return curr_step = Step() curr_step.set_step_info((self.n_step, n_model)) trained_model = curr_step.run(model, train_data, validate_data, feature_mask) loss, ic_val = self.get_ic_val(trained_model, model_mask) LOGGER.info("step {} n_model {}: ic_val {}".format(self.n_step, n_model, ic_val)) model_info = ModelInfo(self.n_step, n_model, ic_val, loss, self.step_direction) self.models_trained[model_mask] = model_info model_key = model_info.get_key() self._put_model(model_key, trained_model) def sync_data_info(self, data): if self.role == consts.ARBITER: return self.arbiter_sync_data_info() else: return self.client_sync_data_info(data) def arbiter_sync_data_info(self): n_host, j_host, self.anonymous_header_host = self.transfer_variable.host_data_info.get(idx=0) n_guest, j_guest, self.anonymous_header_guest = self.transfer_variable.guest_data_info.get(idx=0) self.n_count = n_host return j_host, j_guest def client_sync_data_info(self, data): n, j = data.count(), data_overview.get_features_shape(data) anonymous_header = data_overview.get_anonymous_header(data) self.n_count = n if self.role == consts.HOST: self.transfer_variable.host_data_info.remote((n, j, anonymous_header), role=consts.ARBITER, idx=0) self.transfer_variable.host_data_info.remote((n, j, anonymous_header), role=consts.GUEST, idx=0) j_host = j n_guest, j_guest, self.anonymous_header_guest = self.transfer_variable.guest_data_info.get(idx=0) self.anonymous_header_host = anonymous_header else: self.transfer_variable.guest_data_info.remote((n, j, anonymous_header), role=consts.ARBITER, idx=0) self.transfer_variable.guest_data_info.remote((n, j, anonymous_header), role=consts.HOST, idx=0) j_guest = j n_host, j_host, self.anonymous_header_host = self.transfer_variable.host_data_info.get(idx=0) self.anonymous_header_guest = anonymous_header return j_host, j_guest def get_to_enter(self, host_mask, guest_mask, all_features): if self.role == consts.GUEST: to_enter = [all_features[i] for i in np.where(guest_mask < 1)[0]] elif self.role == consts.HOST: to_enter = [all_features[i] for i in np.where(host_mask < 1)[0]] else: to_enter = [] return to_enter def update_summary_client(self, model, host_mask, guest_mask, unilateral_features, host_anonym, guest_anonym): step_summary = {} if self.role == consts.GUEST: guest_features = [unilateral_features[i] for i in np.where(guest_mask == 1)[0]] host_features = [host_anonym[i] for i in np.where(host_mask == 1)[0]] elif self.role == consts.HOST: guest_features = [guest_anonym[i] for i in np.where(guest_mask == 1)[0]] host_features 
= [unilateral_features[i] for i in np.where(host_mask == 1)[0]] else: raise ValueError(f"upload summary on client only applies to host or guest.") step_summary["guest_features"] = guest_features step_summary["host_features"] = host_features model.add_summary(f"step_{self.n_step}", step_summary) def update_summary_arbiter(self, model, loss, ic_val): step_summary = {} step_summary["loss"] = loss step_summary["ic_val"] = ic_val model.add_summary(f"step_{self.n_step}", step_summary) def record_step_best(self, step_best, host_mask, guest_mask, data_instances, model): metas = {"host_mask": host_mask.tolist(), "guest_mask": guest_mask.tolist(), "score_name": self.score_name} metas["number_in"] = int(sum(host_mask) + sum(guest_mask)) metas["direction"] = self.direction metas["n_count"] = int(self.n_count) """host_anonym = [ anonymous_generator.generate_anonymous( fid=i, role='host', model=model) for i in range( len(host_mask))] guest_anonym = [ anonymous_generator.generate_anonymous( fid=i, role='guest', model=model) for i in range( len(guest_mask))] metas["host_features_anonym"] = host_anonym metas["guest_features_anonym"] = guest_anonym """ metas["host_features_anonym"] = self.anonymous_header_host metas["guest_features_anonym"] = self.anonymous_header_guest model_info = self.models_trained[step_best] loss = model_info.get_loss() ic_val = model_info.get_score() metas["loss"] = loss metas["current_ic_val"] = ic_val metas["fit_intercept"] = model.fit_intercept model_key = model_info.get_key() model_dict = self._get_model(model_key) if self.role != consts.ARBITER: all_features = data_instances.schema.get('header') metas["all_features"] = all_features metas["to_enter"] = self.get_to_enter(host_mask, guest_mask, all_features) model_param = list(model_dict.get('model').values())[0].get( model.model_param_name) param_dict = MessageToDict(model_param) metas["intercept"] = param_dict.get("intercept", None) metas["weight"] = param_dict.get("weight", {}) metas["header"] = param_dict.get("header", []) if self.n_step == 0 and self.direction == "forward": metas["intercept"] = self.intercept self.update_summary_client(model, host_mask, guest_mask, all_features, self.anonymous_header_host, self.anonymous_header_guest) else: self.update_summary_arbiter(model, loss, ic_val) metric_name = f"stepwise_{self.n_step}" metric = [Metric(metric_name, float(self.n_step))] model.callback_metric(metric_name=metric_name, metric_namespace=self.metric_namespace, metric_data=metric) model.tracker.set_metric_meta(metric_name=metric_name, metric_namespace=self.metric_namespace, metric_meta=MetricMeta(name=metric_name, metric_type=self.metric_type, extra_metas=metas)) LOGGER.info(f"metric_name: {metric_name}, metas: {metas}") return def sync_step_best(self, step_models): if self.role == consts.ARBITER: step_best = self.get_step_best(step_models) self.transfer_variable.step_best.remote(step_best, role=consts.HOST, suffix=(self.n_step,)) self.transfer_variable.step_best.remote(step_best, role=consts.GUEST, suffix=(self.n_step,)) LOGGER.info(f"step {self.n_step}, step_best sent is {step_best}") else: step_best = self.transfer_variable.step_best.get(suffix=(self.n_step,))[0] LOGGER.info(f"step {self.n_step}, step_best received is {step_best}") return step_best @staticmethod def mask2string(host_mask, guest_mask): mask = np.append(host_mask, guest_mask) string_repr = ''.join('1' if i else '0' for i in mask) return string_repr @staticmethod def string2mask(string_repr): mask = np.fromiter(map(int, string_repr), dtype=bool) return mask 
@staticmethod def predict(data_instances, model): if data_instances is None: return pred_result = model.predict(data_instances) return pred_result def get_IC_computer(self, model): if model.model_name == 'HeteroLinearRegression': return IC_Approx() else: return IC() def run(self, component_parameters, train_data, validate_data, model): LOGGER.info("Enter stepwise") self._init_model(component_parameters) j_host, j_guest = self.sync_data_info(train_data) if train_data is not None: self.anonymous_header = data_overview.get_anonymous_header(train_data) if self.backward: host_mask, guest_mask = np.ones(j_host, dtype=bool), np.ones(j_guest, dtype=bool) else: host_mask, guest_mask = np.zeros(j_host, dtype=bool), np.zeros(j_guest, dtype=bool) self.IC_computer = self.get_IC_computer(model) self._set_k() while self.n_step <= self.max_step: LOGGER.info("Enter step {}".format(self.n_step)) step_models = set() step_models.add(HeteroStepwise.mask2string(host_mask, guest_mask)) n_model = 0 if self.backward: self.step_direction = "backward" LOGGER.info("step {}, direction: {}".format(self.n_step, self.step_direction)) if self.n_step == 0: backward_gen = [[host_mask, guest_mask]] else: backward_host, backward_guest = HeteroStepwise.drop_one(host_mask), HeteroStepwise.drop_one( guest_mask) backward_gen = itertools.chain(zip(backward_host, itertools.cycle([guest_mask])), zip(itertools.cycle([host_mask]), backward_guest)) for curr_host_mask, curr_guest_mask in backward_gen: model_mask = HeteroStepwise.mask2string(curr_host_mask, curr_guest_mask) step_models.add(model_mask) if model_mask not in self.models_trained: if self.role == consts.ARBITER: feature_mask = None elif self.role == consts.HOST: feature_mask = curr_host_mask else: feature_mask = curr_guest_mask self._run_step(model, train_data, validate_data, feature_mask, n_model, model_mask) n_model += 1 if self.forward: self.step_direction = "forward" LOGGER.info("step {}, direction: {}".format(self.n_step, self.step_direction)) forward_host, forward_guest = HeteroStepwise.add_one(host_mask), HeteroStepwise.add_one(guest_mask) if sum(guest_mask) + sum(host_mask) == 0: if self.n_step == 0: forward_gen = [[host_mask, guest_mask]] else: forward_gen = itertools.product(list(forward_host), list(forward_guest)) else: forward_gen = itertools.chain(zip(forward_host, itertools.cycle([guest_mask])), zip(itertools.cycle([host_mask]), forward_guest)) for curr_host_mask, curr_guest_mask in forward_gen: model_mask = HeteroStepwise.mask2string(curr_host_mask, curr_guest_mask) step_models.add(model_mask) LOGGER.info(f"step {self.n_step}, mask {model_mask}") if model_mask not in self.models_trained: if self.role == consts.ARBITER: feature_mask = None elif self.role == consts.HOST: feature_mask = curr_host_mask else: feature_mask = curr_guest_mask self._run_step(model, train_data, validate_data, feature_mask, n_model, model_mask) n_model += 1 # forward step 0 if sum(host_mask) + sum(guest_mask) == 0 and self.n_step == 0: model_mask = HeteroStepwise.mask2string(host_mask, guest_mask) self.record_step_best(model_mask, host_mask, guest_mask, train_data, model) self.n_step += 1 continue old_host_mask, old_guest_mask = host_mask, guest_mask step_best = self.sync_step_best(step_models) step_best_mask = HeteroStepwise.string2mask(step_best) host_mask, guest_mask = step_best_mask[:j_host], step_best_mask[j_host:] LOGGER.debug("step {}, best_host_mask {}, best_guest_mask {}".format(self.n_step, host_mask, guest_mask)) self.stop_stepwise = self.check_stop(host_mask, guest_mask, 
old_host_mask, old_guest_mask) if self.stop_stepwise: break self.record_step_best(step_best, host_mask, guest_mask, train_data, model) self.n_step += 1 mask_string = HeteroStepwise.mask2string(host_mask, guest_mask) model_info = self.models_trained[mask_string] best_model_key = model_info.get_key() best_model = self._get_model(best_model_key) model.load_model(best_model)
22,445
43.981964
117
py
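HeteroStepwise encodes each candidate model as a concatenated host+guest boolean mask serialized to a 0/1 string, and enumerates neighbouring candidates by dropping or adding one feature at a time. A self-contained re-implementation of those static helpers, runnable without FATE:

# Pure-numpy mirror of HeteroStepwise's mask helpers.
import numpy as np

def mask2string(host_mask, guest_mask):
    mask = np.append(host_mask, guest_mask)
    return ''.join('1' if i else '0' for i in mask)

def string2mask(s):
    return np.fromiter(map(int, s), dtype=bool)

def drop_one(mask):
    for i in np.nonzero(mask)[0]:
        new_mask = np.copy(mask)
        new_mask[i] = 0
        if new_mask.sum() > 0:      # never yield the empty model
            yield new_mask

def add_one(mask):
    for i in np.where(mask < 1)[0]:
        new_mask = np.copy(mask)
        new_mask[i] = 1
        yield new_mask

host = np.array([1, 0, 1], dtype=bool)
guest = np.array([0, 1], dtype=bool)
s = mask2string(host, guest)        # '10101'
assert (string2mask(s) == np.append(host, guest)).all()
print([mask2string(m, guest) for m in drop_one(host)])  # ['00101', '10001']
print([mask2string(host, m) for m in add_one(guest)])   # ['10111']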
FATE
FATE-master/python/federatedml/model_selection/stepwise/__init__.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #
616
37.5625
75
py
FATE
FATE-master/python/federatedml/model_selection/stepwise/test/stepwise_test.py
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import unittest import uuid from fate_arch.common import profile from fate_arch.session import computing_session as session from federatedml.model_selection.stepwise.hetero_stepwise import HeteroStepwise from federatedml.util import consts profile._PROFILE_LOG_ENABLED = False class TestStepwise(unittest.TestCase): def setUp(self): self.job_id = str(uuid.uuid1()) session.init("test_random_sampler_" + self.job_id) model = HeteroStepwise() model.__setattr__('role', consts.GUEST) model.__setattr__('fit_intercept', True) self.model = model data_num = 100 feature_num = 5 bool_list = [True, False, True, True, False] self.str_mask = "10110" self.header = ["x1", "x2", "x3", "x4", "x5"] self.mask = self.prepare_mask(bool_list) def prepare_mask(self, bool_list): mask = np.array(bool_list, dtype=bool) return mask def test_get_dfe(self): real_dfe = 4 dfe = HeteroStepwise.get_dfe(self.model, self.str_mask) self.assertEqual(dfe, real_dfe) def test_drop_one(self): real_masks = [np.array([0, 0, 1, 1, 0], dtype=bool), np.array([1, 0, 0, 1, 0], dtype=bool), np.array([1, 0, 1, 0, 0], dtype=bool)] mask_generator = HeteroStepwise.drop_one(self.mask) i = 0 for mask in mask_generator: np.testing.assert_array_equal( mask, real_masks[i], f"In stepwise_test drop one: mask{mask} not equal to expected {real_masks[i]}") i += 1 def test_add_one(self): real_masks = [np.array([1, 1, 1, 1, 0], dtype=bool), np.array([1, 0, 1, 1, 1], dtype=bool)] mask_generator = HeteroStepwise.add_one(self.mask) i = 0 for mask in mask_generator: np.testing.assert_array_equal(mask, real_masks[i], f"In stepwise_test add one: mask{mask} not equal to expected {real_masks[i]}") i += 1 def test_mask2string(self): real_str_mask = "1011010110" str_mask = HeteroStepwise.mask2string(self.mask, self.mask) self.assertTrue(str_mask == real_str_mask) def test_string2mask(self): real_mask = np.array([1, 0, 1, 1, 0], dtype=bool) mask = HeteroStepwise.string2mask(self.str_mask) np.testing.assert_array_equal(mask, real_mask) def test_get_to_enter(self): real_to_enter = ["x2", "x5"] to_enter = self.model.get_to_enter(self.mask, self.mask, self.header) self.assertListEqual(to_enter, real_to_enter) def tearDown(self): session.stop() if __name__ == '__main__': unittest.main()
3,374
34.15625
120
py
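An illustrative aside (not part of the FATE source): the mask helpers exercised in the test above round-trip boolean feature masks through compact bit-strings. The sketch below re-implements the two conversions for intuition only; the function bodies are assumptions inferred from the expected test values, not the upstream implementations.

import numpy as np

def mask2string(mask_a, mask_b):
    # Concatenate two boolean masks into one bit-string, e.g. "10110" + "10110".
    return "".join(str(int(b)) for b in np.concatenate([mask_a, mask_b]))

def string2mask(s):
    # Inverse direction: each "1" becomes True, each "0" becomes False.
    return np.array([c == "1" for c in s], dtype=bool)

mask = np.array([True, False, True, True, False])
assert mask2string(mask, mask) == "1011010110"
np.testing.assert_array_equal(string2mask("10110"), mask)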
FATE
FATE-master/python/federatedml/model_selection/stepwise/test/__init__.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
616
37.5625
75
py
FATE
FATE-master/python/federatedml/model_selection/data_split/homo_data_split.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from federatedml.model_selection.data_split.data_split import DataSplitter
from federatedml.util import LOGGER


class HomoDataSplitHost(DataSplitter):
    def __init__(self):
        super().__init__()

    def fit(self, data_inst):
        LOGGER.debug(f"Enter Homo {self.role} Data Split fit")
        if self.need_run is False:
            return
        self.param_validator(data_inst)

        ids = self._get_ids(data_inst)
        y = self._get_y(data_inst)

        id_train, id_test_validate, y_train, y_test_validate = self._split(
            ids, y, test_size=self.test_size + self.validate_size, train_size=self.train_size)
        validate_size, test_size = DataSplitter.get_train_test_size(self.validate_size, self.test_size)
        id_validate, id_test, y_validate, y_test = self._split(id_test_validate, y_test_validate,
                                                               test_size=test_size, train_size=validate_size)
        LOGGER.info(f"Split ids obtained.")

        partitions = data_inst.partitions
        id_train_table = DataSplitter._parallelize_ids(id_train, partitions)
        id_validate_table = DataSplitter._parallelize_ids(id_validate, partitions)
        id_test_table = DataSplitter._parallelize_ids(id_test, partitions)

        train_data, validate_data, test_data = self.split_data(data_inst,
                                                               id_train_table,
                                                               id_validate_table,
                                                               id_test_table)
        LOGGER.info(f"Split data finished.")

        all_metas = {}
        all_metas = self.callback_count_info(id_train, id_validate, id_test, all_metas)
        if self.stratified:
            all_metas = self.callback_label_info(y_train, y_validate, y_test, all_metas)
        self.callback(all_metas)
        self.set_summary(all_metas)

        return [train_data, validate_data, test_data]


class HomoDataSplitGuest(DataSplitter):
    def __init__(self):
        super().__init__()

    def fit(self, data_inst):
        LOGGER.debug(f"Enter Homo {self.role} Data Split fit")
        if self.need_run is False:
            return
        self.param_validator(data_inst)

        ids = self._get_ids(data_inst)
        y = self._get_y(data_inst)

        id_train, id_test_validate, y_train, y_test_validate = self._split(
            ids, y, test_size=self.test_size + self.validate_size, train_size=self.train_size)
        validate_size, test_size = DataSplitter.get_train_test_size(self.validate_size, self.test_size)
        id_validate, id_test, y_validate, y_test = self._split(id_test_validate, y_test_validate,
                                                               test_size=test_size, train_size=validate_size)
        LOGGER.info(f"Split ids obtained.")

        partitions = data_inst.partitions
        id_train_table = DataSplitter._parallelize_ids(id_train, partitions)
        id_validate_table = DataSplitter._parallelize_ids(id_validate, partitions)
        id_test_table = DataSplitter._parallelize_ids(id_test, partitions)

        train_data, validate_data, test_data = self.split_data(data_inst,
                                                               id_train_table,
                                                               id_validate_table,
                                                               id_test_table)
        LOGGER.info(f"Split data finished.")

        all_metas = {}
        all_metas = self.callback_count_info(id_train, id_validate, id_test, all_metas)
        if self.stratified:
            all_metas = self.callback_label_info(y_train, y_validate, y_test, all_metas)
        self.callback(all_metas)
        self.set_summary(all_metas)
        LOGGER.info(f"Callback given.")

        return [train_data, validate_data, test_data]
4,523
41.679245
109
py
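An illustrative aside (not part of the FATE source): both Homo splitters above follow the same two-stage pattern, first carving the train set out of all ids and then dividing the remainder into validate and test with re-normalized fractions. A minimal standalone sketch of that pattern using scikit-learn's train_test_split directly; the ids, sizes, and random_state are invented for illustration.

from sklearn.model_selection import train_test_split

ids = list(range(100))
train_size, validate_size, test_size = 0.6, 0.2, 0.2

# Stage 1: carve out the train set; the remainder holds validate + test.
id_train, id_rest = train_test_split(
    ids, train_size=train_size, test_size=validate_size + test_size, random_state=42)

# Stage 2: re-normalize validate/test over the remainder (the job of
# DataSplitter.get_train_test_size), then split again.
total = validate_size + test_size
id_validate, id_test = train_test_split(
    id_rest, train_size=validate_size / total, test_size=test_size / total,
    random_state=42)

assert (len(id_train), len(id_validate), len(id_test)) == (60, 20, 20)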
FATE
FATE-master/python/federatedml/model_selection/data_split/data_split.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import collections

from sklearn.model_selection import train_test_split

from fate_arch.session import computing_session
from federatedml.model_base import Metric, MetricMeta
from federatedml.feature.binning.base_binning import BaseBinning
from federatedml.model_base import ModelBase
from federatedml.param.data_split_param import DataSplitParam
from federatedml.util import LOGGER
from federatedml.util import data_transform
from federatedml.util.consts import FLOAT_ZERO

ROUND_NUM = 3


class DataSplitter(ModelBase):
    def __init__(self):
        super().__init__()
        self.metric_name = "data_split"
        self.metric_namespace = "train"
        self.metric_type = "DATA_SPLIT"
        self.model_param = DataSplitParam()
        self.role = None
        self.need_transform = None

    def _init_model(self, params):
        self.random_state = params.random_state
        self.test_size = params.test_size
        self.train_size = params.train_size
        self.validate_size = params.validate_size
        self.stratified = params.stratified
        self.shuffle = params.shuffle
        self.split_points = params.split_points
        if self.split_points:
            self.split_points = sorted(self.split_points)
        self.need_run = params.need_run

    @staticmethod
    def _safe_divide(n, d):
        result = n / d if d > FLOAT_ZERO else 0.0
        if result >= 1:
            result = 1.0
        return result

    def _split(self, ids, y, test_size, train_size):
        if test_size <= FLOAT_ZERO:
            return ids, [], y, []
        if train_size <= FLOAT_ZERO:
            return [], ids, [], y
        stratify = y if self.stratified else None
        if not isinstance(test_size, int):
            train_size = round(train_size * len(ids))
            test_size = len(ids) - train_size
        id_train, id_test, y_train, y_test = train_test_split(ids, y,
                                                              test_size=test_size, train_size=train_size,
                                                              random_state=self.random_state,
                                                              shuffle=self.shuffle, stratify=stratify)
        return id_train, id_test, y_train, y_test

    def _get_ids(self, data_inst):
        ids = sorted([i for i, v in data_inst.mapValues(lambda v: None).collect()])
        return ids

    def _get_y(self, data_inst):
        if self.stratified:
            y = [v for i, v in data_inst.mapValues(lambda v: v.label).collect()]
            if self.need_transform:
                y = self.transform_regression_label(data_inst)
        else:
            # make dummy y
            y = [0] * (data_inst.count())
        return y

    def check_need_transform(self):
        if self.split_points is not None:
            if len(self.split_points) == 0:
                self.need_transform = False
            else:
                # only need to produce binned labels if stratified split needed
                if self.stratified:
                    self.need_transform = True
        return

    @staticmethod
    def get_train_test_size(train_size, test_size):
        LOGGER.debug(f"original train_size is {train_size}, original test_size is {test_size}")
        # return original set sizes if int
        if isinstance(test_size, int) and isinstance(train_size, int):
            return train_size, test_size
        total_size = test_size + train_size
        new_train_size = DataSplitter._safe_divide(train_size, total_size)
        new_test_size = DataSplitter._safe_divide(test_size, total_size)
        LOGGER.debug(f"new_train_size is {new_train_size}, new_test_size is {new_test_size}")
        return new_train_size, new_test_size

    def param_validator(self, data_inst):
        """
        Validate & transform param inputs
        """
        # check if need label transform
        self.check_need_transform()

        # check & transform data set sizes
        n_count = data_inst.count()
        if isinstance(self.test_size, float) or isinstance(self.train_size, float) \
                or isinstance(self.validate_size, float):
            total_size = 1.0
        else:
            total_size = n_count
        if self.train_size is None:
            if self.validate_size is None:
                self.train_size = total_size - self.test_size
                self.validate_size = total_size - (self.test_size + self.train_size)
            else:
                if self.test_size is None:
                    self.test_size = 0
                self.train_size = total_size - (self.validate_size + self.test_size)
        elif self.test_size is None:
            if self.validate_size is None:
                self.test_size = total_size - self.train_size
                self.validate_size = total_size - (self.test_size + self.train_size)
            else:
                self.test_size = total_size - (self.validate_size + self.train_size)
        elif self.validate_size is None:
            if self.train_size is None:
                self.train_size = total_size - self.test_size
            self.validate_size = total_size - (self.test_size + self.train_size)

        if abs((abs(self.train_size) + abs(self.test_size) + abs(self.validate_size)) - total_size) > FLOAT_ZERO:
            raise ValueError(f"train_size, test_size, validate_size should sum up to 1.0 or data count")
        return

    def transform_regression_label(self, data_inst):
        edge = self.split_points[-1] + 1
        split_points_bin = self.split_points + [edge]
        bin_labels = data_inst.mapValues(lambda v: BaseBinning.get_bin_num(v.label, split_points_bin))
        binned_y = [v for k, v in bin_labels.collect()]
        return binned_y

    @staticmethod
    def get_class_freq(y, split_points=None, label_names=None):
        """
        get frequency info of a given y set; only called when stratified is true
        :param y: list, y sample
        :param split_points: list, split points used to bin regression values
        :param label_names: list, label names of all data
        :return: dict
        """
        freq_dict = collections.Counter(y)
        freq_keys = freq_dict.keys()
        # continuous label
        if split_points is not None and len(split_points) > 0:
            label_count = len(split_points) + 1
            # fill in count for missing bins
            if len(freq_keys) < label_count:
                for i in range(label_count):
                    if i not in freq_keys:
                        freq_dict[i] = 0
        # categorical label
        else:
            if label_names is None:
                raise ValueError("No label values collected.")
            label_count = len(label_names)
            # fill in count for missing labels
            if len(freq_keys) < label_count:
                for label in label_names:
                    if label not in freq_keys:
                        freq_dict[label] = 0
        return freq_dict

    def callback_count_info(self, id_train, id_validate, id_test, all_metas):
        """
        Tool to callback returned data count & ratio information
        Parameters
        ----------
        id_train: list or table, id of data set
        id_validate: list or table, id of data set
        id_test: list or table, id of data set
        all_metas: dict, all meta info

        Returns
        -------
        dict
        """
        metas = {}
        if isinstance(id_train, list):
            train_count = len(id_train)
            validate_count = len(id_validate)
            test_count = len(id_test)
        else:
            train_count = id_train.count()
            validate_count = id_validate.count()
            test_count = id_test.count()
        metas["train"] = train_count
        metas["validate"] = validate_count
        metas["test"] = test_count
        original_count = train_count + validate_count + test_count
        metas["original"] = original_count
        metric_name = f"{self.metric_name}_count_info"
        all_metas[metric_name] = metas

        metas = {}
        train_ratio = train_count / original_count
        validate_ratio = validate_count / original_count
        test_ratio = test_count / original_count
        metas["train"] = round(train_ratio, ROUND_NUM)
        metas["validate"] = round(validate_ratio, ROUND_NUM)
        metas["test"] = round(test_ratio, ROUND_NUM)
        metric_name = f"{self.metric_name}_ratio_info"
        all_metas[metric_name] = metas
        # stratified
        all_metas["stratified"] = self.stratified
        return all_metas

    def callback_label_info(self, y_train, y_validate, y_test, all_metas):
        """
        Tool to callback returned data label information
        Parameters
        ----------
        y_train: list, y
        y_validate: list, y
        y_test: list, y
        all_metas: dict, all meta info

        Returns
        -------
        dict
        """
        metas = {}
        y_all = y_train + y_validate + y_test
        label_names = None
        if self.split_points is None:
            label_names = list(set(y_all))

        original_freq_dict = DataSplitter.get_class_freq(y_all, self.split_points, label_names)
        metas["original"] = original_freq_dict

        train_freq_dict = DataSplitter.get_class_freq(y_train, self.split_points, label_names)
        metas["train"] = train_freq_dict

        validate_freq_dict = DataSplitter.get_class_freq(y_validate, self.split_points, label_names)
        metas["validate"] = validate_freq_dict

        test_freq_dict = DataSplitter.get_class_freq(y_test, self.split_points, label_names)
        metas["test"] = test_freq_dict

        if self.split_points is not None and len(self.split_points) > 0:
            metas["split_points"] = self.split_points
            metas["continuous_label"] = True
        else:
            metas["label_names"] = label_names
            metas["continuous_label"] = False

        metric_name = f"{self.metric_name}_label_info"
        all_metas[metric_name] = metas
        return all_metas

    def callback(self, metas):
        metric = [Metric(self.metric_name, 0)]
        self.callback_metric(metric_name=self.metric_name,
                             metric_namespace=self.metric_namespace,
                             metric_data=metric)
        self.tracker.set_metric_meta(metric_name=self.metric_name,
                                     metric_namespace=self.metric_namespace,
                                     metric_meta=MetricMeta(name=self.metric_name,
                                                            metric_type=self.metric_type,
                                                            extra_metas=metas))

    @staticmethod
    def _match_id(data_inst, id_table):
        # ids = [(i, None) for i in ids]
        # id_table = computing_session.parallelize(ids, include_key=True, partition=data_inst.partitions)
        return data_inst.join(id_table, lambda v1, v2: v1)

    @staticmethod
    def _parallelize_ids(ids, partitions):
        ids = [(i, None) for i in ids]
        id_table = computing_session.parallelize(ids, include_key=True, partition=partitions)
        return id_table

    @staticmethod
    def _set_output_table_schema(data_inst, schema):
        if schema is not None and data_inst.count() > 0:
            data_transform.set_schema(data_inst, schema)

    def split_data(self, data_inst, id_train, id_validate, id_test):
        train_data = DataSplitter._match_id(data_inst, id_train)
        validate_data = DataSplitter._match_id(data_inst, id_validate)
        test_data = DataSplitter._match_id(data_inst, id_test)

        schema = getattr(data_inst, "schema", None)
        self._set_output_table_schema(train_data, schema)
        self._set_output_table_schema(validate_data, schema)
        self._set_output_table_schema(test_data, schema)
        return train_data, validate_data, test_data

    def fit(self, data_inst):
        raise NotImplementedError("fit method in data_split should not be called here.")
12,672
38.235294
118
py
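An illustrative aside on the size-normalization helpers above: when fractional sizes are passed, get_train_test_size rescales the pair to sum to 1.0 via _safe_divide, while integer sizes are treated as absolute counts and pass through unchanged. Below is a minimal re-implementation for intuition only; the names mirror the source, and FLOAT_ZERO here is an invented stand-in for the small epsilon constant in federatedml.util.consts.

FLOAT_ZERO = 1e-8  # stand-in for federatedml.util.consts.FLOAT_ZERO

def safe_divide(n, d):
    result = n / d if d > FLOAT_ZERO else 0.0
    return min(result, 1.0)

def get_train_test_size(train_size, test_size):
    # integer sizes are absolute counts and pass through unchanged
    if isinstance(test_size, int) and isinstance(train_size, int):
        return train_size, test_size
    total = train_size + test_size
    return safe_divide(train_size, total), safe_divide(test_size, total)

assert get_train_test_size(0.2, 0.2) == (0.5, 0.5)   # fractions rescale to sum to 1
assert get_train_test_size(60, 40) == (60, 40)       # counts pass through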
FATE
FATE-master/python/federatedml/model_selection/data_split/hetero_data_split.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from federatedml.model_selection.data_split.data_split import DataSplitter
from federatedml.transfer_variable.transfer_class.data_split_transfer_variable import \
    DataSplitTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts


class HeteroDataSplitHost(DataSplitter):
    def __init__(self):
        super().__init__()
        self.transfer_variable = DataSplitTransferVariable()

    def fit(self, data_inst):
        if self.need_run is False:
            return
        LOGGER.debug(f"Enter Hetero {self.role} Data Split fit")

        id_train_table = self.transfer_variable.id_train.get(idx=0)
        id_test_table = self.transfer_variable.id_test.get(idx=0)
        id_validate_table = self.transfer_variable.id_validate.get(idx=0)
        LOGGER.info(f"ids obtained from Guest.")

        train_data, validate_data, test_data = self.split_data(data_inst,
                                                               id_train_table,
                                                               id_validate_table,
                                                               id_test_table)
        LOGGER.info(f"Split data finished.")

        all_metas = {}
        all_metas = self.callback_count_info(id_train_table, id_validate_table, id_test_table, all_metas)
        self.callback(all_metas)
        self.set_summary(all_metas)
        LOGGER.info(f"Callback given.")

        return [train_data, validate_data, test_data]


class HeteroDataSplitGuest(DataSplitter):
    def __init__(self):
        super().__init__()
        self.transfer_variable = DataSplitTransferVariable()

    def fit(self, data_inst):
        LOGGER.debug(f"Enter Hetero {self.role} Data Split fit")
        if self.need_run is False:
            return
        self.param_validator(data_inst)

        ids = self._get_ids(data_inst)
        y = self._get_y(data_inst)

        id_train, id_test_validate, y_train, y_test_validate = self._split(
            ids, y, test_size=self.test_size + self.validate_size, train_size=self.train_size)
        validate_size, test_size = DataSplitter.get_train_test_size(self.validate_size, self.test_size)
        id_validate, id_test, y_validate, y_test = self._split(id_test_validate, y_test_validate,
                                                               test_size=test_size, train_size=validate_size)
        LOGGER.info(f"Split ids obtained.")

        partitions = data_inst.partitions
        id_train_table = DataSplitter._parallelize_ids(id_train, partitions)
        id_validate_table = DataSplitter._parallelize_ids(id_validate, partitions)
        id_test_table = DataSplitter._parallelize_ids(id_test, partitions)

        self.transfer_variable.id_train.remote(obj=id_train_table, role=consts.HOST, idx=-1)
        self.transfer_variable.id_test.remote(obj=id_test_table, role=consts.HOST, idx=-1)
        self.transfer_variable.id_validate.remote(obj=id_validate_table, role=consts.HOST, idx=-1)
        LOGGER.info(f"ids remote to Host(s)")

        train_data, validate_data, test_data = self.split_data(data_inst,
                                                               id_train_table,
                                                               id_validate_table,
                                                               id_test_table)
        LOGGER.info(f"Split data finished.")

        all_metas = {}
        all_metas = self.callback_count_info(id_train, id_validate, id_test, all_metas)
        # summary["data_split_count_info"] = all_metas
        if self.stratified:
            all_metas = self.callback_label_info(y_train, y_validate, y_test, all_metas)
            # summary["data_split_label_info"] = all_metas
        self.callback(all_metas)
        self.set_summary(all_metas)
        LOGGER.info(f"Callback given.")

        return [train_data, validate_data, test_data]
4,656
42.933962
109
py
FATE
FATE-master/python/federatedml/model_selection/data_split/__init__.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from federatedml.model_selection.k_fold import KFold
from federatedml.model_selection.mini_batch import MiniBatch

__all__ = ['MiniBatch', "KFold"]
765
35.47619
75
py
FATE
FATE-master/python/federatedml/model_selection/data_split/test/data_split_test.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import unittest
import uuid

import numpy as np

from fate_arch.common import profile
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.model_selection.data_split import data_split
from federatedml.param.data_split_param import DataSplitParam

profile._PROFILE_LOG_ENABLED = False


class TestDataSplit(unittest.TestCase):
    def setUp(self):
        self.job_id = str(uuid.uuid1())
        session.init("test_random_sampler_" + self.job_id)
        self.data_splitter = data_split.DataSplitter()
        param_dict = {"random_state": 42,
                      "test_size": 0.2, "train_size": 0.6, "validate_size": 0.2,
                      "stratified": True, "shuffle": True,
                      "split_points": [0.5, 0.2]}
        params = DataSplitParam(**param_dict)
        self.data_splitter._init_model(params)

    def prepare_data(self, data_num, feature_num):
        final_result = []
        for i in range(data_num):
            tmp = i * np.ones(feature_num)
            label_tmp = np.random.random(1)[0]
            inst = Instance(inst_id=i, features=tmp, label=label_tmp)
            tmp = (i, inst)
            final_result.append(tmp)
        table = session.parallelize(final_result, include_key=True, partition=3)
        return table

    def test_transform_regression_label(self, data_num=100):
        data_instances = self.prepare_data(data_num, feature_num=10)
        expect_class_count = len(self.data_splitter.split_points) + 1
        bin_y = self.data_splitter.transform_regression_label(data_instances)
        bin_class_count = len(set(bin_y))
        self.assertEqual(expect_class_count, bin_class_count)

    def test_get_train_test_size(self):
        expect_validate_size, expect_test_size = 0.5, 0.5
        validate_size, test_size = self.data_splitter.get_train_test_size(self.data_splitter.test_size,
                                                                          self.data_splitter.validate_size)
        self.assertAlmostEqual(expect_test_size, test_size)
        self.assertAlmostEqual(expect_validate_size, validate_size)

    def test_get_class_freq(self):
        y = [1] * 10 + [0] * 3 + [1] * 20 + [2] * 10 + [0] * 2 + [2] * 5
        expect_freq_0 = 5
        expect_freq_1 = 30
        expect_freq_2 = 15
        freq_dict = data_split.DataSplitter.get_class_freq(y, label_names=[0, 1, 2])
        self.assertAlmostEqual(expect_freq_0, freq_dict[0])
        self.assertAlmostEqual(expect_freq_1, freq_dict[1])
        self.assertAlmostEqual(expect_freq_2, freq_dict[2])

    def tearDown(self):
        session.stop()


if __name__ == '__main__':
    unittest.main()
3,384
36.197802
107
py
FATE
FATE-master/python/federatedml/model_selection/data_split/test/__init__.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
616
37.5625
75
py
FATE
FATE-master/python/federatedml/semi_supervised_learning/positive_unlabeled/positive_unlabeled_transformer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy

from fate_arch.session import computing_session
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.model_base import ModelBase
from federatedml.model_base import Metric, MetricMeta
from federatedml.param.positive_unlabeled_param import PositiveUnlabeledParam


class PositiveUnlabeled(ModelBase):
    def __init__(self):
        super().__init__()
        self.model_param = PositiveUnlabeledParam()
        self.metric_name = "positive_unlabeled"
        self.metric_namespace = "train"
        self.metric_type = "PU_MODEL"
        self.replaced_label_list = []
        self.converted_unlabeled_count = 0

    def _init_model(self, model_param):
        self.strategy = model_param.strategy
        self.threshold = model_param.threshold

    def probability_process(self, label_score_table):
        def replaced_func(x):
            if x[1] >= self.threshold and x[0] == 0:
                return 1
            else:
                return x[0]

        def summarized_func(r, l):
            if r == 1 and l[0] == 0:
                return 1
            else:
                return 0

        LOGGER.info("Switch probability strategy")
        replaced_label_table = label_score_table.mapValues(replaced_func)
        summary_table = replaced_label_table.join(label_score_table, summarized_func)
        self.converted_unlabeled_count = summary_table.filter(lambda k, v: v == 1).count()
        return replaced_label_table

    def quantity_process(self, label_score_table):
        LOGGER.info("Switch quantity strategy")
        label_score_list = list(label_score_table.collect())
        label_score_list.sort(key=lambda x: x[1][1], reverse=True)

        LOGGER.info("Count unlabeled samples")
        label_list = [v[0] for (_, v) in label_score_list]
        unlabeled_count = label_list.count(0)
        if int(self.threshold) > unlabeled_count:
            LOGGER.warning("Param 'threshold' should be no larger than unlabeled count")

        accumulated_count = 0
        for idx, (k, v) in enumerate(label_score_list):
            if accumulated_count < int(self.threshold) and v[0] == 0:
                self.replaced_label_list.append((k, 1))
                self.converted_unlabeled_count += 1
                accumulated_count += 1
            else:
                self.replaced_label_list.append((k, int(v[0])))

    def proportion_process(self, label_score_table):
        LOGGER.info("Switch proportion strategy")
        label_score_list = list(label_score_table.collect())
        label_score_list.sort(key=lambda x: x[1][1], reverse=True)

        LOGGER.info("Compute threshold index")
        total_num = label_score_table.count()
        threshold_idx = int(total_num * self.threshold)

        for idx, (k, v) in enumerate(label_score_list):
            if idx < threshold_idx and v[0] == 0:
                self.replaced_label_list.append((k, 1))
                self.converted_unlabeled_count += 1
            else:
                self.replaced_label_list.append((k, int(v[0])))

    def distribution_process(self, label_score_table):
        LOGGER.info("Switch distribution strategy")
        label_score_list = list(label_score_table.collect())
        label_score_list.sort(key=lambda x: x[1][1], reverse=True)

        LOGGER.info("Compute threshold index")
        total_num = label_score_table.count()
        unlabeled_num = label_score_table.filter(lambda k, v: v[0] == 0).count()
        threshold_idx = int((unlabeled_num / total_num) * self.threshold)

        for idx, (k, v) in enumerate(label_score_list):
            if idx < threshold_idx and v[0] == 0:
                self.replaced_label_list.append((k, 1))
                self.converted_unlabeled_count += 1
            else:
                self.replaced_label_list.append((k, int(v[0])))

    def apply_labeling_strategy(self, strategy, label_score_table):
        if strategy == consts.PROBABILITY:
            return self.probability_process(label_score_table)
        elif strategy == consts.QUANTITY:
            self.quantity_process(label_score_table)
        elif strategy == consts.PROPORTION:
            self.proportion_process(label_score_table)
        else:
            self.distribution_process(label_score_table)

    def replace_table_labels(self, intersect_table, label_table):
        return intersect_table.join(label_table, lambda i, l: self.replace_instance_label(i, l))

    def callback_info(self):
        self.add_summary("count of converted unlabeled", self.converted_unlabeled_count)
        self.callback_metric(metric_name=self.metric_name,
                             metric_namespace=self.metric_namespace,
                             metric_data=[Metric("count of converted unlabeled",
                                                 self.converted_unlabeled_count)])
        self.tracker.set_metric_meta(metric_name=self.metric_name,
                                     metric_namespace=self.metric_namespace,
                                     metric_meta=MetricMeta(name=self.metric_name,
                                                            metric_type=self.metric_type))

    @staticmethod
    def replace_instance_label(inst, label):
        copied_inst = copy.deepcopy(inst)
        copied_inst.label = label
        return copied_inst

    def fit(self, data_insts):
        LOGGER.info("Convert labels by positive unlabeled transformer")
        data_insts_list = list(data_insts.values())
        if self.role == consts.GUEST:
            LOGGER.info("Identify intersect and predict table")
            if "predict_score" not in data_insts_list[0].schema["header"]:
                intersect_table, predict_table = data_insts_list[0], data_insts_list[1]
            else:
                intersect_table, predict_table = data_insts_list[1], data_insts_list[0]

            LOGGER.info("Extract table of label and predict score")
            label_score_table = predict_table.mapValues(lambda x: x.features[0:3:2])

            LOGGER.info("Replace labels by labeling strategy")
            if self.strategy != consts.PROBABILITY:
                self.apply_labeling_strategy(strategy=self.strategy, label_score_table=label_score_table)
                replaced_label_table = computing_session.parallelize(self.replaced_label_list,
                                                                     include_key=True,
                                                                     partition=intersect_table.partitions)
            else:
                replaced_label_table = self.apply_labeling_strategy(strategy=self.strategy,
                                                                    label_score_table=label_score_table)

            LOGGER.info("Construct replaced intersect table")
            replaced_intersect_table = self.replace_table_labels(intersect_table, replaced_label_table)
            replaced_intersect_table.schema = intersect_table.schema

            LOGGER.info("Obtain positive unlabeled summary")
            self.callback_info()
            return replaced_intersect_table
        elif self.role == consts.HOST:
            LOGGER.info("Identify intersect table")
            if data_insts_list[0]:
                intersect_table = data_insts_list[0]
            else:
                intersect_table = data_insts_list[1]
            return intersect_table
7,934
41.891892
114
py
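An illustrative aside (not part of the FATE source): the proportion strategy above ranks samples by predicted score and flips the top threshold fraction of unlabeled samples (label 0) to positive. A self-contained sketch of the same loop on plain (id, (label, score)) tuples; the data and threshold are invented for illustration.

threshold = 0.5  # flip unlabeled samples within the top 50% of scores

label_score_list = [("a", (0, 0.95)), ("b", (1, 0.90)),
                    ("c", (0, 0.40)), ("d", (0, 0.10))]
label_score_list.sort(key=lambda x: x[1][1], reverse=True)

threshold_idx = int(len(label_score_list) * threshold)
replaced = []
for idx, (k, (label, score)) in enumerate(label_score_list):
    # only unlabeled samples (label 0) ranked above the cut-off are flipped
    if idx < threshold_idx and label == 0:
        replaced.append((k, 1))
    else:
        replaced.append((k, int(label)))

assert replaced == [("a", 1), ("b", 1), ("c", 0), ("d", 0)]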
FATE
FATE-master/python/federatedml/semi_supervised_learning/positive_unlabeled/__init__.py
0
0
0
py
FATE
FATE-master/python/federatedml/linear_model/linear_model_weight.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from federatedml.framework.weights import ListWeights, TransferableWeights
from federatedml.util import LOGGER, paillier_check, ipcl_operator


class LinearModelWeights(ListWeights):
    def __init__(self, l, fit_intercept, raise_overflow_error=True):
        l = np.array(l)
        if l.shape != (0,) and not paillier_check.is_paillier_encrypted_number(l):
            if np.max(np.abs(l)) > 1e8:
                if raise_overflow_error:
                    raise RuntimeError(
                        "The model weights overflowed, please check if the input data has been normalized")
                else:
                    LOGGER.warning(
                        f"LinearModelWeights contains entry greater than 1e8.")
        super().__init__(l)
        self.fit_intercept = fit_intercept
        self.raise_overflow_error = raise_overflow_error

    def for_remote(self):
        return TransferableWeights(self._weights, self.__class__, self.fit_intercept)

    @property
    def coef_(self):
        if self.fit_intercept:
            if paillier_check.is_single_ipcl_encrypted_number(self._weights):
                coeffs = ipcl_operator.get_coeffs(self._weights.item(0))
                return np.array(coeffs)
            return np.array(self._weights[:-1])
        return np.array(self._weights)

    @property
    def intercept_(self):
        if self.fit_intercept:
            if paillier_check.is_single_ipcl_encrypted_number(self._weights):
                return ipcl_operator.get_intercept(self._weights.item(0))
            return 0.0 if len(self._weights) == 0 else self._weights[-1]
        return 0.0

    def binary_op(self, other: 'LinearModelWeights', func, inplace):
        if inplace:
            for k, v in enumerate(self._weights):
                self._weights[k] = func(self._weights[k], other._weights[k])
            return self
        else:
            _w = []
            for k, v in enumerate(self._weights):
                _w.append(func(self._weights[k], other._weights[k]))
            return LinearModelWeights(_w, self.fit_intercept, self.raise_overflow_error)

    def map_values(self, func, inplace):
        if paillier_check.is_single_ipcl_encrypted_number(self._weights):
            if inplace:
                self._weights = np.array(func(self.unboxed.item(0)))
                return self
            else:
                _w = func(self.unboxed.item(0))
                return LinearModelWeights(_w, self.fit_intercept)

        if inplace:
            for k, v in enumerate(self._weights):
                self._weights[k] = func(v)
            return self
        else:
            _w = []
            for v in self._weights:
                _w.append(func(v))
            return LinearModelWeights(_w, self.fit_intercept)

    def __repr__(self):
        return f"weights: {self.coef_}, intercept: {self.intercept_}"
3,567
36.557895
109
py
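A short illustrative note: with fit_intercept=True, the weight vector above stores the intercept as its last entry, so coef_ slices it off and intercept_ reads it back. A plain-numpy sketch of that layout for intuition; the values are invented and the IPCL-encrypted branch is omitted.

import numpy as np

# Layout used by LinearModelWeights when fit_intercept=True:
# [w_1, w_2, ..., w_d, b], the intercept appended after the coefficients.
weights = np.array([0.3, -1.2, 0.7, 0.05])

coef_ = weights[:-1]       # feature coefficients
intercept_ = weights[-1]   # bias term

assert np.allclose(coef_, [0.3, -1.2, 0.7])
assert intercept_ == 0.05

# With fit_intercept=False the full vector is the coefficients
# and intercept_ falls back to 0.0.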
FATE
FATE-master/python/federatedml/linear_model/linear_model_base.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import numpy as np

from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.model_selection import start_cross_validation
from federatedml.model_selection.stepwise import start_stepwise
from federatedml.optim.convergence import converge_func_factory
from federatedml.optim.initialize import Initializer
from federatedml.optim.optimizer import optimizer_factory
from federatedml.statistic import data_overview
from federatedml.util import LOGGER
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.callbacks.validation_strategy import ValidationStrategy


class BaseLinearModel(ModelBase):
    def __init__(self):
        super(BaseLinearModel, self).__init__()
        # attribute:
        self.n_iter_ = 0
        self.classes_ = None
        self.feature_shape = None
        self.gradient_operator = None
        self.initializer = Initializer()
        self.transfer_variable = None
        self.loss_history = []
        self.is_converged = False
        self.header = None
        self.model_name = 'toSet'
        self.model_param_name = 'toSet'
        self.model_meta_name = 'toSet'
        self.role = ''
        self.mode = ''
        self.schema = {}
        self.cipher_operator = None
        self.model_weights = None
        self.validation_freqs = None
        self.need_one_vs_rest = False
        self.need_call_back_loss = True
        self.init_param_obj = None
        self.early_stop = None
        self.tol = None

    def _init_model(self, params):
        self.model_param = params
        self.alpha = params.alpha
        self.init_param_obj = params.init_param
        # self.fit_intercept = self.init_param_obj.fit_intercept
        self.batch_size = params.batch_size
        if hasattr(params, "shuffle"):
            self.shuffle = params.shuffle
        if hasattr(params, "masked_rate"):
            self.masked_rate = params.masked_rate
        if hasattr(params, "batch_strategy"):
            self.batch_strategy = params.batch_strategy
        self.max_iter = params.max_iter
        self.optimizer = optimizer_factory(params)
        self.early_stop = params.early_stop
        self.tol = params.tol
        self.converge_func = converge_func_factory(params.early_stop, params.tol)
        self.validation_freqs = params.callback_param.validation_freqs
        self.validation_strategy = None
        self.early_stopping_rounds = params.callback_param.early_stopping_rounds
        self.metrics = params.callback_param.metrics
        self.use_first_metric_only = params.callback_param.use_first_metric_only

        # if len(self.component_properties.host_party_idlist) == 1:
        #     LOGGER.debug(f"set_use_async")
        #     self.gradient_loss_operator.set_use_async()

    def get_features_shape(self, data_instances):
        if self.feature_shape is not None:
            return self.feature_shape
        return data_overview.get_features_shape(data_instances)

    def set_header(self, header):
        self.header = header

    def get_header(self, data_instances):
        if self.header is not None:
            return self.header
        return data_instances.schema.get("header", [])

    @property
    def fit_intercept(self):
        return self.init_param_obj.fit_intercept

    def _get_meta(self):
        raise NotImplementedError("This method should not be called here")

    def _get_param(self):
        raise NotImplementedError("This method should not be called here")

    def export_model(self):
        LOGGER.debug(f"called export model")
        meta_obj = self._get_meta()
        param_obj = self._get_param()
        result = {
            self.model_meta_name: meta_obj,
            self.model_param_name: param_obj
        }
        return result

    def disable_callback_loss(self):
        self.need_call_back_loss = False

    def enable_callback_loss(self):
        self.need_call_back_loss = True

    def callback_loss(self, iter_num, loss):
        metric_meta = MetricMeta(name='train',
                                 metric_type="LOSS",
                                 extra_metas={
                                     "unit_name": "iters",
                                 })

        self.callback_meta(metric_name='loss', metric_namespace='train', metric_meta=metric_meta)
        self.callback_metric(metric_name='loss',
                             metric_namespace='train',
                             metric_data=[Metric(iter_num, loss)])

    def _abnormal_detection(self, data_instances):
        """
        Make sure input data_instances is valid.
        """
        abnormal_detection.empty_table_detection(data_instances)
        abnormal_detection.empty_feature_detection(data_instances)
        ModelBase.check_schema_content(data_instances.schema)

    def init_validation_strategy(self, train_data=None, validate_data=None):
        validation_strategy = ValidationStrategy(self.role, self.mode, self.validation_freqs,
                                                 self.early_stopping_rounds,
                                                 self.use_first_metric_only)
        validation_strategy.set_train_data(train_data)
        validation_strategy.set_validate_data(validate_data)
        return validation_strategy

    def cross_validation(self, data_instances):
        return start_cross_validation.run(self, data_instances)

    def stepwise(self, data_instances):
        self.disable_callback_loss()
        return start_stepwise.run(self, data_instances)

    def _get_cv_param(self):
        self.model_param.cv_param.role = self.role
        self.model_param.cv_param.mode = self.mode
        return self.model_param.cv_param

    def _get_stepwise_param(self):
        self.model_param.stepwise_param.role = self.role
        self.model_param.stepwise_param.mode = self.mode
        return self.model_param.stepwise_param

    def set_schema(self, data_instance, header=None):
        if header is None:
            self.schema["header"] = self.header
        else:
            self.schema["header"] = header
        data_instance.schema = self.schema
        return data_instance

    def init_schema(self, data_instance):
        if data_instance is None:
            return
        self.schema = data_instance.schema
        self.header = self.schema.get('header')

    def get_weight_intercept_dict(self, header):
        weight_dict = {}
        for idx, header_name in enumerate(header):
            coef_i = self.model_weights.coef_[idx]
            weight_dict[header_name] = coef_i
        intercept_ = self.model_weights.intercept_
        return weight_dict, intercept_

    def get_model_summary(self):
        header = self.header
        if header is None:
            return {}
        weight_dict, intercept_ = self.get_weight_intercept_dict(header)
        summary = {"coef": weight_dict,
                   "intercept": intercept_,
                   "is_converged": self.is_converged,
                   "best_iteration": self.callback_variables.best_iteration}

        if self.callback_variables.validation_summary is not None:
            summary["validation_metrics"] = self.callback_variables.validation_summary
        return summary

    def check_abnormal_values(self, data_instances):
        if data_instances is None:
            return

        def _check_overflow(data_iter):
            for _, instant in data_iter:
                features = instant.features
                if isinstance(features, SparseVector):
                    sparse_data = features.get_all_data()
                    for k, v in sparse_data:
                        if np.abs(v) > consts.OVERFLOW_THRESHOLD:
                            return True
                else:
                    if np.max(np.abs(features)) > consts.OVERFLOW_THRESHOLD:
                        return True
            return False

        check_status = data_instances.applyPartitions(_check_overflow)
        is_overflow = check_status.reduce(lambda a, b: a or b)
        if is_overflow:
            raise OverflowError("The value range of features is too large for GLM, please have "
                                "a check for input data")
        LOGGER.info("Check for abnormal value passed")

    def prepare_fit(self, data_instances, validate_data):
        self.header = self.get_header(data_instances)
        self._abnormal_detection(data_instances)
        self.check_abnormal_values(data_instances)
        self.check_abnormal_values(validate_data)
9,360
37.208163
97
py
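An illustrative note on check_abnormal_values above: each partition scans its instances and flags any feature whose magnitude exceeds the overflow threshold, and the per-partition flags are then OR-reduced. A plain-Python sketch of the same scan; the threshold value here is an invented stand-in for consts.OVERFLOW_THRESHOLD, and plain lists stand in for the distributed table.

OVERFLOW_THRESHOLD = 1e8  # illustrative stand-in for consts.OVERFLOW_THRESHOLD

def partition_has_overflow(features_iter):
    # mirrors _check_overflow: flag the partition at the first offending row
    return any(max(abs(x) for x in features) > OVERFLOW_THRESHOLD
               for features in features_iter)

partitions = [[[1.0, 2.0]], [[3.0, 2e9]]]  # two partitions of dense feature rows
flags = [partition_has_overflow(p) for p in partitions]

# OR-reduce across partitions, as applyPartitions(...).reduce(lambda a, b: a or b) does
is_overflow = any(flags)
assert is_overflow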
FATE
FATE-master/python/federatedml/linear_model/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
661
35.777778
75
py
FATE
FATE-master/python/federatedml/linear_model/coordinated_linear_model/base_linear_model_arbiter.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from federatedml.framework.hetero.procedure import convergence
from federatedml.framework.hetero.procedure import paillier_cipher, batch_generator
from federatedml.linear_model.linear_model_base import BaseLinearModel
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util import fate_operator
from federatedml.callbacks.validation_strategy import ValidationStrategy


class HeteroBaseArbiter(BaseLinearModel):
    def __init__(self):
        super(BaseLinearModel, self).__init__()
        self.role = consts.ARBITER

        # attribute
        self.pre_loss = None
        self.loss_history = []
        self.cipher = paillier_cipher.Arbiter()
        self.batch_generator = batch_generator.Arbiter()
        self.gradient_loss_operator = None
        self.converge_procedure = convergence.Arbiter()
        self.best_iteration = -1

    def perform_subtasks(self, **training_info):
        """
        performs any tasks that the arbiter is responsible for.

        This 'perform_subtasks' function serves as a handler on conducting any task
        that the arbiter is responsible for. For example, for the 'perform_subtasks'
        function of 'HeteroDNNLRArbiter' class located in 'hetero_dnn_lr_arbiter.py',
        it performs some works related to updating/training local neural networks of
        guest or host.

        For this particular class, the 'perform_subtasks' function will do nothing.
        In other words, no subtask is performed by this arbiter.

        :param training_info: a dictionary holding training information
        """
        pass

    def init_validation_strategy(self, train_data=None, validate_data=None):
        validation_strategy = ValidationStrategy(self.role, self.mode, self.validation_freqs,
                                                 self.early_stopping_rounds,
                                                 self.use_first_metric_only)
        return validation_strategy

    def fit(self, data_instances=None, validate_data=None):
        """
        Train linear model of role arbiter
        Parameters
        ----------
        data_instances: Table of Instance, input data
        """
        LOGGER.info("Enter hetero linear model arbiter fit")

        self.cipher_operator = self.cipher.paillier_keygen(
            self.model_param.encrypt_param.method, self.model_param.encrypt_param.key_length)
        self.batch_generator.initialize_batch_generator()
        self.gradient_loss_operator.set_total_batch_nums(self.batch_generator.batch_num)

        # self.validation_strategy = self.init_validation_strategy(data_instances, validate_data)
        self.callback_list.on_train_begin(data_instances, validate_data)

        if self.component_properties.is_warm_start:
            self.callback_warm_start_init_iter(self.n_iter_)

        while self.n_iter_ < self.max_iter:
            self.callback_list.on_epoch_begin(self.n_iter_)
            iter_loss = None
            batch_data_generator = self.batch_generator.generate_batch_data()
            total_gradient = None
            self.optimizer.set_iters(self.n_iter_)
            for batch_index in batch_data_generator:
                # Compute and Transfer gradient info
                gradient = self.gradient_loss_operator.compute_gradient_procedure(self.cipher_operator,
                                                                                  self.optimizer,
                                                                                  self.n_iter_,
                                                                                  batch_index)
                if total_gradient is None:
                    total_gradient = gradient
                else:
                    total_gradient = total_gradient + gradient
                training_info = {"iteration": self.n_iter_, "batch_index": batch_index}
                self.perform_subtasks(**training_info)

                loss_list = self.gradient_loss_operator.compute_loss(self.cipher_operator, self.n_iter_,
                                                                     batch_index)

                if len(loss_list) == 1:
                    if iter_loss is None:
                        iter_loss = loss_list[0]
                    else:
                        iter_loss += loss_list[0]
                        # LOGGER.info("Get loss from guest:{}".format(de_loss))

            # if converge
            if iter_loss is not None:
                iter_loss /= self.batch_generator.batch_num
                if self.need_call_back_loss:
                    self.callback_loss(self.n_iter_, iter_loss)
                self.loss_history.append(iter_loss)

            if self.model_param.early_stop == 'weight_diff':
                # LOGGER.debug("total_gradient: {}".format(total_gradient))
                weight_diff = fate_operator.norm(total_gradient)
                # LOGGER.info("iter: {}, weight_diff:{}, is_converged: {}".format(self.n_iter_,
                #                                                                 weight_diff, self.is_converged))
                if weight_diff < self.model_param.tol:
                    self.is_converged = True
            else:
                if iter_loss is None:
                    raise ValueError("Multiple host situation, loss early stop function is not available. "
                                     "You should use 'weight_diff' instead")
                self.is_converged = self.converge_func.is_converge(iter_loss)
                LOGGER.info("iter: {}, loss: {}, is_converged: {}".format(self.n_iter_, iter_loss,
                                                                          self.is_converged))

            self.converge_procedure.sync_converge_info(self.is_converged, suffix=(self.n_iter_,))

            self.callback_list.on_epoch_end(self.n_iter_)
            self.n_iter_ += 1
            if self.stop_training:
                break

            if self.is_converged:
                break

        LOGGER.debug(f"Finish_train, n_iter: {self.n_iter_}")

        self.callback_list.on_train_end()
        summary = {"loss_history": self.loss_history,
                   "is_converged": self.is_converged,
                   "best_iteration": self.best_iteration}
        # if self.validation_strategy and self.validation_strategy.has_saved_best_model():
        #     self.load_model(self.validation_strategy.cur_best_model)
        if self.loss_history is not None and len(self.loss_history) > 0:
            summary["best_iter_loss"] = self.loss_history[self.best_iteration]
        self.set_summary(summary)
        LOGGER.debug("finish running linear model arbiter")
7,192
45.707792
118
py
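An illustrative aside on the arbiter's 'weight_diff' early stop above: it accumulates the epoch's gradients and declares convergence once their norm drops below the tolerance. A minimal numpy sketch of that check; the real code uses fate_operator.norm, for which np.linalg.norm stands in here, and the gradients and tolerance are invented.

import numpy as np

tol = 1e-4  # stand-in for model_param.tol

# Pretend these are the per-batch gradients of one epoch.
batch_gradients = [np.array([1e-5, -2e-5]), np.array([3e-5, 1e-5])]

total_gradient = None
for gradient in batch_gradients:
    total_gradient = gradient if total_gradient is None else total_gradient + gradient

# 'weight_diff' stop: converged once the accumulated gradient norm is below tol.
weight_diff = np.linalg.norm(total_gradient)
is_converged = weight_diff < tol
assert is_converged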
FATE
FATE-master/python/federatedml/linear_model/coordinated_linear_model/__init__.py
0
0
0
py