repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
FATE | FATE-master/examples/experiment_template/pipeline/hetero_lr/pipeline_train_lr.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import FeatureScale
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")  # component names are numbered from 0
data_transform_1 = DataTransform(name="data_transform_1")  # second DataTransform, applied to the test data
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
# apply the same guest-side configuration to data_transform_1 (host parties are configured below)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
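# Note on the host-side parameters above: with "input_format": "tag" and "tag_with_value": True,
# each host record is expected to be a ";"-separated list of "feature:value" pairs, e.g. a line such
# as "x0:0.254;x3:1.882;x7:0.051" (illustrative values only), which DataTransform expands into the
# dense format requested by "output_format".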
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
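# Rough reading of the selection parameters above: "unique_value" drops near-constant columns,
# "iv_filter" keeps the top 10 features by IV and additionally requires IV >= 0.1, and
# "statistic_filter" screens on coefficient of variation and skewness, with take_high controlling
# whether values above or below the corresponding threshold are kept.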
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
param = {
"name": "hetero_scale_0",
"method": "standard_scale"
}
hetero_scale_0 = FeatureScale(**param)
hetero_scale_1 = FeatureScale(name='hetero_scale_1')
param = {
"penalty": "L2",
"optimizer": "nesterov_momentum_sgd",
"tol": 1e-4,
"alpha": 0.01,
"max_iter": 5,
"early_stop": "diff",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"validation_freqs": None,
"early_stopping_rounds": None
}
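# Brief note on the LR parameters above: "penalty"/"alpha" set L2 regularization and its strength,
# "batch_size": -1 uses the full intersected data set in each iteration, and "early_stop": "diff"
# stops once the loss change between iterations falls below "tol" (or when "max_iter" is reached).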
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(hetero_scale_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_scale_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(hetero_scale_0.output.model))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_lr_0, data=Data(train_data=hetero_scale_0.output.data,
validate_data=hetero_scale_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_lr_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
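# Example invocation (assuming a running FATE Flow service, an initialized Pipeline CLI and that the
# referenced tables have already been uploaded under the "experiment" namespace):
#   python pipeline_train_lr.py -config ../../config.yaml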
| 8,439 | 40.170732 | 109 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_lr/__init__.py | | 0 | 0 | 0 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_lr/pipeline_train_pearson_lr.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import HeteroPearson
from pipeline.component import FeatureScale
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")  # component names are numbered from 0
data_transform_1 = DataTransform(name="data_transform_1")  # second DataTransform, applied to the test data
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
# apply the same guest-side configuration to data_transform_1 (host parties are configured below)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
pearson_0 = HeteroPearson(name='pearson_0', column_indexes=-1)
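# HeteroPearson (column_indexes=-1 selects all remaining columns) computes cross-party Pearson
# correlations in a privacy-preserving way; in this pipeline it is attached for feature analysis
# only, and its output is not consumed by the downstream scaling or LR components.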
param = {
"name": "hetero_scale_0",
"method": "standard_scale"
}
hetero_scale_0 = FeatureScale(**param)
hetero_scale_1 = FeatureScale(name='hetero_scale_1')
param = {
"penalty": "L2",
"optimizer": "nesterov_momentum_sgd",
"tol": 1e-4,
"alpha": 0.01,
"max_iter": 5,
"early_stop": "diff",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"validation_freqs": None,
"early_stopping_rounds": None
}
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(pearson_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_scale_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_scale_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(hetero_scale_0.output.model))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_lr_0, data=Data(train_data=hetero_scale_0.output.data,
validate_data=hetero_scale_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_lr_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 8,646 | 40.373206 | 109 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_secureboost/pipeline_train_test_sbt.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import HeteroSecureBoost
from pipeline.component import DataTransform
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_eval_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_eval_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
reader_2 = Reader(name="reader_2")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_eval_data)
reader_2.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_eval_data)
reader_2.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")  # component names are numbered from 0
data_transform_1 = DataTransform(name="data_transform_1")  # second DataTransform, applied to the eval data
data_transform_2 = DataTransform(name="data_transform_2")  # third DataTransform, applied to the test data
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
# apply the same guest-side configuration to data_transform_1 and data_transform_2 (host parties are configured below)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
data_transform_2.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_2.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
intersection_2 = Intersection(name="intersection_2", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
hetero_feature_selection_2 = HeteroFeatureSelection(name='hetero_feature_selection_2')
param = {
"task_type": "classification",
"learning_rate": 0.1,
"num_trees": 10,
"subsample_feature_rate": 0.5,
"n_iter_no_change": False,
"tol": 0.0002,
"bin_num": 50,
"objective_param": {
"objective": "cross_entropy"
},
"encrypt_param": {
"method": "paillier"
},
"predict_param": {
"threshold": 0.5
},
"tree_param": {
"max_depth": 2
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"validation_freqs": 2,
"early_stopping_rounds": 5,
"metrics": ["auc", "ks"]
}
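# Rough reading of the boosting parameters above: 10 trees of depth 2 are fitted with learning rate
# 0.1, each tree sampling 50% of the features; "validation_freqs": 2 evaluates on the validate set
# every 2 trees, and "early_stopping_rounds": 5 stops training early if the validation metrics
# ("auc", "ks") show no improvement for 5 consecutive evaluations.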
hetero_secureboost_0 = HeteroSecureBoost(name='hetero_secureboost_0', **param)
hetero_secureboost_1 = HeteroSecureBoost(name='hetero_secureboost_1')
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(reader_2)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
pipeline.add_component(data_transform_2,
data=Data(data=reader_2.output.data), model=Model(data_transform_1.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(intersection_2, data=Data(data=data_transform_2.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(hetero_feature_selection_2, data=Data(data=intersection_2.output.data),
model=Model(hetero_feature_selection_1.output.model))
# set train & validate data of hetero_secureboost_0 component
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=hetero_feature_selection_0.output.data,
validate_data=hetero_feature_selection_1.output.data))
pipeline.add_component(hetero_secureboost_1, data=Data(test_data=hetero_feature_selection_2.output.data),
model=Model(hetero_secureboost_0.output.model))
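# hetero_secureboost_1 defines no training parameters of its own: it reuses the model produced by
# hetero_secureboost_0 and only scores the held-out test set (reader_2 -> data_transform_2 ->
# intersection_2 -> hetero_feature_selection_2); evaluation_0 below is then fed the outputs of
# both components.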
pipeline.add_component(evaluation_0,
data=Data(data=[hetero_secureboost_0.output.data, hetero_secureboost_1.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_secureboost_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 9,961 | 42.502183 | 113 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_secureboost/pipeline_train_one_hot_sbt.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import OneHotEncoder
from pipeline.component import HeteroSecureBoost
from pipeline.component import DataTransform
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "mock_tag_hetero_host", "namespace": "experiment"}
host_test_data = {"name": "mock_tag_hetero_host", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")  # component names are numbered from 0
data_transform_1 = DataTransform(name="data_transform_1")  # second DataTransform, applied to the test data
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
# apply the same guest-side configuration to data_transform_1 (host parties are configured below)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": False,
"delimitor": ",",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
one_hot_encoder_0 = OneHotEncoder(name="one_hot_encoder_0")
one_hot_encoder_1 = OneHotEncoder(name="one_hot_encoder_1")
one_hot_encoder_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=False)
one_hot_encoder_0.get_party_instance(role='host', party_id=host)
one_hot_encoder_1.get_party_instance(role='guest', party_id=guest).component_param(need_run=False)
one_hot_encoder_1.get_party_instance(role='host', party_id=host)
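# need_run=False on the guest side means the guest skips one-hot encoding (its features pass
# through unchanged) and only the host's features are expanded; the bare get_party_instance calls
# for the host simply leave the host-side OneHotEncoder at its default settings.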
param = {
"task_type": "classification",
"learning_rate": 0.1,
"num_trees": 10,
"subsample_feature_rate": 0.5,
"n_iter_no_change": False,
"tol": 0.0002,
"bin_num": 50,
"objective_param": {
"objective": "cross_entropy"
},
"encrypt_param": {
"method": "paillier"
},
"predict_param": {
"threshold": 0.5
},
"tree_param": {
"max_depth": 2
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"validation_freqs": 2,
"early_stopping_rounds": 5,
"metrics": ["auc", "ks"]
}
hetero_secureboost_0 = HeteroSecureBoost(name='hetero_secureboost_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(one_hot_encoder_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(one_hot_encoder_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(one_hot_encoder_0.output.model))
# set train & validate data of hetero_secureboost_0 component
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=one_hot_encoder_0.output.data,
validate_data=one_hot_encoder_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_secureboost_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_secureboost_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 9,077 | 40.452055 | 109 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_secureboost/pipeline_train_sbt.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component.hetero_secureboost import HeteroSecureBoost
from pipeline.component import DataTransform
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")  # component names are numbered from 0
data_transform_1 = DataTransform(name="data_transform_1")  # second DataTransform, applied to the test data
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
# apply the same guest-side configuration to data_transform_1 (host parties are configured below)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
param = {
"task_type": "classification",
"learning_rate": 0.1,
"num_trees": 10,
"subsample_feature_rate": 0.5,
"n_iter_no_change": False,
"tol": 0.0002,
"bin_num": 50,
"objective_param": {
"objective": "cross_entropy"
},
"encrypt_param": {
"method": "paillier"
},
"predict_param": {
"threshold": 0.5
},
"tree_param": {
"max_depth": 2
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"validation_freqs": 2,
"early_stopping_rounds": 5,
"metrics": ["auc", "ks"]
}
hetero_secureboost_0 = HeteroSecureBoost(name='hetero_secureboost_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
# set train & validate data of hetero_secureboost_0 component
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=hetero_feature_selection_0.output.data,
validate_data=hetero_feature_selection_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_secureboost_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_secureboost_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 8,335 | 39.076923 | 113 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_secureboost/pipeline_train_union_sbt.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import HeteroSecureBoost
from pipeline.component import Union
from pipeline.component import DataTransform
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data_0 = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_train_data_1 = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data_0 = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data_1 = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data_0 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_train_data_1 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data_0 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data_1 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
reader_2 = Reader(name="reader_2")
reader_3 = Reader(name="reader_3")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data_0)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data_1)
reader_2.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data_0)
reader_3.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data_1)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data_0)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data_1)
reader_2.get_party_instance(role='host', party_id=host).component_param(table=host_test_data_0)
reader_3.get_party_instance(role='host', party_id=host).component_param(table=host_test_data_1)
param = {
"name": "union_0",
"keep_duplicate": True
}
union_0 = Union(**param)
param = {
"name": "union_1",
"keep_duplicate": True
}
union_1 = Union(**param)
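# Each Union component concatenates two Reader outputs row-wise into a single table (union_0 joins
# the two train tables, union_1 the two test tables); "keep_duplicate": True keeps samples whose
# ids appear in both inputs instead of dropping the duplicates.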
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")  # component names are numbered from 0
data_transform_1 = DataTransform(name="data_transform_1")  # second DataTransform, applied to the unioned test data
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
intersection_1 = Intersection(name="intersection_1")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
param = {
"task_type": "classification",
"learning_rate": 0.1,
"num_trees": 10,
"subsample_feature_rate": 0.5,
"n_iter_no_change": False,
"tol": 0.0002,
"bin_num": 50,
"objective_param": {
"objective": "cross_entropy"
},
"encrypt_param": {
"method": "paillier"
},
"predict_param": {
"threshold": 0.5
},
"tree_param": {
"max_depth": 2
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"validation_freqs": 2,
"early_stopping_rounds": 5,
"metrics": ["auc", "ks"]
}
hetero_secureboost_0 = HeteroSecureBoost(name='hetero_secureboost_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(reader_2)
pipeline.add_component(reader_3)
pipeline.add_component(union_0, data=Data(data=[reader_0.output.data, reader_1.output.data]))
pipeline.add_component(union_1, data=Data(data=[reader_2.output.data, reader_3.output.data]))
pipeline.add_component(data_transform_0, data=Data(data=union_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=union_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
# set train & validate data of hetero_secureboost_0 component
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=hetero_feature_selection_0.output.data,
validate_data=hetero_feature_selection_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secureboost_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_secureboost_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 9,315 | 40.963964 | 113 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_secureboost/pipeline_train_pearson_sbt.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import HeteroSecureBoost
from pipeline.component import HeteroPearson
from pipeline.component import DataTransform
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0")  # component names are numbered from 0
data_transform_1 = DataTransform(name="data_transform_1")  # second DataTransform, applied to the test data
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
# apply the same guest-side configuration to data_transform_1 (host parties are configured below)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
pearson_0 = HeteroPearson(name='pearson_0', column_indexes=-1)
param = {
"task_type": "classification",
"learning_rate": 0.1,
"num_trees": 10,
"subsample_feature_rate": 0.5,
"n_iter_no_change": False,
"tol": 0.0002,
"bin_num": 50,
"objective_param": {
"objective": "cross_entropy"
},
"encrypt_param": {
"method": "paillier"
},
"predict_param": {
"threshold": 0.5
},
"tree_param": {
"max_depth": 2
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"validation_freqs": 2,
"early_stopping_rounds": 5,
"metrics": ["auc", "ks"]
}
hetero_secureboost_0 = HeteroSecureBoost(name='hetero_secureboost_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(pearson_0, data=Data(data=hetero_feature_selection_0.output.data))
# set train & validate data of hetero_secureboost_0 component
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=hetero_feature_selection_0.output.data,
validate_data=hetero_feature_selection_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_secureboost_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_secureboost_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 8,523 | 39.207547 | 113 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_secureboost/__init__.py | | 0 | 0 | 0 | py |
FATE | FATE-master/examples/experiment_template/pipeline/hetero_secureboost/pipeline_train_manually_sbt.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import HeteroSecureBoost
from pipeline.component import DataTransform
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
data_transform_1 = DataTransform(name="data_transform_1") # start component numbering at 1
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
    # get and configure DataTransform party instance of guest for data_transform_1
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["manually", "unique_value", "iv_filter", "statistic_filter"],
"manually_param": {
"filter_out_indexes": [1, 2],
"filter_out_names": ["x2", "x3"]
},
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
param = {
"task_type": "classification",
"learning_rate": 0.1,
"num_trees": 10,
"subsample_feature_rate": 0.5,
"n_iter_no_change": False,
"tol": 0.0002,
"bin_num": 50,
"objective_param": {
"objective": "cross_entropy"
},
"encrypt_param": {
"method": "paillier"
},
"predict_param": {
"threshold": 0.5
},
"tree_param": {
"max_depth": 2
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"validation_freqs": 2,
"early_stopping_rounds": 5,
"metrics": ["auc", "ks"]
}
hetero_secureboost_0 = HeteroSecureBoost(name='hetero_secureboost_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
# set train & validate data of hetero_secureboost_0 component
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=hetero_feature_selection_0.output.data,
validate_data=hetero_feature_selection_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secureboost_0.output.data))
    # compile pipeline once finished adding modules; this step generates the conf and dsl files for the job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_secureboost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 8,452 | 38.872642 | 113 |
py
|
FATE
|
FATE-master/python/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/fate_arch/relation_ship.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing import ComputingEngine
from fate_arch.federation import FederationEngine
from fate_arch.storage import StorageEngine
from fate_arch.common.address import StandaloneAddress, EggRollAddress, HDFSAddress, \
MysqlAddress, \
PathAddress, LocalFSAddress, HiveAddress, LinkisHiveAddress, ApiAddress
from fate_arch.common import EngineType
class Relationship(object):
Computing = {
ComputingEngine.STANDALONE: {
EngineType.STORAGE: {
"default": StorageEngine.STANDALONE,
"support": [StorageEngine.STANDALONE]
},
EngineType.FEDERATION: {
"default": FederationEngine.STANDALONE,
"support": [FederationEngine.STANDALONE, FederationEngine.RABBITMQ, FederationEngine.PULSAR]
},
},
ComputingEngine.EGGROLL: {
EngineType.STORAGE: {
"default": StorageEngine.EGGROLL,
"support": [StorageEngine.EGGROLL]
},
EngineType.FEDERATION: {
"default": FederationEngine.EGGROLL,
"support": [FederationEngine.EGGROLL, FederationEngine.RABBITMQ, FederationEngine.PULSAR]
},
},
ComputingEngine.SPARK: {
EngineType.STORAGE: {
"default": StorageEngine.HDFS,
"support": [StorageEngine.HDFS, StorageEngine.HIVE, StorageEngine.LOCALFS]
},
EngineType.FEDERATION: {
"default": FederationEngine.RABBITMQ,
"support": [FederationEngine.PULSAR, FederationEngine.RABBITMQ]
},
},
ComputingEngine.LINKIS_SPARK: {
EngineType.STORAGE: {
"default": StorageEngine.LINKIS_HIVE,
"support": [StorageEngine.LINKIS_HIVE]
},
EngineType.FEDERATION: {
"default": FederationEngine.RABBITMQ,
"support": [FederationEngine.PULSAR, FederationEngine.RABBITMQ]
},
}
}
EngineToAddress = {
StorageEngine.STANDALONE: StandaloneAddress,
StorageEngine.EGGROLL: EggRollAddress,
StorageEngine.HDFS: HDFSAddress,
StorageEngine.MYSQL: MysqlAddress,
StorageEngine.HIVE: HiveAddress,
StorageEngine.LINKIS_HIVE: LinkisHiveAddress,
StorageEngine.LOCALFS: LocalFSAddress,
StorageEngine.PATH: PathAddress,
StorageEngine.API: ApiAddress
}
EngineConfMap = {
"fate_on_standalone": {
EngineType.COMPUTING: [(ComputingEngine.STANDALONE, "standalone")],
EngineType.STORAGE: [(StorageEngine.STANDALONE, "standalone")],
EngineType.FEDERATION: [(FederationEngine.STANDALONE, "standalone")]
},
"fate_on_eggroll": {
EngineType.COMPUTING: [(ComputingEngine.EGGROLL, "clustermanager")],
EngineType.STORAGE: [(StorageEngine.EGGROLL, "clustermanager")],
EngineType.FEDERATION: [(FederationEngine.EGGROLL, "rollsite")],
},
"fate_on_spark": {
EngineType.COMPUTING: [(ComputingEngine.SPARK, "spark"), (ComputingEngine.LINKIS_SPARK, "linkis_spark")],
EngineType.STORAGE: [(StorageEngine.HDFS, "hdfs"), (StorageEngine.HIVE, "hive"),
(StorageEngine.LINKIS_HIVE, "linkis_hive"), (StorageEngine.LOCALFS, "localfs")],
EngineType.FEDERATION: [(FederationEngine.RABBITMQ, "rabbitmq"), (FederationEngine.PULSAR, "pulsar")]
},
}
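# Usage sketch: the mappings above are plain dictionaries, so callers can look up
# the default and supported engines for a given computing engine directly.
# The function below is illustrative only and is not called anywhere.
def _example_engine_lookup():
    storage_conf = Relationship.Computing[ComputingEngine.SPARK][EngineType.STORAGE]
    default_storage = storage_conf["default"]  # StorageEngine.HDFS
    supported_storage = storage_conf["support"]  # HDFS, HIVE, LOCALFS
    address_cls = Relationship.EngineToAddress[default_storage]  # HDFSAddress
    return default_storage, supported_storage, address_cls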
| 4,172 | 41.151515 | 117 |
py
|
FATE
|
FATE-master/python/fate_arch/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/_standalone.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import hashlib
import itertools
import pickle as c_pickle
import shutil
import time
import typing
import uuid
from collections.abc import Iterable
from concurrent.futures import ProcessPoolExecutor as Executor
from contextlib import ExitStack
from functools import partial
from heapq import heapify, heappop, heapreplace
from operator import is_not
from pathlib import Path
import cloudpickle as f_pickle
import lmdb
import numpy as np
from fate_arch.common import Party, file_utils
from fate_arch.common.log import getLogger
from fate_arch.federation import FederationDataType
LOGGER = getLogger()
serialize = c_pickle.dumps
deserialize = c_pickle.loads
# default message max size in bytes = 1MB
DEFAULT_MESSAGE_MAX_SIZE = 1048576
# noinspection PyPep8Naming
class Table(object):
def __init__(
self,
session: "Session",
namespace: str,
name: str,
partitions,
need_cleanup=True,
):
self._need_cleanup = need_cleanup
self._namespace = namespace
self._name = name
self._partitions = partitions
self._session = session
@property
def partitions(self):
return self._partitions
@property
def name(self):
return self._name
@property
def namespace(self):
return self._namespace
def __del__(self):
if self._need_cleanup:
self.destroy()
def __str__(self):
return f"<Table {self._namespace}|{self._name}|{self._partitions}|{self._need_cleanup}>"
def __repr__(self):
return self.__str__()
def destroy(self):
for p in range(self._partitions):
with self._get_env_for_partition(p, write=True) as env:
db = env.open_db()
with env.begin(write=True) as txn:
txn.drop(db)
        _TableMetaManager.destroy_table(self._namespace, self._name)
def take(self, n, **kwargs):
if n <= 0:
raise ValueError(f"{n} <= 0")
return list(itertools.islice(self.collect(**kwargs), n))
def count(self):
cnt = 0
for p in range(self._partitions):
with self._get_env_for_partition(p) as env:
cnt += env.stat()["entries"]
return cnt
# noinspection PyUnusedLocal
def collect(self, **kwargs):
iterators = []
with ExitStack() as s:
for p in range(self._partitions):
env = s.enter_context(self._get_env_for_partition(p))
txn = s.enter_context(env.begin())
iterators.append(s.enter_context(txn.cursor()))
# Merge sorted
entries = []
for _id, it in enumerate(iterators):
if it.next():
key, value = it.item()
entries.append([key, value, _id, it])
heapify(entries)
while entries:
key, value, _, it = entry = entries[0]
yield deserialize(key), deserialize(value)
if it.next():
entry[0], entry[1] = it.item()
heapreplace(entries, entry)
else:
_, _, _, it = heappop(entries)
def reduce(self, func):
# noinspection PyProtectedMember
rs = self._session._submit_unary(func, _do_reduce, self._partitions, self._name, self._namespace)
rs = [r for r in filter(partial(is_not, None), rs)]
if len(rs) <= 0:
return None
rtn = rs[0]
for r in rs[1:]:
rtn = func(rtn, r)
return rtn
def map(self, func):
return self._unary(func, _do_map)
def mapValues(self, func):
return self._unary(func, _do_map_values)
def flatMap(self, func):
_flat_mapped = self._unary(func, _do_flat_map)
return _flat_mapped.save_as(
name=str(uuid.uuid1()),
namespace=_flat_mapped.namespace,
partition=self._partitions,
need_cleanup=True,
)
def applyPartitions(self, func):
return self._unary(func, _do_apply_partitions)
def mapPartitions(self, func, preserves_partitioning=False):
un_shuffled = self._unary(func, _do_map_partitions)
if preserves_partitioning:
return un_shuffled
return un_shuffled.save_as(
name=str(uuid.uuid1()),
namespace=un_shuffled.namespace,
partition=self._partitions,
need_cleanup=True,
)
def mapPartitionsWithIndex(self, func, preserves_partitioning=False):
un_shuffled = self._unary(func, _do_map_partitions_with_index)
if preserves_partitioning:
return un_shuffled
return un_shuffled.save_as(
name=str(uuid.uuid1()),
namespace=un_shuffled.namespace,
partition=self._partitions,
need_cleanup=True,
)
def mapReducePartitions(self, mapper, reducer):
dup = _create_table(
self._session,
str(uuid.uuid1()),
self.namespace,
self._partitions,
need_cleanup=True,
)
def _dict_reduce(a: dict, b: dict):
for k, v in b.items():
if k not in a:
a[k] = v
else:
a[k] = reducer(a[k], v)
return a
def _local_map_reduce(it):
ret = {}
for _k, _v in mapper(it):
if _k not in ret:
ret[_k] = _v
else:
ret[_k] = reducer(ret[_k], _v)
return ret
dup.put_all(self.applyPartitions(_local_map_reduce).reduce(_dict_reduce).items())
return dup
def glom(self):
return self._unary(None, _do_glom)
def sample(self, fraction, seed=None):
return self._unary((fraction, seed), _do_sample)
def filter(self, func):
return self._unary(func, _do_filter)
def join(self, other: "Table", func):
return self._binary(other, func, _do_join)
def subtractByKey(self, other: "Table"):
func = f"{self._namespace}.{self._name}-{other._namespace}.{other._name}"
return self._binary(other, func, _do_subtract_by_key)
def union(self, other: "Table", func=lambda v1, v2: v1):
return self._binary(other, func, _do_union)
# noinspection PyProtectedMember
def _map_reduce(self, mapper, reducer):
results = self._session._submit_map_reduce_in_partitions(
mapper, reducer, self._partitions, self._name, self._namespace
)
result = results[0]
# noinspection PyProtectedMember
return _create_table(
session=self._session,
name=result.name,
namespace=result.namespace,
partitions=self._partitions,
)
def _unary(self, func, do_func):
# noinspection PyProtectedMember
results = self._session._submit_unary(func, do_func, self._partitions, self._name, self._namespace)
result = results[0]
# noinspection PyProtectedMember
return _create_table(
session=self._session,
name=result.name,
namespace=result.namespace,
partitions=self._partitions,
)
def _binary(self, other: "Table", func, do_func):
session_id = self._session.session_id
left, right = self, other
if left._partitions != right._partitions:
if other.count() > self.count():
left = left.save_as(str(uuid.uuid1()), session_id, partition=right._partitions)
else:
right = other.save_as(str(uuid.uuid1()), session_id, partition=left._partitions)
# noinspection PyProtectedMember
results = self._session._submit_binary(
func,
do_func,
left._partitions,
left._name,
left._namespace,
right._name,
right._namespace,
)
result: _Operand = results[0]
# noinspection PyProtectedMember
return _create_table(
session=self._session,
name=result.name,
namespace=result.namespace,
partitions=left._partitions,
)
def save_as(self, name, namespace, partition=None, need_cleanup=True):
if partition is None:
partition = self._partitions
# noinspection PyProtectedMember
dup = _create_table(self._session, name, namespace, partition, need_cleanup)
dup.put_all(self.collect())
return dup
def _get_env_for_partition(self, p: int, write=False):
return _get_env(self._namespace, self._name, str(p), write=write)
def put(self, k, v):
k_bytes, v_bytes = _kv_to_bytes(k=k, v=v)
p = _hash_key_to_partition(k_bytes, self._partitions)
with self._get_env_for_partition(p, write=True) as env:
with env.begin(write=True) as txn:
return txn.put(k_bytes, v_bytes)
def put_all(self, kv_list: Iterable):
txn_map = {}
is_success = True
with ExitStack() as s:
for p in range(self._partitions):
env = s.enter_context(self._get_env_for_partition(p, write=True))
txn_map[p] = env, env.begin(write=True)
for k, v in kv_list:
try:
k_bytes, v_bytes = _kv_to_bytes(k=k, v=v)
p = _hash_key_to_partition(k_bytes, self._partitions)
is_success = is_success and txn_map[p][1].put(k_bytes, v_bytes)
except Exception as e:
is_success = False
LOGGER.exception(f"put_all for k={k} v={v} fail. exception: {e}")
break
for p, (env, txn) in txn_map.items():
txn.commit() if is_success else txn.abort()
def get(self, k):
k_bytes = _k_to_bytes(k=k)
p = _hash_key_to_partition(k_bytes, self._partitions)
with self._get_env_for_partition(p) as env:
with env.begin(write=True) as txn:
old_value_bytes = txn.get(k_bytes)
return None if old_value_bytes is None else deserialize(old_value_bytes)
def delete(self, k):
k_bytes = _k_to_bytes(k=k)
p = _hash_key_to_partition(k_bytes, self._partitions)
with self._get_env_for_partition(p, write=True) as env:
with env.begin(write=True) as txn:
old_value_bytes = txn.get(k_bytes)
if txn.delete(k_bytes):
return None if old_value_bytes is None else deserialize(old_value_bytes)
return None
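# Usage sketch for the LMDB-backed Table API above: data is spread across
# per-partition LMDB environments, and the functional operators (map, join,
# reduce, ...) run partition by partition in the session's process pool.
# The function below is illustrative only and is not called anywhere.
def _example_table_ops(session: "Session"):
    left = session.parallelize(range(10), partition=2, include_key=False)
    right = left.mapValues(lambda v: v * 10)
    joined = left.join(right, lambda v1, v2: (v1, v2))  # key -> (v, 10 * v)
    totals = joined.reduce(lambda a, b: (a[0] + b[0], a[1] + b[1]))
    return totals  # (45, 450)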
# noinspection PyMethodMayBeStatic
class Session(object):
def __init__(self, session_id, max_workers=None):
self.session_id = session_id
self._pool = Executor(max_workers=max_workers)
def __getstate__(self):
# session won't be pickled
pass
def load(self, name, namespace):
return _load_table(session=self, name=name, namespace=namespace)
def create_table(self, name, namespace, partitions, need_cleanup, error_if_exist):
return _create_table(
session=self,
name=name,
namespace=namespace,
partitions=partitions,
need_cleanup=need_cleanup,
error_if_exist=error_if_exist,
)
# noinspection PyUnusedLocal
def parallelize(self, data: Iterable, partition: int, include_key: bool = False, **kwargs):
if not include_key:
data = enumerate(data)
table = _create_table(
session=self,
name=str(uuid.uuid1()),
namespace=self.session_id,
partitions=partition,
)
table.put_all(data)
return table
def cleanup(self, name, namespace):
data_path = _data_dir
if not data_path.is_dir():
LOGGER.error(f"illegal data dir: {data_path}")
return
namespace_dir = data_path.joinpath(namespace)
if not namespace_dir.is_dir():
return
if name == "*":
shutil.rmtree(namespace_dir, True)
return
for table in namespace_dir.glob(name):
shutil.rmtree(table, True)
def stop(self):
self.cleanup(name="*", namespace=self.session_id)
self._pool.shutdown()
def kill(self):
self.cleanup(name="*", namespace=self.session_id)
self._pool.shutdown()
def _submit_unary(self, func, _do_func, partitions, name, namespace):
task_info = _TaskInfo(
self.session_id,
function_id=str(uuid.uuid1()),
function_bytes=f_pickle.dumps(func),
)
futures = []
for p in range(partitions):
futures.append(
self._pool.submit(
_do_func,
_UnaryProcess(task_info, _Operand(namespace, name, p, partitions)),
)
)
results = [r.result() for r in futures]
return results
def _submit_map_reduce_in_partitions(self, mapper, reducer, partitions, name, namespace):
task_info = _MapReduceTaskInfo(
self.session_id,
function_id=str(uuid.uuid1()),
map_function_bytes=f_pickle.dumps(mapper),
reduce_function_bytes=f_pickle.dumps(reducer),
)
futures = []
for p in range(partitions):
futures.append(
self._pool.submit(
_do_map_reduce_in_partitions,
_MapReduceProcess(task_info, _Operand(namespace, name, p, partitions)),
)
)
results = [r.result() for r in futures]
return results
def _submit_binary(self, func, do_func, partitions, name, namespace, other_name, other_namespace):
task_info = _TaskInfo(
self.session_id,
function_id=str(uuid.uuid1()),
function_bytes=f_pickle.dumps(func),
)
futures = []
for p in range(partitions):
left = _Operand(namespace, name, p, partitions)
right = _Operand(other_namespace, other_name, p, partitions)
futures.append(self._pool.submit(do_func, _BinaryProcess(task_info, left, right)))
results = [r.result() for r in futures]
return results
def _get_splits(obj, max_message_size):
obj_bytes = serialize(obj, protocol=4)
byte_size = len(obj_bytes)
num_slice = (byte_size - 1) // max_message_size + 1
if num_slice <= 1:
return obj, num_slice
else:
_max_size = max_message_size
kv = [(i, obj_bytes[slice(i * _max_size, (i + 1) * _max_size)]) for i in range(num_slice)]
return kv, num_slice
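# Usage sketch for _get_splits: objects larger than the configured message size
# are serialized and cut into indexed byte chunks; the receiving side restores
# the object by sorting the chunks on their index (see Federation.get below).
# The function below is illustrative only and is not called anywhere.
def _example_split_round_trip(obj, max_message_size=DEFAULT_MESSAGE_MAX_SIZE):
    splits, num_slice = _get_splits(obj, max_message_size)
    if num_slice <= 1:
        return splits  # small enough: the original object is returned unchanged
    obj_bytes = b"".join(chunk for _, chunk in sorted(splits, key=lambda kv: kv[0]))
    return deserialize(obj_bytes)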
class _FederationMetaManager:
STATUS_TABLE_NAME_PREFIX = "__federation_status__"
OBJECT_TABLE_NAME_PREFIX = "__federation_object__"
def __init__(self, session_id, party) -> None:
self.session_id = session_id
self.party = party
self._env = {}
    async def await_status_set(self, key):
value = self.get_status(key)
while value is None:
await asyncio.sleep(0.1)
value = self.get_status(key)
LOGGER.debug("[GET] Got {} type {}".format(key, "Table" if isinstance(value, tuple) else "Object"))
return value
def get_status(self, key):
return self._get(self._get_status_table_name(self.party), key)
def set_status(self, party, key, value):
return self._set(self._get_status_table_name(party), key, value)
def ack_status(self, key):
return self._ack(self._get_status_table_name(self.party), key)
def get_object(self, key):
return self._get(self._get_object_table_name(self.party), key)
def set_object(self, party, key, value):
return self._set(self._get_object_table_name(party), key, value)
def ack_object(self, key):
return self._ack(self._get_object_table_name(self.party), key)
def _get_status_table_name(self, party):
return f"{self.STATUS_TABLE_NAME_PREFIX}.{party.role}_{party.party_id}"
def _get_object_table_name(self, party):
return f"{self.OBJECT_TABLE_NAME_PREFIX}.{party.role}_{party.party_id}"
def _get_env(self, name):
if name not in self._env:
self._env[name] = _get_env(self.session_id, name, str(0), write=True)
return self._env[name]
def _get(self, name, key):
env = self._get_env(name)
with env.begin(write=False) as txn:
old_value_bytes = txn.get(serialize(key))
if old_value_bytes is not None:
old_value_bytes = deserialize(old_value_bytes)
return old_value_bytes
def _set(self, name, key, value):
env = self._get_env(name)
with env.begin(write=True) as txn:
return txn.put(serialize(key), serialize(value))
def _ack(self, name, key):
env = self._get_env(name)
with env.begin(write=True) as txn:
txn.delete(serialize(key))
class Federation(object):
def _federation_object_key(self, name, tag, s_party, d_party):
return f"{self._session_id}-{name}-{tag}-{s_party.role}-{s_party.party_id}-{d_party.role}-{d_party.party_id}"
def __init__(self, session: Session, session_id, party: Party):
self._session_id = session_id
self._party: Party = party
self._session = session
self._max_message_size = DEFAULT_MESSAGE_MAX_SIZE
        self._event_loop = None
self._meta = _FederationMetaManager(session_id, party)
def destroy(self):
self._session.cleanup(namespace=self._session_id, name="*")
@property
def _loop(self):
        if self._event_loop is None:
            self._event_loop = asyncio.get_event_loop()
        return self._event_loop
# noinspection PyUnusedLocal
def remote(self, v, name: str, tag: str, parties: typing.List[Party]):
log_str = f"federation.standalone.remote.{name}.{tag}"
if v is None:
raise ValueError(f"[{log_str}]remote `None` to {parties}")
LOGGER.debug(f"[{log_str}]remote data, type={type(v)}")
if isinstance(v, Table):
dtype = FederationDataType.TABLE
LOGGER.debug(
f"[{log_str}]remote "
f"Table(namespace={v.namespace}, name={v.name}, partitions={v.partitions}), dtype={dtype}"
)
else:
v_splits, num_slice = _get_splits(v, self._max_message_size)
if num_slice > 1:
v = _create_table(
session=self._session,
name=str(uuid.uuid1()),
namespace=self._session_id,
partitions=1,
need_cleanup=True,
error_if_exist=False,
)
v.put_all(kv_list=v_splits)
dtype = FederationDataType.SPLIT_OBJECT
LOGGER.debug(
f"[{log_str}]remote "
f"Table(namespace={v.namespace}, name={v.name}, partitions={v.partitions}), dtype={dtype}"
)
else:
LOGGER.debug(f"[{log_str}]remote object with type: {type(v)}")
dtype = FederationDataType.OBJECT
for party in parties:
_tagged_key = self._federation_object_key(name, tag, self._party, party)
if isinstance(v, Table):
saved_name = str(uuid.uuid1())
LOGGER.debug(
f"[{log_str}]save Table(namespace={v.namespace}, name={v.name}, partitions={v.partitions}) as "
f"Table(namespace={v.namespace}, name={saved_name}, partitions={v.partitions})"
)
_v = v.save_as(name=saved_name, namespace=v.namespace, need_cleanup=False)
self._meta.set_status(party, _tagged_key, (_v.name, _v.namespace, dtype))
else:
self._meta.set_object(party, _tagged_key, v)
self._meta.set_status(party, _tagged_key, _tagged_key)
# noinspection PyProtectedMember
def get(self, name: str, tag: str, parties: typing.List[Party]) -> typing.List:
log_str = f"federation.standalone.get.{name}.{tag}"
LOGGER.debug(f"[{log_str}]")
tasks = []
for party in parties:
_tagged_key = self._federation_object_key(name, tag, party, self._party)
            tasks.append(self._meta.await_status_set(_tagged_key))
results = self._loop.run_until_complete(asyncio.gather(*tasks))
rtn = []
for r in results:
if isinstance(r, tuple):
# noinspection PyTypeChecker
table: Table = _load_table(session=self._session, name=r[0], namespace=r[1], need_cleanup=True)
dtype = r[2]
LOGGER.debug(
f"[{log_str}] got "
f"Table(namespace={table.namespace}, name={table.name}, partitions={table.partitions}), dtype={dtype}")
if dtype == FederationDataType.SPLIT_OBJECT:
obj_bytes = b"".join(map(lambda t: t[1], sorted(table.collect(), key=lambda x: x[0])))
obj = deserialize(obj_bytes)
rtn.append(obj)
else:
rtn.append(table)
else:
obj = self._meta.get_object(r)
if obj is None:
raise EnvironmentError(f"federation get None from {parties} with name {name}, tag {tag}")
rtn.append(obj)
self._meta.ack_object(r)
LOGGER.debug(f"[{log_str}] got object with type: {type(obj)}")
self._meta.ack_status(r)
return rtn
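# Minimal sketch of the standalone federation handshake implemented above: the
# sender writes the payload (or a reference to a saved Table) into the receiver's
# object/status tables, and the receiver polls its status table for the tagged
# key. The party ids below are hypothetical; the function is illustrative only.
def _example_federation_round_trip(session: Session):
    guest = Party(role="guest", party_id="9999")
    host = Party(role="host", party_id="10000")
    guest_side = Federation(session, session_id=session.session_id, party=guest)
    host_side = Federation(session, session_id=session.session_id, party=host)
    guest_side.remote("hello", name="greeting", tag="iter_0", parties=[host])
    return host_side.get(name="greeting", tag="iter_0", parties=[guest])[0]  # "hello"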
class _TableMetaManager:
namespace = "__META__"
name = "fragments"
num_partitions = 10
_env = {}
@classmethod
def _get_meta_env(cls, namespace, name):
k_bytes = _k_to_bytes(f"{namespace}.{name}")
p = _hash_key_to_partition(k_bytes, cls.num_partitions)
if p not in cls._env:
cls._env[p] = _get_env(cls.namespace, cls.name, str(p), write=True)
return k_bytes, cls._env[p]
@classmethod
def add_table_meta(cls, namespace, name, num_partitions):
k_bytes, env = cls._get_meta_env(namespace, name)
with env.begin(write=True) as txn:
return txn.put(k_bytes, serialize(num_partitions))
@classmethod
def get_table_meta(cls, namespace, name):
k_bytes, env = cls._get_meta_env(namespace, name)
with env.begin(write=False) as txn:
old_value_bytes = txn.get(k_bytes)
if old_value_bytes is not None:
old_value_bytes = deserialize(old_value_bytes)
return old_value_bytes
@classmethod
    def destroy_table(cls, namespace, name):
k_bytes, env = cls._get_meta_env(namespace, name)
with env.begin(write=True) as txn:
txn.delete(k_bytes)
path = _data_dir.joinpath(namespace, name)
shutil.rmtree(path, ignore_errors=True)
_data_dir = Path(file_utils.get_project_base_directory()).joinpath("data").absolute()
def _create_table(
session: "Session",
name: str,
namespace: str,
partitions: int,
need_cleanup=True,
error_if_exist=False,
):
assert isinstance(namespace, str)
assert isinstance(name, str)
assert isinstance(partitions, int)
exist_partitions = _TableMetaManager.get_table_meta(namespace, name)
if exist_partitions is None:
_TableMetaManager.add_table_meta(namespace, name, partitions)
else:
if error_if_exist:
raise RuntimeError(f"table already exist: name={name}, namespace={namespace}")
partitions = exist_partitions
return Table(
session=session,
namespace=namespace,
name=name,
partitions=partitions,
need_cleanup=need_cleanup,
)
def _load_table(session, name, namespace, need_cleanup=False):
partitions = _TableMetaManager.get_table_meta(namespace, name)
if partitions is None:
raise RuntimeError(f"table not exist: name={name}, namespace={namespace}")
return Table(
session=session,
namespace=namespace,
name=name,
partitions=partitions,
need_cleanup=need_cleanup,
)
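# Sketch of the table metadata lifecycle: _create_table registers the partition
# count under the __META__ namespace, _load_table reads it back, and
# Table.destroy removes both data and metadata. Names are illustrative only and
# the function is not called anywhere.
def _example_table_lifecycle(session: Session):
    t = _create_table(session, name="example_table", namespace=session.session_id, partitions=4)
    t.put_all((i, i * i) for i in range(10))
    reloaded = _load_table(session, name="example_table", namespace=session.session_id)
    assert reloaded.partitions == 4 and reloaded.count() == 10
    t.destroy()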
class _TaskInfo:
def __init__(self, task_id, function_id, function_bytes):
self.task_id = task_id
self.function_id = function_id
self.function_bytes = function_bytes
self._function_deserialized = None
def get_func(self):
if self._function_deserialized is None:
self._function_deserialized = f_pickle.loads(self.function_bytes)
return self._function_deserialized
class _MapReduceTaskInfo:
def __init__(self, task_id, function_id, map_function_bytes, reduce_function_bytes):
self.task_id = task_id
self.function_id = function_id
self.map_function_bytes = map_function_bytes
self.reduce_function_bytes = reduce_function_bytes
self._reduce_function_deserialized = None
self._mapper_function_deserialized = None
def get_mapper(self):
if self._mapper_function_deserialized is None:
self._mapper_function_deserialized = f_pickle.loads(self.map_function_bytes)
return self._mapper_function_deserialized
def get_reducer(self):
if self._reduce_function_deserialized is None:
self._reduce_function_deserialized = f_pickle.loads(self.reduce_function_bytes)
return self._reduce_function_deserialized
class _Operand:
def __init__(self, namespace, name, partition, num_partitions):
self.namespace = namespace
self.name = name
self.partition = partition
self.num_partitions = num_partitions
def as_env(self, write=False):
return _get_env(self.namespace, self.name, str(self.partition), write=write)
class _UnaryProcess:
def __init__(self, task_info: _TaskInfo, operand: _Operand):
self.info = task_info
self.operand = operand
def output_operand(self):
return _Operand(
self.info.task_id,
self.info.function_id,
self.operand.partition,
self.operand.num_partitions,
)
def get_func(self):
return self.info.get_func()
class _MapReduceProcess:
def __init__(self, task_info: _MapReduceTaskInfo, operand: _Operand):
self.info = task_info
self.operand = operand
def output_operand(self):
return _Operand(
self.info.task_id,
self.info.function_id,
self.operand.partition,
self.operand.num_partitions,
)
def get_mapper(self):
return self.info.get_mapper()
def get_reducer(self):
return self.info.get_reducer()
class _BinaryProcess:
def __init__(self, task_info: _TaskInfo, left: _Operand, right: _Operand):
self.info = task_info
self.left = left
self.right = right
def output_operand(self):
return _Operand(
self.info.task_id,
self.info.function_id,
self.left.partition,
self.left.num_partitions,
)
def get_func(self):
return self.info.get_func()
def _get_env(*args, write=False):
_path = _data_dir.joinpath(*args)
return _open_env(_path, write=write)
# @cached(cache=EvictLRUCache(maxsize=64, evict=_evict))
def _open_env(path, write=False):
path.mkdir(parents=True, exist_ok=True)
t = 0
while t < 100:
try:
env = lmdb.open(
path.as_posix(),
create=True,
max_dbs=1,
max_readers=1024,
lock=write,
sync=True,
map_size=10_737_418_240,
)
return env
except lmdb.Error as e:
if "No such file or directory" in e.args[0]:
time.sleep(0.01)
t += 1
else:
raise e
raise lmdb.Error(f"No such file or directory: {path}, with {t} times retry")
def _hash_key_to_partition(key, partitions):
_key = hashlib.sha1(key).digest()
if isinstance(_key, bytes):
_key = int.from_bytes(_key, byteorder="little", signed=False)
if partitions < 1:
raise ValueError("partitions must be a positive number")
b, j = -1, 0
while j < partitions:
b = int(j)
_key = ((_key * 2862933555777941757) + 1) & 0xFFFFFFFFFFFFFFFF
j = float(b + 1) * (float(1 << 31) / float((_key >> 33) + 1))
return int(b)
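# Sketch of key routing: a key is serialized, hashed with SHA-1 and mapped to a
# partition by the jump-consistent-hash loop above, so the same key always
# resolves to the same partition for a fixed partition count. Illustrative only.
def _example_partition_routing(partitions=4):
    first = _hash_key_to_partition(serialize("id_42"), partitions)
    second = _hash_key_to_partition(serialize("id_42"), partitions)
    assert first == second and 0 <= first < partitions
    return first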
def _do_map(p: _UnaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
txn_map = {}
for partition in range(p.operand.num_partitions):
env = s.enter_context(_get_env(rtn.namespace, rtn.name, str(partition), write=True))
txn_map[partition] = s.enter_context(env.begin(write=True))
source_txn = s.enter_context(source_env.begin())
cursor = s.enter_context(source_txn.cursor())
for k_bytes, v_bytes in cursor:
k, v = deserialize(k_bytes), deserialize(v_bytes)
k1, v1 = p.get_func()(k, v)
k1_bytes, v1_bytes = serialize(k1), serialize(v1)
partition = _hash_key_to_partition(k1_bytes, p.operand.num_partitions)
txn_map[partition].put(k1_bytes, v1_bytes)
return rtn
def _generator_from_cursor(cursor):
for k, v in cursor:
yield deserialize(k), deserialize(v)
def _do_apply_partitions(p: _UnaryProcess):
with ExitStack() as s:
rtn = p.output_operand()
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
v = p.get_func()(_generator_from_cursor(cursor))
if cursor.last():
k_bytes = cursor.key()
dst_txn.put(k_bytes, serialize(v))
return rtn
def _do_map_partitions(p: _UnaryProcess):
with ExitStack() as s:
rtn = p.output_operand()
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
v = p.get_func()(_generator_from_cursor(cursor))
if isinstance(v, Iterable):
for k1, v1 in v:
dst_txn.put(serialize(k1), serialize(v1))
else:
k_bytes = cursor.key()
dst_txn.put(k_bytes, serialize(v))
return rtn
def _do_map_partitions_with_index(p: _UnaryProcess):
with ExitStack() as s:
rtn = p.output_operand()
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
v = p.get_func()(p.operand.partition, _generator_from_cursor(cursor))
if isinstance(v, Iterable):
for k1, v1 in v:
dst_txn.put(serialize(k1), serialize(v1))
else:
k_bytes = cursor.key()
dst_txn.put(k_bytes, serialize(v))
return rtn
def _do_map_reduce_in_partitions(p: _MapReduceProcess):
rtn = p.output_operand()
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
partitions = p.operand.num_partitions
txn_map = {}
for partition in range(partitions):
env = s.enter_context(_get_env(rtn.namespace, rtn.name, str(partition), write=True))
txn_map[partition] = s.enter_context(env.begin(write=True))
source_txn = s.enter_context(source_env.begin())
cursor = s.enter_context(source_txn.cursor())
mapped = p.get_mapper()(_generator_from_cursor(cursor))
if not isinstance(mapped, Iterable):
            raise ValueError("mapper function should return an iterable of pairs")
reducer = p.get_reducer()
for k, v in mapped:
k_bytes = serialize(k)
partition = _hash_key_to_partition(k_bytes, partitions)
# todo: not atomic, fix me
pre_v = txn_map[partition].get(k_bytes, None)
if pre_v is None:
txn_map[partition].put(k_bytes, serialize(v))
else:
txn_map[partition].put(k_bytes, serialize(reducer(deserialize(pre_v), v)))
return rtn
def _do_map_values(p: _UnaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
for k_bytes, v_bytes in cursor:
v = deserialize(v_bytes)
v1 = p.get_func()(v)
dst_txn.put(k_bytes, serialize(v1))
return rtn
def _do_flat_map(p: _UnaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
for k_bytes, v_bytes in cursor:
k = deserialize(k_bytes)
v = deserialize(v_bytes)
map_result = p.get_func()(k, v)
for result_k, result_v in map_result:
dst_txn.put(serialize(result_k), serialize(result_v))
return rtn
def _do_reduce(p: _UnaryProcess):
value = None
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
source_txn = s.enter_context(source_env.begin())
cursor = s.enter_context(source_txn.cursor())
for k_bytes, v_bytes in cursor:
v = deserialize(v_bytes)
if value is None:
value = v
else:
value = p.get_func()(value, v)
return value
def _do_glom(p: _UnaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dest_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
v_list = []
k_bytes = None
for k, v in cursor:
v_list.append((deserialize(k), deserialize(v)))
k_bytes = k
if k_bytes is not None:
dest_txn.put(k_bytes, serialize(v_list))
return rtn
def _do_sample(p: _UnaryProcess):
rtn = p.output_operand()
fraction, seed = deserialize(p.info.function_bytes)
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
cursor.first()
random_state = np.random.RandomState(seed)
for k, v in cursor:
# noinspection PyArgumentList
if random_state.rand() < fraction:
dst_txn.put(k, v)
return rtn
def _do_filter(p: _UnaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
source_env = s.enter_context(p.operand.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
source_txn = s.enter_context(source_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(source_txn.cursor())
for k_bytes, v_bytes in cursor:
k = deserialize(k_bytes)
v = deserialize(v_bytes)
if p.get_func()(k, v):
dst_txn.put(k_bytes, v_bytes)
return rtn
def _do_subtract_by_key(p: _BinaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
left_op = p.left
right_op = p.right
right_env = s.enter_context(right_op.as_env())
left_env = s.enter_context(left_op.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
left_txn = s.enter_context(left_env.begin())
right_txn = s.enter_context(right_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(left_txn.cursor())
for k_bytes, left_v_bytes in cursor:
right_v_bytes = right_txn.get(k_bytes)
if right_v_bytes is None:
dst_txn.put(k_bytes, left_v_bytes)
return rtn
def _do_join(p: _BinaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
right_env = s.enter_context(p.right.as_env())
left_env = s.enter_context(p.left.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
left_txn = s.enter_context(left_env.begin())
right_txn = s.enter_context(right_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
cursor = s.enter_context(left_txn.cursor())
for k_bytes, v1_bytes in cursor:
v2_bytes = right_txn.get(k_bytes)
if v2_bytes is None:
continue
v1 = deserialize(v1_bytes)
v2 = deserialize(v2_bytes)
v3 = p.get_func()(v1, v2)
dst_txn.put(k_bytes, serialize(v3))
return rtn
def _do_union(p: _BinaryProcess):
rtn = p.output_operand()
with ExitStack() as s:
left_env = s.enter_context(p.left.as_env())
right_env = s.enter_context(p.right.as_env())
dst_env = s.enter_context(rtn.as_env(write=True))
left_txn = s.enter_context(left_env.begin())
right_txn = s.enter_context(right_env.begin())
dst_txn = s.enter_context(dst_env.begin(write=True))
# process left op
with left_txn.cursor() as left_cursor:
for k_bytes, left_v_bytes in left_cursor:
right_v_bytes = right_txn.get(k_bytes)
if right_v_bytes is None:
dst_txn.put(k_bytes, left_v_bytes)
else:
left_v = deserialize(left_v_bytes)
right_v = deserialize(right_v_bytes)
final_v = p.get_func()(left_v, right_v)
dst_txn.put(k_bytes, serialize(final_v))
# process right op
with right_txn.cursor() as right_cursor:
for k_bytes, right_v_bytes in right_cursor:
final_v_bytes = dst_txn.get(k_bytes)
if final_v_bytes is None:
dst_txn.put(k_bytes, right_v_bytes)
return rtn
def _kv_to_bytes(k, v):
return serialize(k), serialize(v)
def _k_to_bytes(k):
return serialize(k)
| 40,195 | 33.355556 | 123 |
py
|
FATE
|
FATE-master/python/fate_arch/common/base_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import datetime
import io
import json
import os
import pickle
import socket
import time
import uuid
from enum import Enum, IntEnum
from fate_arch.common.conf_utils import get_base_config
from fate_arch.common import BaseType
use_deserialize_safe_module = get_base_config('use_deserialize_safe_module', False)
class CustomJSONEncoder(json.JSONEncoder):
def __init__(self, **kwargs):
self._with_type = kwargs.pop("with_type", False)
super().__init__(**kwargs)
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
elif isinstance(obj, datetime.timedelta):
return str(obj)
elif issubclass(type(obj), Enum) or issubclass(type(obj), IntEnum):
return obj.value
elif isinstance(obj, set):
return list(obj)
elif issubclass(type(obj), BaseType):
if not self._with_type:
return obj.to_dict()
else:
return obj.to_dict_with_type()
elif isinstance(obj, type):
return obj.__name__
else:
return json.JSONEncoder.default(self, obj)
def fate_uuid():
return uuid.uuid1().hex
def string_to_bytes(string):
return string if isinstance(string, bytes) else string.encode(encoding="utf-8")
def bytes_to_string(byte):
return byte.decode(encoding="utf-8")
def json_dumps(src, byte=False, indent=None, with_type=False):
dest = json.dumps(src, indent=indent, cls=CustomJSONEncoder, with_type=with_type)
if byte:
dest = string_to_bytes(dest)
return dest
def json_loads(src, object_hook=None, object_pairs_hook=None):
if isinstance(src, bytes):
src = bytes_to_string(src)
return json.loads(src, object_hook=object_hook, object_pairs_hook=object_pairs_hook)
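# Usage sketch for the JSON helpers above: CustomJSONEncoder lets values such as
# datetimes, sets and BaseType subclasses pass straight through json_dumps.
# The sample payload below is illustrative only.
def _example_json_round_trip():
    src = {"run_time": datetime.datetime(2020, 1, 1, 12, 0, 0), "roles": {"guest", "host"}}
    text = json_dumps(src, indent=2)  # datetime -> "2020-01-01 12:00:00", set -> list
    return json_loads(text)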
def current_timestamp():
return int(time.time() * 1000)
def timestamp_to_date(timestamp, format_string="%Y-%m-%d %H:%M:%S"):
if not timestamp:
        timestamp = current_timestamp()
timestamp = int(timestamp) / 1000
time_array = time.localtime(timestamp)
str_date = time.strftime(format_string, time_array)
return str_date
def date_string_to_timestamp(time_str, format_string="%Y-%m-%d %H:%M:%S"):
time_array = time.strptime(time_str, format_string)
time_stamp = int(time.mktime(time_array) * 1000)
return time_stamp
def serialize_b64(src, to_str=False):
dest = base64.b64encode(pickle.dumps(src))
if not to_str:
return dest
else:
return bytes_to_string(dest)
def deserialize_b64(src):
src = base64.b64decode(string_to_bytes(src) if isinstance(src, str) else src)
if use_deserialize_safe_module:
return restricted_loads(src)
return pickle.loads(src)
safe_module = {
'federatedml',
'numpy',
'fate_flow'
}
class RestrictedUnpickler(pickle.Unpickler):
def find_class(self, module, name):
import importlib
if module.split('.')[0] in safe_module:
_module = importlib.import_module(module)
return getattr(_module, name)
# Forbid everything else.
raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
(module, name))
def restricted_loads(src):
"""Helper function analogous to pickle.loads()."""
return RestrictedUnpickler(io.BytesIO(src)).load()
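# Sketch of the pickle safety switch above: with use_deserialize_safe_module
# enabled, deserialize_b64 resolves classes only from the whitelisted packages
# (federatedml, numpy, fate_flow); plain builtins round-trip either way.
# The payload below is illustrative only.
def _example_b64_round_trip():
    payload = {"weights": [0.1, 0.2], "iteration": 3}
    encoded = serialize_b64(payload, to_str=True)
    return deserialize_b64(encoded) == payload  # True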
def get_lan_ip():
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(
fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', string_to_bytes(ifname[:15])))[20:24])
ip = socket.gethostbyname(socket.getfqdn())
if ip.startswith("127.") and os.name != "nt":
interfaces = [
"bond1",
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except IOError as e:
pass
return ip or ''
| 4,988 | 27.672414 | 106 |
py
|
FATE
|
FATE-master/python/fate_arch/common/string_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import string
def random_string(string_length=6):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(string_length))
def random_number_string(string_length=6):
letters = string.octdigits
return ''.join(random.choice(letters) for _ in range(string_length))
| 942 | 31.517241 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/common/_types.py
|
class EngineType(object):
COMPUTING = "computing"
STORAGE = "storage"
FEDERATION = "federation"
class CoordinationProxyService(object):
ROLLSITE = "rollsite"
NGINX = "nginx"
FATEFLOW = "fateflow"
FIREWORK = "firework"
class CoordinationCommunicationProtocol(object):
HTTP = "http"
GRPC = "grpc"
class FederatedMode(object):
SINGLE = "SINGLE"
MULTIPLE = "MULTIPLE"
def is_single(self, value):
return value == self.SINGLE
def is_multiple(self, value):
return value == self.MULTIPLE
class FederatedCommunicationType(object):
PUSH = "PUSH"
PULL = "PULL"
class BaseType:
def to_dict(self):
return dict([(k.lstrip("_"), v) for k, v in self.__dict__.items()])
def to_dict_with_type(self):
def _dict(obj):
module = None
if issubclass(obj.__class__, BaseType):
data = {}
for attr, v in obj.__dict__.items():
k = attr.lstrip("_")
data[k] = _dict(v)
module = obj.__module__
elif isinstance(obj, (list, tuple)):
data = []
for i, vv in enumerate(obj):
data.append(_dict(vv))
elif isinstance(obj, dict):
data = {}
for _k, vv in obj.items():
data[_k] = _dict(vv)
else:
data = obj
return {"type": obj.__class__.__name__, "data": data, "module": module}
return _dict(self)
class Party(BaseType):
"""
    Uniquely identifies a federation participant by role and party_id.
"""
def __init__(self, role, party_id):
self.role = str(role)
self.party_id = str(party_id)
def __hash__(self):
return (self.role, self.party_id).__hash__()
def __str__(self):
return f"Party(role={self.role}, party_id={self.party_id})"
def __repr__(self):
return self.__str__()
def __lt__(self, other):
return (self.role, self.party_id) < (other.role, other.party_id)
def __eq__(self, other):
return self.party_id == other.party_id and self.role == other.role
class DTable(BaseType):
def __init__(self, namespace, name, partitions=None):
self._name = name
self._namespace = namespace
self._partitions = partitions
def __str__(self):
return f"DTable(namespace={self._namespace}, name={self._name})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self._namespace == other.namespace and self._name == other.name
@property
def name(self):
return self._name
@property
def namespace(self):
return self._namespace
@property
def partitions(self):
return self._partitions
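# Sketch of BaseType.to_dict: leading underscores are stripped from private
# attributes, so the DTable below serializes to
# {"name": "breast_hetero_guest", "namespace": "experiment", "partitions": 4}.
# Values are illustrative only.
def _example_base_type_to_dict():
    table = DTable(namespace="experiment", name="breast_hetero_guest", partitions=4)
    return table.to_dict()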
| 2,814 | 23.478261 | 83 |
py
|
FATE
|
FATE-master/python/fate_arch/common/_parties.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from fate_arch.common import Party
class Role:
def __init__(self, parties) -> None:
self._parties = parties
self._size = len(self._parties)
def __getitem__(self, key):
return self._parties[key]
class _PartiesMeta(type):
@property
def Guest(cls) -> Role:
return cls._get_instance()._guest
@property
def Host(cls) -> Role:
return cls._get_instance()._host
@property
def Arbiter(cls) -> Role:
return cls._get_instance()._arbiter
class PartiesInfo(metaclass=_PartiesMeta):
_instance = None
@classmethod
def _set_instance(cls, inst):
cls._instance = inst
@classmethod
def _get_instance(cls) -> "PartiesInfo":
if cls._instance is None:
raise RuntimeError(f"parties not initialized")
return cls._instance
@classmethod
def get_parties(cls, parties) -> typing.List[Party]:
if isinstance(parties, Party):
return [parties]
elif isinstance(parties, Role):
return parties[:]
elif isinstance(parties, list):
plain_parties = []
for p in parties:
plain_parties.extend(cls.get_parties(p))
if len(set(plain_parties)) != len(plain_parties):
                raise ValueError(f"duplicated parties exist: {plain_parties}")
return plain_parties
raise ValueError(f"unsupported type: {type(parties)}")
@staticmethod
def from_conf(conf: typing.MutableMapping[str, dict]):
try:
local = Party(
role=conf["local"]["role"], party_id=conf["local"]["party_id"]
)
role_to_parties = {}
for role, party_id_list in conf.get("role", {}).items():
role_to_parties[role] = [
Party(role=role, party_id=party_id) for party_id in party_id_list
]
except Exception as e:
raise RuntimeError(
"conf parse error, a correct configuration could be:\n"
"{\n"
" 'local': {'role': 'guest', 'party_id': 10000},\n"
" 'role': {'guest': [10000], 'host': [9999, 9998]}, 'arbiter': [9997]}\n"
"}"
) from e
return PartiesInfo(local, role_to_parties)
def __init__(
self,
local: Party,
role_to_parties: typing.MutableMapping[str, typing.List[Party]],
):
self._local = local
self._role_to_parties = role_to_parties
self._guest = Role(role_to_parties.get("guest", []))
self._host = Role(role_to_parties.get("host", []))
self._arbiter = Role(role_to_parties.get("arbiter", []))
self._set_instance(self)
@property
def local_party(self) -> Party:
return self._local
@property
def all_parties(self):
return [
party for parties in self._role_to_parties.values() for party in parties
]
@property
def role_set(self):
return set(self._role_to_parties)
def roles_to_parties(self, roles: typing.Iterable, strict=True) -> list:
parties = []
for role in roles:
if role not in self._role_to_parties:
if strict:
raise RuntimeError(
f"try to get role {role} "
f"which is not configured in `role` in runtime conf({self._role_to_parties})"
)
else:
continue
parties.extend(self._role_to_parties[role])
return parties
def role_to_party(self, role, idx) -> Party:
return self._role_to_parties[role][idx]
__all__ = ["PartiesInfo", "Role"]
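# Minimal sketch of how a runtime conf maps onto PartiesInfo; the party ids
# below are hypothetical and the function is not called anywhere.
def _example_parties_info():
    conf = {
        "local": {"role": "guest", "party_id": 10000},
        "role": {"guest": [10000], "host": [9999, 9998], "arbiter": [9997]},
    }
    parties = PartiesInfo.from_conf(conf)
    assert parties.local_party == Party(role="guest", party_id=10000)
    return parties.roles_to_parties(["host"])  # [Party(host, 9999), Party(host, 9998)]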
| 4,396 | 29.324138 | 101 |
py
|
FATE
|
FATE-master/python/fate_arch/common/path_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from fate_arch.common import file_utils
def get_data_table_count(path):
count = 0
config_path = os.path.join(path, "config.yaml")
if not os.path.exists(config_path):
return count
config = file_utils.load_yaml_conf(conf_path=config_path)
if config:
if config.get("type") != "vision":
            raise Exception(f"unsupported config type: {config.get('type')}")
ext = config.get("inputs").get("ext")
base_dir = os.path.join(path, "images")
for file_name in os.listdir(base_dir):
if file_name.endswith(ext):
count += 1
return count
| 1,253 | 32.891892 | 78 |
py
|
FATE
|
FATE-master/python/fate_arch/common/profile.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import time
import typing
import beautifultable
from fate_arch.common.log import getLogger
import inspect
from functools import wraps
from fate_arch.abc import CTableABC
profile_logger = getLogger("PROFILING")
_PROFILE_LOG_ENABLED = False
_START_TIME = None
_END_TIME = None
class _TimerItem(object):
def __init__(self):
self.count = 0
self.total_time = 0.0
self.max_time = 0.0
def union(self, other: '_TimerItem'):
self.count += other.count
self.total_time += other.total_time
if self.max_time < other.max_time:
self.max_time = other.max_time
def add(self, elapse_time):
self.count += 1
self.total_time += elapse_time
if elapse_time > self.max_time:
self.max_time = elapse_time
@property
def mean(self):
if self.count == 0:
return 0.0
return self.total_time / self.count
def as_list(self):
return [self.count, self.total_time, self.mean, self.max_time]
def __str__(self):
return f"n={self.count}, sum={self.total_time:.4f}, mean={self.mean:.4f}, max={self.max_time:.4f}"
def __repr__(self):
return self.__str__()
class _ComputingTimerItem(object):
def __init__(self, function_name: str, function_stack):
self.function_name = function_name
self.function_stack = function_stack
self.item = _TimerItem()
class _ComputingTimer(object):
_STATS: typing.MutableMapping[str, _ComputingTimerItem] = {}
def __init__(self, function_name: str, function_stack_list):
self._start = time.time()
function_stack = "\n".join(function_stack_list)
self._hash = hashlib.blake2b(function_stack.encode('utf-8'), digest_size=5).hexdigest()
if self._hash not in self._STATS:
self._STATS[self._hash] = _ComputingTimerItem(function_name, function_stack)
if _PROFILE_LOG_ENABLED:
profile_logger.debug(f"[computing#{self._hash}]function_stack: {' <-'.join(function_stack_list)}")
if _PROFILE_LOG_ENABLED:
profile_logger.debug(f"[computing#{self._hash}]start")
def done(self, function_string):
elapse = time.time() - self._start
self._STATS[self._hash].item.add(elapse)
if _PROFILE_LOG_ENABLED:
profile_logger.debug(f"[computing#{self._hash}]done, elapse: {elapse}, function: {function_string}")
@classmethod
def computing_statistics_table(cls, timer_aggregator: _TimerItem = None):
stack_table = beautifultable.BeautifulTable(110, precision=4, detect_numerics=False)
stack_table.columns.header = ["function", "n", "sum(s)", "mean(s)", "max(s)", "stack_hash", "stack"]
stack_table.columns.alignment["stack"] = beautifultable.ALIGN_LEFT
stack_table.columns.header.alignment = beautifultable.ALIGN_CENTER
stack_table.border.left = ''
stack_table.border.right = ''
stack_table.border.bottom = ''
stack_table.border.top = ''
function_table = beautifultable.BeautifulTable(110)
function_table.set_style(beautifultable.STYLE_COMPACT)
function_table.columns.header = ["function", "n", "sum(s)", "mean(s)", "max(s)"]
aggregate = {}
total = _TimerItem()
for hash_id, timer in cls._STATS.items():
stack_table.rows.append([timer.function_name, *timer.item.as_list(), hash_id, timer.function_stack])
aggregate.setdefault(timer.function_name, _TimerItem()).union(timer.item)
total.union(timer.item)
for function_name, item in aggregate.items():
function_table.rows.append([function_name, *item.as_list()])
detailed_base_table = beautifultable.BeautifulTable(120)
stack_table.rows.sort("sum(s)", reverse=True)
detailed_base_table.rows.append(["stack", stack_table])
detailed_base_table.rows.append(["total", total])
base_table = beautifultable.BeautifulTable(120)
function_table.rows.sort("sum(s)", reverse=True)
base_table.rows.append(["function", function_table])
base_table.rows.append(["total", total])
if timer_aggregator:
timer_aggregator.union(total)
return base_table.get_string(), detailed_base_table.get_string()
class _FederationTimer(object):
_GET_STATS: typing.MutableMapping[str, _TimerItem] = {}
_REMOTE_STATS: typing.MutableMapping[str, _TimerItem] = {}
@classmethod
def federation_statistics_table(cls, timer_aggregator: _TimerItem = None):
total = _TimerItem()
get_table = beautifultable.BeautifulTable(110)
get_table.columns.header = ["name", "n", "sum(s)", "mean(s)", "max(s)"]
for name, item in cls._GET_STATS.items():
get_table.rows.append([name, *item.as_list()])
total.union(item)
get_table.rows.sort("sum(s)", reverse=True)
get_table.border.left = ''
get_table.border.right = ''
get_table.border.bottom = ''
get_table.border.top = ''
remote_table = beautifultable.BeautifulTable(110)
remote_table.columns.header = ["name", "n", "sum(s)", "mean(s)", "max(s)"]
for name, item in cls._REMOTE_STATS.items():
remote_table.rows.append([name, *item.as_list()])
total.union(item)
remote_table.rows.sort("sum(s)", reverse=True)
remote_table.border.left = ''
remote_table.border.right = ''
remote_table.border.bottom = ''
remote_table.border.top = ''
base_table = beautifultable.BeautifulTable(120)
base_table.rows.append(["get", get_table])
base_table.rows.append(["remote", remote_table])
base_table.rows.append(["total", total])
if timer_aggregator:
timer_aggregator.union(total)
return base_table.get_string()
class _FederationRemoteTimer(_FederationTimer):
def __init__(self, name, full_name, tag, local, parties):
self._name = name
self._full_name = full_name
self._tag = tag
self._local_party = local
self._parties = parties
self._start_time = time.time()
self._end_time = None
if self._full_name not in self._REMOTE_STATS:
self._REMOTE_STATS[self._full_name] = _TimerItem()
def done(self, federation):
self._end_time = time.time()
self._REMOTE_STATS[self._full_name].add(self.elapse)
profile_logger.debug(f"[federation.remote.{self._full_name}.{self._tag}]"
f"{self._local_party}->{self._parties} done")
if is_profile_remote_enable():
federation.remote(v={"start_time": self._start_time, "end_time": self._end_time},
name=self._name,
tag=profile_remote_tag(self._tag),
parties=self._parties,
gc=None)
@property
def elapse(self):
return self._end_time - self._start_time
class _FederationGetTimer(_FederationTimer):
def __init__(self, name, full_name, tag, local, parties):
self._name = name
self._full_name = full_name
self._tag = tag
self._local_party = local
self._parties = parties
self._start_time = time.time()
self._end_time = None
if self._full_name not in self._GET_STATS:
self._GET_STATS[self._full_name] = _TimerItem()
def done(self, federation):
self._end_time = time.time()
self._GET_STATS[self._full_name].add(self.elapse)
profile_logger.debug(f"[federation.get.{self._full_name}.{self._tag}]"
f"{self._local_party}<-{self._parties} done")
if is_profile_remote_enable():
remote_meta = federation.get(name=self._name, tag=profile_remote_tag(self._tag), parties=self._parties,
gc=None)
for party, meta in zip(self._parties, remote_meta):
profile_logger.debug(f"[federation.meta.{self._full_name}.{self._tag}]{self._local_party}<-{party}]"
f"meta={meta}")
@property
def elapse(self):
return self._end_time - self._start_time
def federation_remote_timer(name, full_name, tag, local, parties):
profile_logger.debug(f"[federation.remote.{full_name}.{tag}]{local}->{parties} start")
return _FederationRemoteTimer(name, full_name, tag, local, parties)
def federation_get_timer(name, full_name, tag, local, parties):
profile_logger.debug(f"[federation.get.{full_name}.{tag}]{local}<-{parties} start")
return _FederationGetTimer(name, full_name, tag, local, parties)
def profile_start():
global _PROFILE_LOG_ENABLED
_PROFILE_LOG_ENABLED = True
global _START_TIME
_START_TIME = time.time()
def profile_ends():
global _END_TIME
_END_TIME = time.time()
profile_total_time = _END_TIME - _START_TIME
# gather computing and federation profile statistics
timer_aggregator = _TimerItem()
computing_timer_aggregator = _TimerItem()
federation_timer_aggregator = _TimerItem()
computing_base_table, computing_detailed_table = _ComputingTimer.computing_statistics_table(
timer_aggregator=computing_timer_aggregator)
federation_base_table = _FederationTimer.federation_statistics_table(timer_aggregator=federation_timer_aggregator)
timer_aggregator.union(computing_timer_aggregator)
timer_aggregator.union(federation_timer_aggregator)
# logging
profile_driver_time = profile_total_time - timer_aggregator.total_time
profile_logger.info(
"Total: {:.4f}s, Driver: {:.4f}s({:.2%}), Federation: {:.4f}s({:.2%}), Computing: {:.4f}s({:.2%})".format(
profile_total_time,
profile_driver_time,
profile_driver_time / profile_total_time,
federation_timer_aggregator.total_time,
federation_timer_aggregator.total_time / profile_total_time,
computing_timer_aggregator.total_time,
computing_timer_aggregator.total_time / profile_total_time
)
)
profile_logger.info(f"\nComputing:\n{computing_base_table}\n\nFederation:\n{federation_base_table}\n")
profile_logger.debug(f"\nDetailed Computing:\n{computing_detailed_table}\n")
global _PROFILE_LOG_ENABLED
_PROFILE_LOG_ENABLED = False
def _pretty_table_str(v):
if isinstance(v, CTableABC):
return f"Table(partition={v.partitions})"
else:
return f"{type(v).__name__}"
def _func_annotated_string(func, *args, **kwargs):
pretty_args = []
for k, v in inspect.signature(func).bind(*args, **kwargs).arguments.items():
pretty_args.append(f"{k}: {_pretty_table_str(v)}")
return f"{func.__name__}({', '.join(pretty_args)})"
def _call_stack_strings():
call_stack_strings = []
frames = inspect.getouterframes(inspect.currentframe(), 10)[2:-2]
for frame in frames:
call_stack_strings.append(f"[{frame.filename.split('/')[-1]}:{frame.lineno}]{frame.function}")
return call_stack_strings
def computing_profile(func):
@wraps(func)
def _fn(*args, **kwargs):
function_call_stack = _call_stack_strings()
timer = _ComputingTimer(func.__name__, function_call_stack)
rtn = func(*args, **kwargs)
function_string = f"{_func_annotated_string(func, *args, **kwargs)} -> {_pretty_table_str(rtn)}"
timer.done(function_string)
return rtn
return _fn
__META_REMOTE_ENABLE = False
def enable_profile_remote():
global __META_REMOTE_ENABLE
__META_REMOTE_ENABLE = True
def is_profile_remote_enable():
return __META_REMOTE_ENABLE
def profile_remote_tag(tag):
return f"<remote_profile>_{tag}"
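# Illustrative usage sketch (not part of the module): `computing_profile`
# decorates a function so every call is timed and aggregated, while
# profile_start()/profile_ends() bracket a run and emit the summary tables
# through the PROFILING logger. The decorated function and its inputs below
# are made up for demonstration only.
@computing_profile
def _example_double_values(values):
    # stands in for a real computing-table operation
    return [v * 2 for v in values]
if __name__ == "__main__":
    profile_start()   # enable profile logging and record the start time
    _example_double_values([1, 2, 3])
    _example_double_values([4, 5, 6])
    profile_ends()    # log total/driver/federation/computing time and the stats tables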
| 12,471 | 35.682353 | 118 |
py
|
FATE
|
FATE-master/python/fate_arch/common/data_utils.py
|
import os
import uuid
from fate_arch.common import file_utils
from fate_arch.storage import StorageEngine
def default_output_info(task_id, task_version, output_type):
return f"output_{output_type}_{task_id}_{task_version}", uuid.uuid1().hex
def default_input_fs_path(name, namespace, prefix=None, storage_engine=StorageEngine.HDFS):
if storage_engine == StorageEngine.HDFS:
return default_hdfs_path(data_type="input", name=name, namespace=namespace, prefix=prefix)
elif storage_engine == StorageEngine.LOCALFS:
return default_localfs_path(data_type="input", name=name, namespace=namespace)
def default_output_fs_path(name, namespace, prefix=None, storage_engine=StorageEngine.HDFS):
if storage_engine == StorageEngine.HDFS:
return default_hdfs_path(data_type="output", name=name, namespace=namespace, prefix=prefix)
elif storage_engine == StorageEngine.LOCALFS:
return default_localfs_path(data_type="output", name=name, namespace=namespace)
def default_localfs_path(name, namespace, data_type):
return os.path.join(file_utils.get_project_base_directory(), 'localfs', data_type, namespace, name)
def default_hdfs_path(data_type, name, namespace, prefix=None):
p = f"/fate/{data_type}_data/{namespace}/{name}"
if prefix:
p = f"{prefix}/{p}"
return p
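# Illustrative sketch of the naming and path conventions produced by the
# helpers above; the task id, table name and namespace are made-up values.
if __name__ == "__main__":
    name, namespace = default_output_info(task_id="202203150001", task_version=0, output_type="data")
    print(name)  # output_data_202203150001_0
    # HDFS layout is deterministic: /fate/<data_type>_data/<namespace>/<name>
    print(default_input_fs_path(name="breast_hetero_guest", namespace="experiment"))
    # LOCALFS paths live under <project base>/localfs/<data_type>/<namespace>/<name>
    print(default_output_fs_path(name="breast_hetero_guest", namespace="experiment",
                                 storage_engine=StorageEngine.LOCALFS))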
| 1,336 | 37.2 | 103 |
py
|
FATE
|
FATE-master/python/fate_arch/common/versions.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import dotenv
import typing
from fate_arch.common.file_utils import get_project_base_directory
def get_versions() -> typing.Mapping[str, typing.Any]:
return dotenv.dotenv_values(
dotenv_path=os.path.join(get_project_base_directory(), "fate.env")
)
def get_eggroll_version() -> typing.Optional[str]:
return get_versions().get("EGGROLL")
def get_fate_version() -> typing.Optional[str]:
return get_versions().get("FATE")
| 1,076 | 28.916667 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/common/log.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import traceback
import logging
import os
from logging.handlers import TimedRotatingFileHandler
from threading import RLock
from fate_arch.common import file_utils
class LoggerFactory(object):
TYPE = "FILE"
LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [jobId] [%(process)s:%(thread)s] - [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
LEVEL = logging.DEBUG
logger_dict = {}
global_handler_dict = {}
LOG_DIR = None
PARENT_LOG_DIR = None
log_share = True
append_to_parent_log = None
lock = RLock()
# CRITICAL = 50
# FATAL = CRITICAL
# ERROR = 40
# WARNING = 30
# WARN = WARNING
# INFO = 20
# DEBUG = 10
# NOTSET = 0
levels = (10, 20, 30, 40)
schedule_logger_dict = {}
@staticmethod
def set_directory(directory=None, parent_log_dir=None, append_to_parent_log=None, force=False):
if parent_log_dir:
LoggerFactory.PARENT_LOG_DIR = parent_log_dir
if append_to_parent_log:
LoggerFactory.append_to_parent_log = append_to_parent_log
with LoggerFactory.lock:
if not directory:
directory = file_utils.get_project_base_directory("logs")
if not LoggerFactory.LOG_DIR or force:
LoggerFactory.LOG_DIR = directory
if LoggerFactory.log_share:
oldmask = os.umask(000)
os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
os.umask(oldmask)
else:
os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
for loggerName, ghandler in LoggerFactory.global_handler_dict.items():
for className, (logger, handler) in LoggerFactory.logger_dict.items():
logger.removeHandler(ghandler)
ghandler.close()
LoggerFactory.global_handler_dict = {}
for className, (logger, handler) in LoggerFactory.logger_dict.items():
logger.removeHandler(handler)
_handler = None
if handler:
handler.close()
if className != "default":
_handler = LoggerFactory.get_handler(className)
logger.addHandler(_handler)
LoggerFactory.assemble_global_handler(logger)
LoggerFactory.logger_dict[className] = logger, _handler
@staticmethod
def new_logger(name):
logger = logging.getLogger(name)
logger.propagate = False
logger.setLevel(LoggerFactory.LEVEL)
return logger
@staticmethod
def get_logger(class_name=None):
with LoggerFactory.lock:
if class_name in LoggerFactory.logger_dict.keys():
logger, handler = LoggerFactory.logger_dict[class_name]
if not logger:
logger, handler = LoggerFactory.init_logger(class_name)
else:
logger, handler = LoggerFactory.init_logger(class_name)
return logger
@staticmethod
def get_global_handler(logger_name, level=None, log_dir=None):
if not LoggerFactory.LOG_DIR:
return logging.StreamHandler()
if log_dir:
logger_name_key = logger_name + "_" + log_dir
else:
logger_name_key = logger_name + "_" + LoggerFactory.LOG_DIR
# if loggerName not in LoggerFactory.globalHandlerDict:
if logger_name_key not in LoggerFactory.global_handler_dict:
with LoggerFactory.lock:
if logger_name_key not in LoggerFactory.global_handler_dict:
handler = LoggerFactory.get_handler(logger_name, level, log_dir)
LoggerFactory.global_handler_dict[logger_name_key] = handler
return LoggerFactory.global_handler_dict[logger_name_key]
@staticmethod
def get_handler(class_name, level=None, log_dir=None, log_type=None, job_id=None):
if not log_type:
if not LoggerFactory.LOG_DIR or not class_name:
return logging.StreamHandler()
if not log_dir:
log_file = os.path.join(LoggerFactory.LOG_DIR, "{}.log".format(class_name))
else:
log_file = os.path.join(log_dir, "{}.log".format(class_name))
else:
log_file = os.path.join(log_dir, "fate_flow_{}.log".format(
log_type) if level == LoggerFactory.LEVEL else 'fate_flow_{}_error.log'.format(log_type))
job_id = job_id or os.getenv("FATE_JOB_ID")
if job_id:
formatter = logging.Formatter(LoggerFactory.LOG_FORMAT.replace("jobId", job_id))
else:
formatter = logging.Formatter(LoggerFactory.LOG_FORMAT.replace("jobId", "Server"))
os.makedirs(os.path.dirname(log_file), exist_ok=True)
if LoggerFactory.log_share:
handler = ROpenHandler(log_file,
when='D',
interval=1,
backupCount=14,
delay=True)
else:
handler = TimedRotatingFileHandler(log_file,
when='D',
interval=1,
backupCount=14,
delay=True)
if level:
handler.level = level
handler.setFormatter(formatter)
return handler
@staticmethod
def init_logger(class_name):
with LoggerFactory.lock:
logger = LoggerFactory.new_logger(class_name)
handler = None
if class_name:
handler = LoggerFactory.get_handler(class_name)
logger.addHandler(handler)
LoggerFactory.logger_dict[class_name] = logger, handler
else:
LoggerFactory.logger_dict["default"] = logger, handler
LoggerFactory.assemble_global_handler(logger)
return logger, handler
@staticmethod
def assemble_global_handler(logger):
if LoggerFactory.LOG_DIR:
for level in LoggerFactory.levels:
if level >= LoggerFactory.LEVEL:
level_logger_name = logging._levelToName[level]
logger.addHandler(LoggerFactory.get_global_handler(level_logger_name, level))
if LoggerFactory.append_to_parent_log and LoggerFactory.PARENT_LOG_DIR:
for level in LoggerFactory.levels:
if level >= LoggerFactory.LEVEL:
level_logger_name = logging._levelToName[level]
logger.addHandler(
LoggerFactory.get_global_handler(level_logger_name, level, LoggerFactory.PARENT_LOG_DIR))
def setDirectory(directory=None):
LoggerFactory.set_directory(directory)
def setLevel(level):
LoggerFactory.LEVEL = level
def getLogger(className=None, useLevelFile=False):
if className is None:
frame = inspect.stack()[1]
module = inspect.getmodule(frame[0])
className = 'stat'
return LoggerFactory.get_logger(className)
def exception_to_trace_string(ex):
return "".join(traceback.TracebackException.from_exception(ex).format())
class ROpenHandler(TimedRotatingFileHandler):
def _open(self):
prevumask = os.umask(000)
rtv = TimedRotatingFileHandler._open(self)
os.umask(prevumask)
return rtv
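# Illustrative sketch: with no log directory configured, handlers fall back to
# StreamHandler, so messages go to the console; calling
# LoggerFactory.set_directory(...) first would switch to per-level rotating
# files under that directory. The logger name below is arbitrary.
if __name__ == "__main__":
    example_logger = getLogger("example_module")
    example_logger.info("hello from the example logger")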
| 8,142 | 37.051402 | 140 |
py
|
FATE
|
FATE-master/python/fate_arch/common/address.py
|
from fate_arch.abc import AddressABC
from fate_arch.metastore.db_utils import StorageConnector
class AddressBase(AddressABC):
def __init__(self, connector_name=None):
self.connector_name = connector_name
if connector_name:
connector = StorageConnector(connector_name=connector_name)
if connector.get_info():
for k, v in connector.get_info().items():
if hasattr(self, k) and v:
self.__setattr__(k, v)
@property
def connector(self):
return {}
@property
def storage_engine(self):
return
class StandaloneAddress(AddressBase):
def __init__(self, home=None, name=None, namespace=None, storage_type=None, connector_name=None):
self.home = home
self.name = name
self.namespace = namespace
self.storage_type = storage_type
super(StandaloneAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.home, self.name, self.namespace, self.storage_type).__hash__()
def __str__(self):
return f"StandaloneAddress(name={self.name}, namespace={self.namespace})"
def __repr__(self):
return self.__str__()
@property
def connector(self):
return {"home": self.home}
class EggRollAddress(AddressBase):
def __init__(self, home=None, name=None, namespace=None, connector_name=None):
self.name = name
self.namespace = namespace
self.home = home
super(EggRollAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.home, self.name, self.namespace).__hash__()
def __str__(self):
return f"EggRollAddress(name={self.name}, namespace={self.namespace})"
def __repr__(self):
return self.__str__()
@property
def connector(self):
return {"home": self.home}
class HDFSAddress(AddressBase):
def __init__(self, name_node=None, path=None, connector_name=None):
self.name_node = name_node
self.path = path
super(HDFSAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.name_node, self.path).__hash__()
def __str__(self):
return f"HDFSAddress(name_node={self.name_node}, path={self.path})"
def __repr__(self):
return self.__str__()
@property
def connector(self):
return {"name_node": self.name_node}
class PathAddress(AddressBase):
def __init__(self, path=None, connector_name=None):
self.path = path
super(PathAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return self.path.__hash__()
def __str__(self):
return f"PathAddress(path={self.path})"
def __repr__(self):
return self.__str__()
class ApiAddress(AddressBase):
def __init__(self, method="POST", url=None, header=None, body=None, connector_name=None):
self.method = method
self.url = url
self.header = header if header else {}
self.body = body if body else {}
super(ApiAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.method, self.url).__hash__()
def __str__(self):
return f"ApiAddress(url={self.url})"
def __repr__(self):
return self.__str__()
class MysqlAddress(AddressBase):
def __init__(self, user=None, passwd=None, host=None, port=None, db=None, name=None, connector_name=None):
self.user = user
self.passwd = passwd
self.host = host
self.port = port
self.db = db
self.name = name
self.connector_name = connector_name
super(MysqlAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.host, self.port, self.db, self.name).__hash__()
def __str__(self):
return f"MysqlAddress(db={self.db}, name={self.name})"
def __repr__(self):
return self.__str__()
@property
def connector(self):
return {"user": self.user, "passwd": self.passwd, "host": self.host, "port": self.port, "db": self.db}
class HiveAddress(AddressBase):
def __init__(self, host=None, name=None, port=10000, username=None, database='default', auth_mechanism='PLAIN',
password=None, connector_name=None):
self.host = host
self.username = username
self.port = port
self.database = database
self.auth_mechanism = auth_mechanism
self.password = password
self.name = name
super(HiveAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.host, self.port, self.database, self.name).__hash__()
def __str__(self):
return f"HiveAddress(database={self.database}, name={self.name})"
def __repr__(self):
return self.__str__()
@property
def connector(self):
return {
"host": self.host,
"port": self.port,
"username": self.username,
"password": self.password,
"auth_mechanism": self.auth_mechanism,
"database": self.database}
class LinkisHiveAddress(AddressBase):
def __init__(self, host="127.0.0.1", port=9001, username='', database='', name='', run_type='hql',
execute_application_name='hive', source={}, params={}, connector_name=None):
self.host = host
self.port = port
self.username = username
self.database = database if database else f"{username}_ind"
self.name = name
self.run_type = run_type
self.execute_application_name = execute_application_name
self.source = source
self.params = params
super(LinkisHiveAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.host, self.port, self.database, self.name).__hash__()
def __str__(self):
return f"LinkisHiveAddress(database={self.database}, name={self.name})"
def __repr__(self):
return self.__str__()
class LocalFSAddress(AddressBase):
def __init__(self, path=None, connector_name=None):
self.path = path
super(LocalFSAddress, self).__init__(connector_name=connector_name)
def __hash__(self):
return (self.path).__hash__()
def __str__(self):
return f"LocalFSAddress(path={self.path})"
def __repr__(self):
return self.__str__()
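# Illustrative sketch constructing address objects directly; the values are
# made up and connector_name is left unset, so no metastore lookup is made.
if __name__ == "__main__":
    hdfs_address = HDFSAddress(name_node="hdfs://fate-namenode:8020",
                               path="/fate/input_data/experiment/breast_hetero_guest")
    print(hdfs_address)            # HDFSAddress(name_node=..., path=...)
    print(hdfs_address.connector)  # {'name_node': 'hdfs://fate-namenode:8020'}
    standalone_address = StandaloneAddress(home="/data/projects/fate",
                                           name="breast_hetero_guest",
                                           namespace="experiment")
    print(standalone_address.connector)  # {'home': '/data/projects/fate'}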
| 6,524 | 29.490654 | 115 |
py
|
FATE
|
FATE-master/python/fate_arch/common/hdfs_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
_DELIMITER = '\t'
NEWLINE = '\n'
def deserialize(m):
fields = m.partition(_DELIMITER)
return fields[0], pickle.loads(bytes.fromhex(fields[2]))
def serialize(k, v):
return f"{k}{_DELIMITER}{pickle.dumps(v).hex()}"
| 861 | 27.733333 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/common/engine_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from fate_arch.common import FederatedMode, conf_utils
from fate_arch.computing import ComputingEngine
from fate_arch.federation import FederationEngine
from fate_arch.storage import StorageEngine
from fate_arch.relation_ship import Relationship
from fate_arch.common import EngineType
def get_engine_class_members(engine_class) -> list:
members = []
for k, v in engine_class.__dict__.items():
if k in ["__module__", "__dict__", "__weakref__", "__doc__"]:
continue
members.append(v)
return members
def get_engines():
engines = {
EngineType.COMPUTING: None,
EngineType.FEDERATION: None,
EngineType.STORAGE: None,
}
# check service_conf.yaml
if (
conf_utils.get_base_config("default_engines", {}).get(EngineType.COMPUTING)
is None
):
raise RuntimeError(f"must set default_engines on conf/service_conf.yaml")
default_engines = conf_utils.get_base_config("default_engines")
# computing engine
if default_engines.get(EngineType.COMPUTING) is None:
raise RuntimeError(f"{EngineType.COMPUTING} is None,"
f"Please check default_engines on conf/service_conf.yaml")
engines[EngineType.COMPUTING] = default_engines[EngineType.COMPUTING].upper()
if engines[EngineType.COMPUTING] not in get_engine_class_members(ComputingEngine):
raise RuntimeError(f"{engines[EngineType.COMPUTING]} is illegal")
# federation engine
if default_engines.get(EngineType.FEDERATION) is not None:
engines[EngineType.FEDERATION] = default_engines[EngineType.FEDERATION].upper()
# storage engine
if default_engines.get(EngineType.STORAGE) is not None:
engines[EngineType.STORAGE] = default_engines[EngineType.STORAGE].upper()
# set default storage engine and federation engine by computing engine
for t in (EngineType.STORAGE, EngineType.FEDERATION):
if engines.get(t) is None:
# use default relation engine
engines[t] = Relationship.Computing[engines[EngineType.COMPUTING]][t]["default"]
# set default federated mode by federation engine
if engines[EngineType.FEDERATION] == FederationEngine.STANDALONE:
engines["federated_mode"] = FederatedMode.SINGLE
else:
engines["federated_mode"] = FederatedMode.MULTIPLE
if engines[EngineType.STORAGE] not in get_engine_class_members(StorageEngine):
raise RuntimeError(f"{engines[EngineType.STORAGE]} is illegal")
if engines[EngineType.FEDERATION] not in get_engine_class_members(FederationEngine):
raise RuntimeError(f"{engines[EngineType.FEDERATION]} is illegal")
for t in [EngineType.FEDERATION]:
if engines[t] not in Relationship.Computing[engines[EngineType.COMPUTING]][t]["support"]:
raise RuntimeError(f"{engines[t]} is not supported in {engines[EngineType.COMPUTING]}")
return engines
def is_standalone():
return get_engines().get(EngineType.FEDERATION).upper() == FederationEngine.STANDALONE
def get_engines_config_from_conf(group_map=False):
engines_config = {}
engine_group_map = {}
for engine_type in {EngineType.COMPUTING, EngineType.FEDERATION, EngineType.STORAGE}:
engines_config[engine_type] = {}
engine_group_map[engine_type] = {}
for group_name, engine_map in Relationship.EngineConfMap.items():
for engine_type, name_maps in engine_map.items():
for name_map in name_maps:
single_engine_config = conf_utils.get_base_config(group_name, {}).get(name_map[1], {})
if single_engine_config:
engine_name = name_map[0]
engines_config[engine_type][engine_name] = single_engine_config
engine_group_map[engine_type][engine_name] = group_name
if not group_map:
return engines_config
else:
return engines_config, engine_group_map
| 4,582 | 39.557522 | 102 |
py
|
FATE
|
FATE-master/python/fate_arch/common/hive_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
from pyspark.sql import Row
_DELIMITER = ','
NEWLINE = '\n'
def deserialize_line(line):
return bytes.decode(line[0]), pickle.loads(bytes.fromhex(line[1]))
def serialize_line(k, v):
return f'{_DELIMITER}'.join([k, pickle.dumps(v).hex()]) + f"{NEWLINE}"
def read_line(line_data):
line = [str(i) for i in line_data]
return f'{_DELIMITER}'.join(line) + f"{NEWLINE}"
def from_row(r):
return r.key, pickle.loads(bytes.fromhex(r.value))
def to_row(k, v):
return Row(key=k, value=pickle.dumps(v).hex())
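# Round-trip sketch for the helpers above (pyspark must be importable, since
# the module imports Row). serialize_line() produces "key,pickled-hex\n";
# deserialize_line() expects the already-split pair with the key as bytes, so
# this sketch splits on the delimiter first. Key and value are made up.
if __name__ == "__main__":
    line = serialize_line("id_0", [1, 2, 3])
    raw_key, value_hex = line.strip().split(_DELIMITER, 1)
    key, value = deserialize_line([raw_key.encode(), value_hex])
    assert key == "id_0" and value == [1, 2, 3]
    row = to_row("id_1", {"y": 1})
    assert from_row(row) == ("id_1", {"y": 1})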
| 1,161 | 26.023256 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/common/remote_status.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import concurrent.futures
import typing
from fate_arch.common.log import getLogger
LOGGER = getLogger()
_remote_futures = set()
def _clear_callback(future):
LOGGER.debug("future `{future}` done, remove")
_remote_futures.remove(future)
def add_remote_futures(fs: typing.List[concurrent.futures.Future]):
for f in fs:
f.add_done_callback(_clear_callback)
_remote_futures.add(f)
def wait_all_remote_done(timeout=None):
concurrent.futures.wait(
_remote_futures, timeout=timeout, return_when=concurrent.futures.ALL_COMPLETED
)
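# Illustrative sketch using a local thread pool in place of real federation
# transfers: futures registered with add_remote_futures() remove themselves
# from the tracking set when done, and wait_all_remote_done() blocks until the
# set drains or the timeout elapses.
if __name__ == "__main__":
    import time
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(time.sleep, 0.2) for _ in range(3)]
        add_remote_futures(futures)
        wait_all_remote_done(timeout=5)
        print("all tracked futures finished")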
| 1,193 | 26.767442 | 86 |
py
|
FATE
|
FATE-master/python/fate_arch/common/file_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from cachetools import LRUCache, cached
from ruamel import yaml
PROJECT_BASE = os.getenv("FATE_PROJECT_BASE") or os.getenv("FATE_DEPLOY_BASE")
FATE_BASE = os.getenv("FATE_BASE")
READTHEDOC = os.getenv("READTHEDOC")
def get_project_base_directory(*args):
global PROJECT_BASE
global READTHEDOC
if PROJECT_BASE is None:
PROJECT_BASE = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir,
)
)
if READTHEDOC is None:
PROJECT_BASE = os.path.abspath(
os.path.join(
PROJECT_BASE,
os.pardir,
)
)
if args:
return os.path.join(PROJECT_BASE, *args)
return PROJECT_BASE
def get_fate_directory(*args):
global FATE_BASE
if FATE_BASE is None:
FATE_BASE = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir,
)
)
if args:
return os.path.join(FATE_BASE, *args)
return FATE_BASE
def get_fate_python_directory(*args):
return get_fate_directory("python", *args)
def get_federatedml_setting_conf_directory():
return os.path.join(get_fate_python_directory(), 'federatedml', 'conf', 'setting_conf')
@cached(cache=LRUCache(maxsize=10))
def load_json_conf(conf_path):
if os.path.isabs(conf_path):
json_conf_path = conf_path
else:
json_conf_path = os.path.join(get_project_base_directory(), conf_path)
try:
with open(json_conf_path) as f:
return json.load(f)
except BaseException:
raise EnvironmentError(
"loading json file config from '{}' failed!".format(json_conf_path)
)
def dump_json_conf(config_data, conf_path):
if os.path.isabs(conf_path):
json_conf_path = conf_path
else:
json_conf_path = os.path.join(get_project_base_directory(), conf_path)
try:
with open(json_conf_path, "w") as f:
json.dump(config_data, f, indent=4)
except BaseException:
raise EnvironmentError(
"loading json file config from '{}' failed!".format(json_conf_path)
)
def load_json_conf_real_time(conf_path):
if os.path.isabs(conf_path):
json_conf_path = conf_path
else:
json_conf_path = os.path.join(get_project_base_directory(), conf_path)
try:
with open(json_conf_path) as f:
return json.load(f)
except BaseException:
raise EnvironmentError(
"loading json file config from '{}' failed!".format(json_conf_path)
)
def load_yaml_conf(conf_path):
if not os.path.isabs(conf_path):
conf_path = os.path.join(get_project_base_directory(), conf_path)
try:
with open(conf_path) as f:
return yaml.safe_load(f)
except Exception as e:
raise EnvironmentError(
"loading yaml file config from {} failed:".format(conf_path), e
)
def rewrite_yaml_conf(conf_path, config):
if not os.path.isabs(conf_path):
conf_path = os.path.join(get_project_base_directory(), conf_path)
try:
with open(conf_path, "w") as f:
yaml.dump(config, f, Dumper=yaml.RoundTripDumper)
except Exception as e:
raise EnvironmentError(
"rewrite yaml file config {} failed:".format(conf_path), e
)
def rewrite_json_file(filepath, json_data):
with open(filepath, "w") as f:
json.dump(json_data, f, indent=4, separators=(",", ": "))
f.close()
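# Illustrative round trip through the JSON helpers above, using an absolute
# temporary path so the project base directory is not involved; the config
# keys are made up.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        conf_file = os.path.join(tmp_dir, "example_conf.json")
        rewrite_json_file(conf_file, {"example_key": "example_value"})
        print(load_json_conf_real_time(conf_file))  # {'example_key': 'example_value'}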
| 4,382 | 28.816327 | 91 |
py
|
FATE
|
FATE-master/python/fate_arch/common/encrypt_utils.py
|
import base64
from Crypto import Random
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as PKCS1_cipher
def rsa_key_generate():
random_generator = Random.new().read
rsa = RSA.generate(2048, random_generator)
private_pem = rsa.exportKey().decode()
public_pem = rsa.publickey().exportKey().decode()
with open('private_key.pem', "w") as f:
f.write(private_pem)
with open('public_key.pem', "w") as f:
f.write(public_pem)
return private_pem, public_pem
def encrypt_data(public_key, msg):
cipher = PKCS1_cipher.new(RSA.importKey(public_key))
encrypt_text = base64.b64encode(cipher.encrypt(bytes(msg.encode("utf8"))))
return encrypt_text.decode('utf-8')
def pwdecrypt(private_key, encrypt_msg):
cipher = PKCS1_cipher.new(RSA.importKey(private_key))
back_text = cipher.decrypt(base64.b64decode(encrypt_msg), 0)
return back_text.decode('utf-8')
def test_encrypt_decrypt():
msg = "fate"
private_key, public_key = rsa_key_generate()
encrypt_text = encrypt_data(public_key, msg)
print(encrypt_text)
decrypt_text = pwdecrypt(private_key, encrypt_text)
print(msg == decrypt_text)
| 1,189 | 29.512821 | 78 |
py
|
FATE
|
FATE-master/python/fate_arch/common/__init__.py
|
from fate_arch.common._types import FederatedMode, FederatedCommunicationType, EngineType, CoordinationProxyService, \
CoordinationCommunicationProtocol
from fate_arch.common._types import BaseType, Party, DTable
| 217 | 53.5 | 118 |
py
|
FATE
|
FATE-master/python/fate_arch/common/conf_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from filelock import FileLock
from importlib import import_module
from fate_arch.common import file_utils
SERVICE_CONF = "service_conf.yaml"
TRANSFER_CONF = "transfer_conf.yaml"
def conf_realpath(conf_name):
conf_path = f"conf/{conf_name}"
return os.path.join(file_utils.get_project_base_directory(), conf_path)
def get_base_config(key, default=None, conf_name=SERVICE_CONF) -> dict:
local_config = {}
local_path = conf_realpath(f'local.{conf_name}')
if os.path.exists(local_path):
local_config = file_utils.load_yaml_conf(local_path)
if not isinstance(local_config, dict):
raise ValueError(f'Invalid config file: "{local_path}".')
if key is not None and key in local_config:
return local_config[key]
config_path = conf_realpath(conf_name)
config = file_utils.load_yaml_conf(config_path)
if not isinstance(config, dict):
raise ValueError(f'Invalid config file: "{config_path}".')
config.update(local_config)
return config.get(key, default) if key is not None else config
def decrypt_database_password(password):
encrypt_password = get_base_config("encrypt_password", False)
encrypt_module = get_base_config("encrypt_module", False)
private_key = get_base_config("private_key", None)
private_key_file = get_base_config("private_key_file", "")
if not password or not encrypt_password:
return password
if not private_key:
if private_key_file:
with open(conf_realpath(private_key_file)) as f:
private_key = f.read()
else:
raise ValueError("No private key")
module_fun = encrypt_module.split("#")
pwdecrypt_fun = getattr(import_module(module_fun[0]), module_fun[1])
return pwdecrypt_fun(private_key, password)
def decrypt_database_config(database=None, passwd_key="passwd"):
if not database:
database = get_base_config("database", {})
database[passwd_key] = decrypt_database_password(database[passwd_key])
return database
def update_config(key, value, conf_name=SERVICE_CONF):
conf_path = conf_realpath(conf_name=conf_name)
if not os.path.isabs(conf_path):
conf_path = os.path.join(file_utils.get_project_base_directory(), conf_path)
with FileLock(os.path.join(os.path.dirname(conf_path), ".lock")):
config = file_utils.load_yaml_conf(conf_path=conf_path) or {}
config[key] = value
file_utils.rewrite_yaml_conf(conf_path=conf_path, config=config)
| 3,148 | 34.382022 | 84 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/_computing.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
distributed computing
"""
import abc
import typing
from abc import ABCMeta
from collections.abc import Iterable
from fate_arch.abc._address import AddressABC
from fate_arch.abc._path import PathABC
__all__ = ["CTableABC", "CSessionABC"]
# noinspection PyPep8Naming
class CTableABC(metaclass=ABCMeta):
"""
a table of pair-like data supports distributed processing
"""
@property
@abc.abstractmethod
def engine(self):
"""
get the engine name of table
Returns
-------
        str
           engine name of table
"""
...
@property
@abc.abstractmethod
def partitions(self):
"""
get the partitions of table
Returns
-------
int
number of partitions
"""
...
@abc.abstractmethod
def copy(self):
...
@abc.abstractmethod
def save(self, address: AddressABC, partitions: int, schema: dict, **kwargs):
"""
save table
Parameters
----------
address: AddressABC
address to save table to
partitions: int
number of partitions to save as
schema: dict
table schema
"""
...
@abc.abstractmethod
def collect(self, **kwargs) -> typing.Generator:
"""
collect data from table
Returns
-------
generator
generator of data
Notes
------
no order guarantee
"""
...
@abc.abstractmethod
def take(self, n=1, **kwargs):
"""
take ``n`` data from table
Parameters
----------
n: int
number of data to take
Returns
-------
list
a list of ``n`` data
Notes
------
no order guarantee
"""
...
@abc.abstractmethod
def first(self, **kwargs):
"""
take one data from table
Returns
-------
object
a data from table
Notes
-------
no order guarantee
"""
...
@abc.abstractmethod
def count(self) -> int:
"""
number of data in table
Returns
-------
int
number of data
"""
...
@abc.abstractmethod
def map(self, func) -> 'CTableABC':
"""
apply `func` to each data
Parameters
----------
func: ``typing.Callable[[object, object], typing.Tuple[object, object]]``
function map (k1, v1) to (k2, v2)
Returns
-------
CTableABC
A new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([('k1', 1), ('k2', 2), ('k3', 3)], include_key=True, partition=2)
>>> b = a.map(lambda k, v: (k, v**2))
>>> list(b.collect())
[("k1", 1), ("k2", 4), ("k3", 9)]
"""
...
@abc.abstractmethod
def mapValues(self, func):
"""
apply `func` to each value of data
Parameters
----------
func: ``typing.Callable[[object], object]``
map v1 to v2
Returns
-------
CTableABC
A new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([('a', ['apple', 'banana', 'lemon']), ('b', ['grapes'])], include_key=True, partition=2)
>>> b = a.mapValues(lambda x: len(x))
>>> list(b.collect())
[('a', 3), ('b', 1)]
"""
...
@abc.abstractmethod
def mapPartitions(self, func, use_previous_behavior=True, preserves_partitioning=False):
"""
apply ``func`` to each partition of table
Parameters
----------
func: ``typing.Callable[[iter], list]``
accept an iterator of pair, return a list of pair
use_previous_behavior: bool
this parameter is provided for compatible reason, if set True, call this func will call ``applyPartitions`` instead
preserves_partitioning: bool
flag indicate whether the `func` will preserve partition
Returns
-------
CTableABC
a new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([1, 2, 3, 4, 5], include_key=False, partition=2)
>>> def f(iterator):
... s = 0
... for k, v in iterator:
... s += v
... return [(s, s)]
...
>>> b = a.mapPartitions(f)
>>> list(b.collect())
[(6, 6), (9, 9)]
"""
...
@abc.abstractmethod
def mapReducePartitions(self, mapper, reducer, **kwargs):
"""
apply ``mapper`` to each partition of table and then perform reduce by key operation with `reducer`
Parameters
----------
mapper: ``typing.Callable[[iter], list]``
accept an iterator of pair, return a list of pair
reducer: ``typing.Callable[[object, object], object]``
reduce v1, v2 to v3
Returns
-------
CTableABC
a new table
Examples
--------
>>> from fate_arch.session import computing_session
        >>> table = computing_session.parallelize([(1, 2), (2, 3), (3, 4), (4, 5)], include_key=True, partition=2)
>>> def _mapper(it):
... r = []
... for k, v in it:
... r.append((k % 3, v**2))
... r.append((k % 2, v ** 3))
... return r
>>> def _reducer(a, b):
... return a + b
>>> output = table.mapReducePartitions(_mapper, _reducer)
>>> collected = dict(output.collect())
>>> assert collected[0] == 3 ** 3 + 5 ** 3 + 4 ** 2
>>> assert collected[1] == 2 ** 3 + 4 ** 3 + 2 ** 2 + 5 ** 2
>>> assert collected[2] == 3 ** 2
"""
...
def applyPartitions(self, func):
"""
apply ``func`` to each partitions as a single object
Parameters
----------
func: ``typing.Callable[[iter], object]``
accept a iterator, return a object
Returns
-------
CTableABC
a new table, with each partition contains a single key-value pair
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([1, 2, 3], partition=3, include_key=False)
>>> def f(it):
... r = []
... for k, v in it:
        ...         r.append((v, v**2, v**3))
... return r
>>> output = a.applyPartitions(f)
>>> assert (2, 2**2, 2**3) in [v[0] for _, v in output.collect()]
"""
...
@abc.abstractmethod
def mapPartitionsWithIndex(self, func, preserves_partitioning=False):
...
@abc.abstractmethod
def flatMap(self, func):
"""
apply a flat ``func`` to each data of table
Parameters
----------
func: ``typing.Callable[[object, object], typing.List[object, object]]``
a flat function accept two parameters return a list of pair
Returns
-------
CTableABC
a new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([(1, 1), (2, 2)], include_key=True, partition=2)
>>> b = a.flatMap(lambda x, y: [(x, y), (x + 10, y ** 2)])
>>> c = list(b.collect())
        >>> assert len(c) == 4
>>> assert ((1, 1) in c) and ((2, 2) in c) and ((11, 1) in c) and ((12, 4) in c)
"""
...
@abc.abstractmethod
def reduce(self, func):
"""
reduces all value in pair of table by a binary function `func`
Parameters
----------
func: typing.Callable[[object, object], object]
binary function reduce two value into one
Returns
-------
object
a single object
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize(range(100), include_key=False, partition=4)
>>> assert a.reduce(lambda x, y: x + y) == sum(range(100))
Notes
------
`func` should be associative
"""
...
@abc.abstractmethod
def glom(self):
"""
coalesces all data within partition into a list
Returns
-------
list
list containing all coalesced partition and its elements.
First element of each tuple is chosen from key of last element of each partition.
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize(range(5), include_key=False, partition=3).glom().collect()
>>> list(a)
[(2, [(2, 2)]), (3, [(0, 0), (3, 3)]), (4, [(1, 1), (4, 4)])]
"""
...
@abc.abstractmethod
def sample(self, *, fraction: typing.Optional[float] = None, num: typing.Optional[int] = None, seed=None):
"""
return a sampled subset of this Table.
Parameters
----------
fraction: float
            Expected size of the sample as a fraction of this table's size.
            Without replacement: probability that each element is chosen; fraction must be in [0, 1].
            With replacement: expected number of times each element is chosen.
num: int
Exact number of the sample from this table's size
seed: int
Seed of the random number generator. Use current timestamp when `None` is passed.
Returns
-------
CTableABC
a new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> x = computing_session.parallelize(range(100), include_key=False, partition=4)
>>> 6 <= x.sample(fraction=0.1, seed=81).count() <= 14
True
Notes
-------
use one of ``fraction`` and ``num``, not both
"""
...
@abc.abstractmethod
def filter(self, func):
"""
returns a new table containing only those keys which satisfy a predicate passed in via ``func``.
Parameters
----------
func: typing.Callable[[object, object], bool]
Predicate function returning a boolean.
Returns
-------
CTableABC
A new table containing results.
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([0, 1, 2], include_key=False, partition=2)
>>> b = a.filter(lambda k, v : k % 2 == 0)
>>> list(b.collect())
[(0, 0), (2, 2)]
>>> c = a.filter(lambda k, v : v % 2 != 0)
>>> list(c.collect())
[(1, 1)]
"""
...
@abc.abstractmethod
def join(self, other, func):
"""
returns intersection of this table and the other table.
function ``func`` will be applied to values of keys that exist in both table.
Parameters
----------
other: CTableABC
another table to be operated with.
func: ``typing.Callable[[object, object], object]``
the function applying to values whose key exists in both tables.
default using left table's value.
Returns
-------
CTableABC
a new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([1, 2, 3], include_key=False, partition=2) # [(0, 1), (1, 2), (2, 3)]
>>> b = computing_session.parallelize([(1, 1), (2, 2), (3, 3)], include_key=True, partition=2)
>>> c = a.join(b, lambda v1, v2 : v1 + v2)
>>> list(c.collect())
[(1, 3), (2, 5)]
"""
...
@abc.abstractmethod
def union(self, other, func=lambda v1, v2: v1):
"""
returns union of this table and the other table.
function ``func`` will be applied to values of keys that exist in both table.
Parameters
----------
other: CTableABC
another table to be operated with.
func: ``typing.Callable[[object, object], object]``
The function applying to values whose key exists in both tables.
default using left table's value.
Returns
-------
CTableABC
a new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize([1, 2, 3], include_key=False, partition=2) # [(0, 1), (1, 2), (2, 3)]
>>> b = computing_session.parallelize([(1, 1), (2, 2), (3, 3)], include_key=True, partition=2)
>>> c = a.union(b, lambda v1, v2 : v1 + v2)
>>> list(c.collect())
[(0, 1), (1, 3), (2, 5), (3, 3)]
"""
...
@abc.abstractmethod
def subtractByKey(self, other):
"""
returns a new table containing elements only in this table but not in the other table.
Parameters
----------
other: CTableABC
Another table to be subtractbykey with.
Returns
-------
CTableABC
A new table
Examples
--------
>>> from fate_arch.session import computing_session
>>> a = computing_session.parallelize(range(10), include_key=False, partition=2)
>>> b = computing_session.parallelize(range(5), include_key=False, partition=2)
>>> c = a.subtractByKey(b)
>>> list(c.collect())
[(5, 5), (6, 6), (7, 7), (8, 8), (9, 9)]
"""
...
@property
def schema(self):
if not hasattr(self, "_schema"):
setattr(self, "_schema", {})
return getattr(self, "_schema")
@schema.setter
def schema(self, value):
setattr(self, "_schema", value)
class CSessionABC(metaclass=ABCMeta):
"""
computing session to load/create/clean tables
"""
@abc.abstractmethod
def load(self, address: AddressABC, partitions, schema: dict, **kwargs) -> typing.Union[PathABC, CTableABC]:
"""
load a table from given address
Parameters
----------
address: AddressABC
address to load table from
partitions: int
number of partitions of loaded table
schema: dict
schema associate with this table
Returns
-------
CTableABC
a table in memory
"""
...
@abc.abstractmethod
def parallelize(self, data: Iterable, partition: int, include_key: bool, **kwargs) -> CTableABC:
"""
create table from iterable data
Parameters
----------
data: Iterable
data to create table from
partition: int
number of partitions of created table
include_key: bool
``True`` for create table directly from data, ``False`` for create table with generated keys start from 0
Returns
-------
CTableABC
a table create from data
"""
pass
@abc.abstractmethod
def cleanup(self, name, namespace):
"""
delete table(s)
Parameters
----------
name: str
table name or wildcard character
namespace: str
namespace
"""
@abc.abstractmethod
def destroy(self):
pass
@abc.abstractmethod
def stop(self):
pass
@abc.abstractmethod
def kill(self):
pass
@property
@abc.abstractmethod
def session_id(self) -> str:
"""
get computing session id
Returns
-------
str
computing session id
"""
...
| 16,794 | 25.829073 | 134 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/_path.py
|
from abc import ABCMeta
class PathABC(metaclass=ABCMeta):
...
| 68 | 10.5 | 33 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/_federation.py
|
import abc
import typing
from abc import ABCMeta
from fate_arch.abc._gc import GarbageCollectionABC
from fate_arch.common import Party
__all__ = ["FederationABC"]
class FederationABC(metaclass=ABCMeta):
"""
federation, get or remote objects and tables
"""
@property
@abc.abstractmethod
def session_id(self) -> str:
...
@abc.abstractmethod
def get(self, name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC) -> typing.List:
"""
get objects/tables from ``parties``
Parameters
----------
name: str
name of transfer variable
tag: str
tag to distinguish each transfer
parties: typing.List[Party]
parties to get objects/tables from
gc: GarbageCollectionABC
used to do some clean jobs
Returns
-------
list
a list of object or a list of table get from parties with same order of `parties`
"""
...
@abc.abstractmethod
def remote(self, v,
name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC):
"""
remote object/table to ``parties``
Parameters
----------
v: object or table
object/table to remote
name: str
name of transfer variable
tag: str
tag to distinguish each transfer
parties: typing.List[Party]
parties to remote object/table to
gc: GarbageCollectionABC
used to do some clean jobs
Returns
-------
Notes
"""
...
@abc.abstractmethod
def destroy(self, parties):
"""
destroy federation from ``parties``
Parameters
----------
parties: typing.List[Party]
parties to get objects/tables from
Returns
-------
None
"""
...
| 2,039 | 21.417582 | 92 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/_gc.py
|
import abc
class GarbageCollectionABC(metaclass=abc.ABCMeta):
def add_gc_action(self, tag: str, obj, method, args_dict):
...
| 140 | 16.625 | 62 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/_components.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from abc import ABCMeta
class ComponentMeta(metaclass=ABCMeta):
def get_run_obj(self, role: str):
...
def get_run_obj_name(self, role: str) -> str:
...
def get_param_obj(self, cpn_name: str):
...
def get_supported_roles(self) -> typing.Set[str]:
...
class Components(metaclass=ABCMeta):
provider_version = None
provider_name = None
provider_path = None
@classmethod
def get_names(cls) -> typing.Dict[str, dict]:
...
@classmethod
def get(cls, name: str, cache) -> ComponentMeta:
...
| 1,215 | 24.333333 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/__init__.py
|
from fate_arch.abc._gc import GarbageCollectionABC
from fate_arch.abc._address import AddressABC
from fate_arch.abc._computing import CTableABC, CSessionABC
from fate_arch.abc._storage import StorageTableABC, StorageSessionABC, StorageTableMetaABC
from fate_arch.abc._federation import FederationABC
from fate_arch.abc._components import Components, ComponentMeta
| 365 | 44.75 | 90 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/_storage.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Iterable
from fate_arch.common.log import getLogger
LOGGER = getLogger()
class StorageTableMetaABC(metaclass=abc.ABCMeta):
@abc.abstractmethod
def create(self):
...
@abc.abstractmethod
def set_metas(self, **kwargs):
...
@abc.abstractmethod
def query_table_meta(self, filter_fields, query_fields=None):
...
@abc.abstractmethod
def update_metas(self, schema=None, count=None, part_of_data=None, description=None, partitions=None, **kwargs):
...
@abc.abstractmethod
def destroy_metas(self):
...
@abc.abstractmethod
def get_name(self):
...
@abc.abstractmethod
def get_namespace(self):
...
@abc.abstractmethod
def get_address(self):
...
@abc.abstractmethod
def get_engine(self):
...
@abc.abstractmethod
def get_store_type(self):
...
@abc.abstractmethod
def get_options(self):
...
@abc.abstractmethod
def get_partitions(self):
...
@abc.abstractmethod
def get_in_serialized(self):
...
@abc.abstractmethod
def get_id_delimiter(self):
...
@abc.abstractmethod
def get_extend_sid(self):
...
@abc.abstractmethod
def get_auto_increasing_sid(self):
...
@abc.abstractmethod
def get_have_head(self):
...
@abc.abstractmethod
def get_schema(self):
...
@abc.abstractmethod
def get_count(self):
...
@abc.abstractmethod
def get_part_of_data(self):
...
@abc.abstractmethod
def get_description(self):
...
@abc.abstractmethod
def get_origin(self):
...
@abc.abstractmethod
def get_disable(self):
...
@abc.abstractmethod
def to_dict(self) -> dict:
...
class StorageTableABC(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def name(self):
...
@property
@abc.abstractmethod
def namespace(self):
...
@property
@abc.abstractmethod
def address(self):
...
@property
@abc.abstractmethod
def engine(self):
...
@property
@abc.abstractmethod
def store_type(self):
...
@property
@abc.abstractmethod
def options(self):
...
@property
@abc.abstractmethod
def partitions(self):
...
@property
@abc.abstractmethod
def meta(self) -> StorageTableMetaABC:
...
@meta.setter
@abc.abstractmethod
def meta(self, meta: StorageTableMetaABC):
...
@abc.abstractmethod
def update_meta(self,
schema=None,
count=None,
part_of_data=None,
description=None,
partitions=None,
**kwargs) -> StorageTableMetaABC:
...
@abc.abstractmethod
def create_meta(self, **kwargs) -> StorageTableMetaABC:
...
@abc.abstractmethod
def put_all(self, kv_list: Iterable, **kwargs):
...
@abc.abstractmethod
def collect(self, **kwargs) -> list:
...
@abc.abstractmethod
def read(self) -> list:
...
@abc.abstractmethod
def count(self):
...
@abc.abstractmethod
def destroy(self):
...
@abc.abstractmethod
def check_address(self):
...
class StorageSessionABC(metaclass=abc.ABCMeta):
@abc.abstractmethod
def create_table(self, address, name, namespace, partitions, storage_type=None, options=None,
**kwargs) -> StorageTableABC:
...
@abc.abstractmethod
def get_table(self, name, namespace) -> StorageTableABC:
...
@abc.abstractmethod
def get_table_meta(self, name, namespace) -> StorageTableMetaABC:
...
# @abc.abstractmethod
# def table(self, name, namespace, address, partitions, store_type=None, options=None, **kwargs) -> StorageTableABC:
# ...
# @abc.abstractmethod
# def get_storage_info(self, name, namespace):
# ...
@abc.abstractmethod
def destroy(self):
...
@abc.abstractmethod
def stop(self):
...
@abc.abstractmethod
def kill(self):
...
@abc.abstractmethod
def cleanup(self, name, namespace):
...
@property
@abc.abstractmethod
def session_id(self) -> str:
...
@property
@abc.abstractmethod
def engine(self) -> str:
...
| 5,179 | 19.155642 | 120 |
py
|
FATE
|
FATE-master/python/fate_arch/abc/_address.py
|
import abc
class AddressABC(metaclass=abc.ABCMeta):
...
| 62 | 9.5 | 40 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/_util.py
|
#
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.abc import CTableABC
def is_table(v):
return isinstance(v, CTableABC)
| 712 | 30 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/non_distributed.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class LocalData():
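    """
    Placeholder for non-distributed (local file) data: only the path and an
    optional computing engine are kept; count/save are effectively no-ops.
    """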
def __init__(self, path, engine=None):
self.path = path
self.schema = {"header": [], "sid_name": "id"}
self._engine = engine
@property
def engine(self):
return self._engine
@property
def partitions(self):
return
def count(self, **kwargs):
return 0
def save(self, address, **kwargs):
pass
| 1,013 | 26.405405 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/__init__.py
|
#
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing._type import ComputingEngine
from fate_arch.computing._util import is_table
__all__ = ['is_table', 'ComputingEngine']
| 765 | 33.818182 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/_type.py
|
#
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ComputingEngine(object):
EGGROLL = 'EGGROLL'
SPARK = 'SPARK'
LINKIS_SPARK = 'LINKIS_SPARK'
STANDALONE = 'STANDALONE'
| 760 | 32.086957 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/standalone/_table.py
|
#
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import typing
from fate_arch.abc import CTableABC
from fate_arch.common import log
from fate_arch.common.profile import computing_profile
from fate_arch.computing._type import ComputingEngine
LOGGER = log.getLogger()
class Table(CTableABC):
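    """
    Computing table backed by the standalone engine: wraps the underlying
    standalone table and exposes the CTableABC operations on top of it.
    """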
def __init__(self, table):
self._table = table
self._engine = ComputingEngine.STANDALONE
self._count = None
@property
def engine(self):
return self._engine
def __getstate__(self):
pass
@property
def partitions(self):
return self._table.partitions
def copy(self):
return Table(self._table.mapValues(lambda x: x))
@computing_profile
def save(self, address, partitions, schema, **kwargs):
from fate_arch.common.address import StandaloneAddress
if isinstance(address, StandaloneAddress):
self._table.save_as(
name=address.name,
namespace=address.namespace,
partition=partitions,
need_cleanup=False,
)
schema.update(self.schema)
return
from fate_arch.common.address import PathAddress
if isinstance(address, PathAddress):
from fate_arch.computing.non_distributed import LocalData
return LocalData(address.path)
raise NotImplementedError(
f"address type {type(address)} not supported with standalone backend"
)
@computing_profile
def count(self) -> int:
if self._count is None:
self._count = self._table.count()
return self._count
@computing_profile
def collect(self, **kwargs):
return self._table.collect(**kwargs)
@computing_profile
def take(self, n=1, **kwargs):
return self._table.take(n=n, **kwargs)
@computing_profile
def first(self, **kwargs):
resp = list(itertools.islice(self._table.collect(**kwargs), 1))
if len(resp) < 1:
raise RuntimeError("table is empty")
return resp[0]
@computing_profile
def reduce(self, func, **kwargs):
return self._table.reduce(func)
@computing_profile
def map(self, func):
return Table(self._table.map(func))
@computing_profile
def mapValues(self, func):
return Table(self._table.mapValues(func))
@computing_profile
def flatMap(self, func):
return Table(self._table.flatMap(func))
@computing_profile
def applyPartitions(self, func):
return Table(self._table.applyPartitions(func))
@computing_profile
def mapPartitions(
self, func, use_previous_behavior=True, preserves_partitioning=False
):
if use_previous_behavior is True:
LOGGER.warning(
"please use `applyPartitions` instead of `mapPartitions` "
"if the previous behavior was expected. "
"The previous behavior will not work in future"
)
return Table(self._table.applyPartitions(func))
return Table(
self._table.mapPartitions(
func, preserves_partitioning=preserves_partitioning
)
)
@computing_profile
def mapReducePartitions(self, mapper, reducer, **kwargs):
return Table(self._table.mapReducePartitions(mapper, reducer))
@computing_profile
def mapPartitionsWithIndex(self, func, preserves_partitioning=False, **kwargs):
return Table(
self._table.mapPartitionsWithIndex(
func, preserves_partitioning=preserves_partitioning
)
)
@computing_profile
def glom(self):
return Table(self._table.glom())
@computing_profile
def sample(
self,
*,
fraction: typing.Optional[float] = None,
num: typing.Optional[int] = None,
seed=None,
):
if fraction is not None:
return Table(self._table.sample(fraction=fraction, seed=seed))
if num is not None:
total = self._table.count()
if num > total:
raise ValueError(
f"not enough data to sample, own {total} but required {num}"
)
frac = num / float(total)
while True:
sampled_table = self._table.sample(fraction=frac, seed=seed)
sampled_count = sampled_table.count()
if sampled_count < num:
frac += 0.1
else:
break
if sampled_count > num:
drops = sampled_table.take(sampled_count - num)
for k, v in drops:
sampled_table.delete(k)
return Table(sampled_table)
raise ValueError(
f"exactly one of `fraction` or `num` required, fraction={fraction}, num={num}"
)
@computing_profile
def filter(self, func):
return Table(self._table.filter(func))
@computing_profile
def join(self, other: "Table", func):
return Table(self._table.join(other._table, func))
@computing_profile
def subtractByKey(self, other: "Table"):
return Table(self._table.subtractByKey(other._table))
@computing_profile
def union(self, other: "Table", func=lambda v1, v2: v1):
return Table(self._table.union(other._table, func))
| 6,013 | 29.07 | 90 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/standalone/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing.standalone._csession import CSession
from fate_arch.computing.standalone._table import Table
__all__ = ['Table', 'CSession']
| 768 | 35.619048 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/standalone/_csession.py
|
#
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Iterable
from fate_arch._standalone import Session
from fate_arch.abc import AddressABC, CSessionABC
from fate_arch.common.base_utils import fate_uuid
from fate_arch.common.log import getLogger
from fate_arch.computing.standalone._table import Table
LOGGER = getLogger()
class CSession(CSessionABC):
    def __init__(self, session_id: str, options=None):
        # keep max_workers bound even when no options are given
        max_workers = None
        if options is not None:
            max_workers = options.get("task_cores", None)
        self._session = Session(session_id, max_workers=max_workers)
def get_standalone_session(self):
return self._session
@property
def session_id(self):
return self._session.session_id
def load(self, address: AddressABC, partitions: int, schema: dict, **kwargs):
from fate_arch.common.address import StandaloneAddress
from fate_arch.storage import StandaloneStoreType
if isinstance(address, StandaloneAddress):
raw_table = self._session.load(address.name, address.namespace)
if address.storage_type != StandaloneStoreType.ROLLPAIR_IN_MEMORY:
raw_table = raw_table.save_as(
name=f"{address.name}_{fate_uuid()}",
namespace=address.namespace,
partition=partitions,
need_cleanup=True,
)
table = Table(raw_table)
table.schema = schema
return table
from fate_arch.common.address import PathAddress
if isinstance(address, PathAddress):
from fate_arch.computing.non_distributed import LocalData
from fate_arch.computing import ComputingEngine
return LocalData(address.path, engine=ComputingEngine.STANDALONE)
raise NotImplementedError(
f"address type {type(address)} not supported with standalone backend"
)
def parallelize(self, data: Iterable, partition: int, include_key: bool, **kwargs):
table = self._session.parallelize(
data=data, partition=partition, include_key=include_key, **kwargs
)
return Table(table)
def cleanup(self, name, namespace):
return self._session.cleanup(name=name, namespace=namespace)
def stop(self):
return self._session.stop()
def kill(self):
return self._session.kill()
def destroy(self):
try:
LOGGER.info(f"clean table namespace {self.session_id}")
self.cleanup(namespace=self.session_id, name="*")
        except Exception:
            LOGGER.warning(f"no table namespace {self.session_id} found")
        try:
            self.stop()
        except Exception as e:
            LOGGER.warning(f"stop session {self.session_id} failed, try to kill: {e}")
self.kill()
| 3,439 | 35.595745 | 92 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/eggroll/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from fate_arch.abc import CTableABC
from fate_arch.common import log
from fate_arch.common.profile import computing_profile
from fate_arch.computing._type import ComputingEngine
LOGGER = log.getLogger()
class Table(CTableABC):
def __init__(self, rp):
self._rp = rp
self._engine = ComputingEngine.EGGROLL
self._count = None
@property
def engine(self):
return self._engine
@property
def partitions(self):
return self._rp.get_partitions()
def copy(self):
return Table(self._rp.map_values(lambda x: x))
@computing_profile
def save(self, address, partitions, schema: dict, **kwargs):
options = kwargs.get("options", {})
from fate_arch.common.address import EggRollAddress
from fate_arch.storage import EggRollStoreType
if isinstance(address, EggRollAddress):
options["store_type"] = kwargs.get("store_type", EggRollStoreType.ROLLPAIR_LMDB)
self._rp.save_as(name=address.name, namespace=address.namespace, partition=partitions, options=options)
schema.update(self.schema)
return
from fate_arch.common.address import PathAddress
if isinstance(address, PathAddress):
from fate_arch.computing.non_distributed import LocalData
return LocalData(address.path)
raise NotImplementedError(f"address type {type(address)} not supported with eggroll backend")
@computing_profile
def collect(self, **kwargs) -> list:
return self._rp.get_all()
@computing_profile
def count(self, **kwargs) -> int:
if self._count is None:
self._count = self._rp.count()
return self._count
@computing_profile
def take(self, n=1, **kwargs):
options = dict(keys_only=False)
return self._rp.take(n=n, options=options)
@computing_profile
def first(self):
options = dict(keys_only=False)
return self._rp.first(options=options)
@computing_profile
def map(self, func, **kwargs):
return Table(self._rp.map(func))
@computing_profile
def mapValues(self, func: typing.Callable[[typing.Any], typing.Any], **kwargs):
return Table(self._rp.map_values(func))
@computing_profile
def applyPartitions(self, func):
return Table(self._rp.collapse_partitions(func))
@computing_profile
def mapPartitions(self, func, use_previous_behavior=True, preserves_partitioning=False, **kwargs):
if use_previous_behavior is True:
LOGGER.warning(f"please use `applyPartitions` instead of `mapPartitions` "
f"if the previous behavior was expected. "
f"The previous behavior will not work in future")
return self.applyPartitions(func)
return Table(self._rp.map_partitions(func, options={"shuffle": not preserves_partitioning}))
@computing_profile
def mapReducePartitions(self, mapper, reducer, **kwargs):
return Table(self._rp.map_partitions(func=mapper, reduce_op=reducer))
@computing_profile
def mapPartitionsWithIndex(self, func, preserves_partitioning=False, **kwargs):
return Table(self._rp.map_partitions_with_index(func, options={"shuffle": not preserves_partitioning}))
@computing_profile
def reduce(self, func, **kwargs):
return self._rp.reduce(func)
@computing_profile
def join(self, other: 'Table', func, **kwargs):
return Table(self._rp.join(other._rp, func=func))
@computing_profile
def glom(self, **kwargs):
return Table(self._rp.glom())
@computing_profile
def sample(self, *, fraction: typing.Optional[float] = None, num: typing.Optional[int] = None, seed=None):
if fraction is not None:
return Table(self._rp.sample(fraction=fraction, seed=seed))
if num is not None:
total = self._rp.count()
if num > total:
raise ValueError(f"not enough data to sample, own {total} but required {num}")
frac = num / float(total)
while True:
sampled_table = self._rp.sample(fraction=frac, seed=seed)
sampled_count = sampled_table.count()
if sampled_count < num:
frac *= 1.1
else:
break
if sampled_count > num:
drops = sampled_table.take(sampled_count - num)
for k, v in drops:
sampled_table.delete(k)
return Table(sampled_table)
raise ValueError(f"exactly one of `fraction` or `num` required, fraction={fraction}, num={num}")
@computing_profile
def subtractByKey(self, other: 'Table', **kwargs):
return Table(self._rp.subtract_by_key(other._rp))
@computing_profile
def filter(self, func, **kwargs):
return Table(self._rp.filter(func))
@computing_profile
def union(self, other: 'Table', func=lambda v1, v2: v1, **kwargs):
return Table(self._rp.union(other._rp, func=func))
@computing_profile
def flatMap(self, func, **kwargs):
flat_map = self._rp.flat_map(func)
shuffled = flat_map.map(lambda k, v: (k, v)) # trigger shuffle
return Table(shuffled)
| 5,946 | 33.575581 | 115 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/eggroll/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing.eggroll._table import Table
from fate_arch.computing.eggroll._csession import CSession
__all__ = ['Table', 'CSession']
| 762 | 35.333333 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/eggroll/_csession.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from eggroll.core.session import session_init
from eggroll.roll_pair.roll_pair import runtime_init
from fate_arch.abc import AddressABC, CSessionABC
from fate_arch.common.base_utils import fate_uuid
from fate_arch.common.log import getLogger
from fate_arch.common.profile import computing_profile
from fate_arch.computing.eggroll import Table
LOGGER = getLogger()
class CSession(CSessionABC):
def __init__(self, session_id, options: dict = None):
if options is None:
options = {}
if "eggroll.session.deploy.mode" not in options:
options["eggroll.session.deploy.mode"] = "cluster"
if "eggroll.rollpair.inmemory_output" not in options:
options["eggroll.rollpair.inmemory_output"] = True
self._rp_session = session_init(session_id=session_id, options=options)
self._rpc = runtime_init(session=self._rp_session)
self._session_id = self._rp_session.get_session_id()
def get_rpc(self):
return self._rpc
@property
def session_id(self):
return self._session_id
@computing_profile
def load(self, address: AddressABC, partitions: int, schema: dict, **kwargs):
from fate_arch.common.address import EggRollAddress
from fate_arch.storage import EggRollStoreType
if isinstance(address, EggRollAddress):
options = kwargs.get("option", {})
options["total_partitions"] = partitions
options["store_type"] = kwargs.get("store_type", EggRollStoreType.ROLLPAIR_LMDB)
options["create_if_missing"] = False
rp = self._rpc.load(
namespace=address.namespace, name=address.name, options=options
)
if rp is None or rp.get_partitions() == 0:
raise RuntimeError(
f"no exists: {address.name}, {address.namespace}"
)
if options["store_type"] != EggRollStoreType.ROLLPAIR_IN_MEMORY:
rp = rp.save_as(
name=f"{address.name}_{fate_uuid()}",
namespace=self.session_id,
partition=partitions,
options={"store_type": EggRollStoreType.ROLLPAIR_IN_MEMORY},
)
table = Table(rp=rp)
table.schema = schema
return table
from fate_arch.common.address import PathAddress
if isinstance(address, PathAddress):
from fate_arch.computing.non_distributed import LocalData
from fate_arch.computing import ComputingEngine
return LocalData(address.path, engine=ComputingEngine.EGGROLL)
raise NotImplementedError(
f"address type {type(address)} not supported with eggroll backend"
)
@computing_profile
def parallelize(self, data, partition: int, include_key: bool, **kwargs) -> Table:
options = dict()
options["total_partitions"] = partition
options["include_key"] = include_key
rp = self._rpc.parallelize(data=data, options=options)
return Table(rp)
def cleanup(self, name, namespace):
self._rpc.cleanup(name=name, namespace=namespace)
def stop(self):
return self._rp_session.stop()
def kill(self):
return self._rp_session.kill()
def destroy(self):
try:
LOGGER.info(f"clean table namespace {self.session_id}")
self.cleanup(namespace=self.session_id, name="*")
        except Exception:
            LOGGER.warning(f"no table namespace {self.session_id} found")
        try:
            self.stop()
        except Exception as e:
            LOGGER.warning(f"stop session {self.session_id} failed, try to kill: {e}")
self.kill()
| 4,390 | 35.591667 | 92 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/spark/_materialize.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import StorageLevel
# noinspection PyUnresolvedReferences
def materialize(rdd):
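    # persist at the configured storage level and run count() to force
    # evaluation, so the rdd is actually computed and cached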
rdd.persist(get_storage_level())
rdd.count()
return rdd
def unmaterialize(rdd):
rdd.unpersist()
# noinspection PyUnresolvedReferences
def get_storage_level():
return StorageLevel.MEMORY_AND_DISK
| 932 | 25.657143 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/spark/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from itertools import chain
import typing
import pyspark
from pyspark.rddsampler import RDDSamplerBase
from fate_arch.abc import CTableABC
from fate_arch.common import log, hdfs_utils, hive_utils
from fate_arch.common.profile import computing_profile
from fate_arch.computing.spark._materialize import materialize, unmaterialize
from scipy.stats import hypergeom
from fate_arch.computing._type import ComputingEngine
LOGGER = log.getLogger()
class Table(CTableABC):
def __init__(self, rdd):
self._rdd: pyspark.RDD = rdd
self._engine = ComputingEngine.SPARK
self._count = None
@property
def engine(self):
return self._engine
def __getstate__(self):
pass
def __del__(self):
try:
unmaterialize(self._rdd)
del self._rdd
except BaseException:
return
def copy(self):
"""rdd is immutable, yet, inside content could be modify in some case"""
return Table(_map_value(self._rdd, lambda x: x))
@computing_profile
def save(self, address, partitions, schema, **kwargs):
from fate_arch.common.address import HDFSAddress
if isinstance(address, HDFSAddress):
self._rdd.map(lambda x: hdfs_utils.serialize(x[0], x[1])).repartition(
partitions
).saveAsTextFile(f"{address.name_node}/{address.path}")
schema.update(self.schema)
return
from fate_arch.common.address import HiveAddress, LinkisHiveAddress
if isinstance(address, (HiveAddress, LinkisHiveAddress)):
# df = (
# self._rdd.map(lambda x: hive_utils.to_row(x[0], x[1]))
# .repartition(partitions)
# .toDF()
# )
LOGGER.debug(f"partitions: {partitions}")
_repartition = self._rdd.map(lambda x: hive_utils.to_row(x[0], x[1])).repartition(partitions)
_repartition.toDF().write.saveAsTable(f"{address.database}.{address.name}")
schema.update(self.schema)
return
from fate_arch.common.address import LocalFSAddress
if isinstance(address, LocalFSAddress):
self._rdd.map(lambda x: hdfs_utils.serialize(x[0], x[1])).repartition(
partitions
).saveAsTextFile(address.path)
schema.update(self.schema)
return
raise NotImplementedError(
f"address type {type(address)} not supported with spark backend"
)
@property
def partitions(self):
return self._rdd.getNumPartitions()
@computing_profile
def map(self, func, **kwargs):
return from_rdd(_map(self._rdd, func))
@computing_profile
def mapValues(self, func, **kwargs):
return from_rdd(_map_value(self._rdd, func))
@computing_profile
def mapPartitions(
self, func, use_previous_behavior=True, preserves_partitioning=False, **kwargs
):
if use_previous_behavior is True:
LOGGER.warning(
f"please use `applyPartitions` instead of `mapPartitions` "
f"if the previous behavior was expected. "
f"The previous behavior will not work in future"
)
return self.applyPartitions(func)
return from_rdd(
self._rdd.mapPartitions(func, preservesPartitioning=preserves_partitioning)
)
@computing_profile
def mapReducePartitions(self, mapper, reducer, **kwargs):
return from_rdd(self._rdd.mapPartitions(mapper).reduceByKey(reducer))
@computing_profile
def applyPartitions(self, func, **kwargs):
return from_rdd(_map_partitions(self._rdd, func))
@computing_profile
def mapPartitionsWithIndex(self, func, preserves_partitioning=False, **kwargs):
return from_rdd(
self._rdd.mapPartitionsWithIndex(func, preservesPartitioning=preserves_partitioning)
)
@computing_profile
def glom(self, **kwargs):
return from_rdd(_glom(self._rdd))
@computing_profile
def sample(
self,
*,
fraction: typing.Optional[float] = None,
num: typing.Optional[int] = None,
seed=None,
):
if fraction is not None:
return from_rdd(
self._rdd.sample(fraction=fraction, withReplacement=False, seed=seed)
)
if num is not None:
return from_rdd(_exactly_sample(self._rdd, num, seed=seed))
raise ValueError(
f"exactly one of `fraction` or `num` required, fraction={fraction}, num={num}"
)
@computing_profile
def filter(self, func, **kwargs):
return from_rdd(_filter(self._rdd, func))
@computing_profile
def flatMap(self, func, **kwargs):
return from_rdd(_flat_map(self._rdd, func))
@computing_profile
def reduce(self, func, **kwargs):
return self._rdd.values().reduce(func)
@computing_profile
def collect(self, **kwargs):
# return iter(self._rdd.collect())
return self._rdd.toLocalIterator()
@computing_profile
def take(self, n=1, **kwargs):
_value = self._rdd.take(n)
if kwargs.get("filter", False):
self._rdd = self._rdd.filter(lambda xy: xy not in [_xy for _xy in _value])
return _value
@computing_profile
def first(self, **kwargs):
return self.take(1)[0]
@computing_profile
def count(self, **kwargs):
if self._count is None:
self._count = self._rdd.count()
return self._count
@computing_profile
def join(self, other: "Table", func=None, **kwargs):
return from_rdd(_join(self._rdd, other._rdd, func=func))
@computing_profile
def subtractByKey(self, other: "Table", **kwargs):
return from_rdd(_subtract_by_key(self._rdd, other._rdd))
@computing_profile
def union(self, other: "Table", func=None, **kwargs):
return from_rdd(_union(self._rdd, other._rdd, func))
def from_hdfs(paths: str, partitions, in_serialized=True, id_delimiter=None):
# noinspection PyPackageRequirements
from pyspark import SparkContext
sc = SparkContext.getOrCreate()
fun = hdfs_utils.deserialize if in_serialized else lambda x: (x.partition(id_delimiter)[0],
x.partition(id_delimiter)[2])
rdd = materialize(
sc.textFile(paths, partitions)
.map(fun)
.repartition(partitions)
)
return Table(rdd=rdd)
def from_localfs(paths: str, partitions, in_serialized=True, id_delimiter=None):
# noinspection PyPackageRequirements
from pyspark import SparkContext
sc = SparkContext.getOrCreate()
fun = hdfs_utils.deserialize if in_serialized else lambda x: (x.partition(id_delimiter)[0],
x.partition(id_delimiter)[2])
rdd = materialize(
sc.textFile(paths, partitions)
.map(fun)
.repartition(partitions)
)
return Table(rdd=rdd)
def from_hive(tb_name, db_name, partitions):
from pyspark.sql import SparkSession
session = SparkSession.builder.enableHiveSupport().getOrCreate()
rdd = materialize(
session.sql(f"select * from {db_name}.{tb_name}")
.rdd.map(hive_utils.from_row)
.repartition(partitions)
)
return Table(rdd=rdd)
def from_rdd(rdd):
rdd = materialize(rdd)
return Table(rdd=rdd)
def _fail_on_stopiteration(fn):
# noinspection PyPackageRequirements
from pyspark import util
return util.fail_on_stopiteration(fn)
def _map(rdd, func):
def _fn(x):
return func(x[0], x[1])
def _func(_, iterator):
return map(_fail_on_stopiteration(_fn), iterator)
return rdd.mapPartitionsWithIndex(_func, preservesPartitioning=False)
def _map_value(rdd, func):
def _fn(x):
return x[0], func(x[1])
def _func(_, iterator):
return map(_fail_on_stopiteration(_fn), iterator)
return rdd.mapPartitionsWithIndex(_func, preservesPartitioning=True)
def _map_partitions(rdd, func):
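    # apply func to the whole iterator of each partition and key the single
    # per-partition result with a generated uuid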
def _func(_, iterator):
return [(str(uuid.uuid1()), func(iterator))]
return rdd.mapPartitionsWithIndex(_func, preservesPartitioning=False)
def _join(rdd, other, func=None):
num_partitions = max(rdd.getNumPartitions(), other.getNumPartitions())
rtn_rdd = rdd.join(other, numPartitions=num_partitions)
if func is not None:
rtn_rdd = _map_value(rtn_rdd, lambda x: func(x[0], x[1]))
return rtn_rdd
def _glom(rdd):
def _func(_, iterator):
yield list(iterator)
return rdd.mapPartitionsWithIndex(_func)
def _exactly_sample(rdd, num: int, seed: int):
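    # split the requested sample size across partitions with sequential
    # hypergeometric draws (multivariate hypergeometric sampling), so the
    # per-partition sizes sum to exactly `num`, then reservoir-sample that
    # many items inside each partition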
split_size = rdd.mapPartitionsWithIndex(
lambda s, it: [(s, sum(1 for _ in it))]
).collectAsMap()
total = sum(split_size.values())
if num > total:
raise ValueError(f"not enough data to sample, own {total} but required {num}")
# random the size of each split
sampled_size = {}
for split, size in split_size.items():
if size <= 0:
sampled_size[split] = 0
else:
sampled_size[split] = hypergeom.rvs(M=total, n=size, N=num)
total = total - size
num = num - sampled_size[split]
return rdd.mapPartitionsWithIndex(
_ReservoirSample(split_sample_size=sampled_size, seed=seed).func,
preservesPartitioning=True,
)
class _ReservoirSample(RDDSamplerBase):
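    # per-partition reservoir sampling: keep the first `size` items, then
    # replace a random slot with decreasing probability so that every item
    # ends up in the sample with equal probability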
def __init__(self, split_sample_size, seed):
RDDSamplerBase.__init__(self, False, seed)
self._split_sample_size = split_sample_size
self._counter = 0
self._sample = []
def func(self, split, iterator):
self.initRandomGenerator(split)
size = self._split_sample_size[split]
for obj in iterator:
self._counter += 1
if len(self._sample) < size:
self._sample.append(obj)
continue
randint = self._random.randint(1, self._counter)
if randint <= size:
self._sample[randint - 1] = obj
return self._sample
def _filter(rdd, func):
def _fn(x):
return func(x[0], x[1])
def _func(_, iterator):
return filter(_fail_on_stopiteration(_fn), iterator)
return rdd.mapPartitionsWithIndex(_func, preservesPartitioning=True)
def _subtract_by_key(rdd, other):
return rdd.subtractByKey(other, rdd.getNumPartitions())
def _union(rdd, other, func):
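    # without a merge function fall back to a plain rdd union (duplicate keys
    # kept); with one, cogroup by key and resolve keys present on both sides
    # through func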
num_partition = max(rdd.getNumPartitions(), other.getNumPartitions())
if func is None:
return rdd.union(other).coalesce(num_partition)
else:
def _func(pair):
iter1, iter2 = pair
val1 = list(iter1)
val2 = list(iter2)
if not val1:
return val2[0]
if not val2:
return val1[0]
return func(val1[0], val2[0])
return _map_value(rdd.cogroup(other, num_partition), _func)
def _flat_map(rdd, func):
def _fn(x):
return func(x[0], x[1])
def _func(_, iterator):
return chain.from_iterable(map(_fail_on_stopiteration(_fn), iterator))
return rdd.mapPartitionsWithIndex(_func, preservesPartitioning=False)
| 11,978 | 29.403553 | 105 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/spark/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing.spark._csession import CSession
from fate_arch.computing.spark._table import Table, from_hdfs, from_rdd, from_hive, from_localfs
from fate_arch.computing.spark._materialize import get_storage_level, materialize
__all__ = ['Table', 'CSession', 'from_hdfs', 'from_hive', 'from_localfs', 'from_rdd',
'get_storage_level', 'materialize']
| 987 | 41.956522 | 96 |
py
|
FATE
|
FATE-master/python/fate_arch/computing/spark/_csession.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Iterable
from fate_arch.abc import AddressABC
from fate_arch.abc import CSessionABC
from fate_arch.common.address import LocalFSAddress
from fate_arch.computing.spark._table import from_hdfs, from_rdd, from_hive, from_localfs
from fate_arch.common import log
LOGGER = log.getLogger()
class CSession(CSessionABC):
"""
manage RDDTable
"""
def __init__(self, session_id):
self._session_id = session_id
def load(self, address: AddressABC, partitions, schema, **kwargs):
from fate_arch.common.address import HDFSAddress
if isinstance(address, HDFSAddress):
table = from_hdfs(
paths=f"{address.name_node}/{address.path}",
partitions=partitions,
in_serialized=kwargs.get(
"in_serialized",
True),
id_delimiter=kwargs.get(
"id_delimiter",
','))
table.schema = schema
return table
from fate_arch.common.address import PathAddress
if isinstance(address, PathAddress):
from fate_arch.computing.non_distributed import LocalData
from fate_arch.computing import ComputingEngine
return LocalData(address.path, engine=ComputingEngine.SPARK)
from fate_arch.common.address import HiveAddress, LinkisHiveAddress
if isinstance(address, (HiveAddress, LinkisHiveAddress)):
table = from_hive(
tb_name=address.name,
db_name=address.database,
partitions=partitions,
)
table.schema = schema
return table
if isinstance(address, LocalFSAddress):
table = from_localfs(
paths=address.path, partitions=partitions, in_serialized=kwargs.get(
"in_serialized", True), id_delimiter=kwargs.get(
"id_delimiter", ','))
table.schema = schema
return table
raise NotImplementedError(
f"address type {type(address)} not supported with spark backend"
)
def parallelize(self, data: Iterable, partition: int, include_key: bool, **kwargs):
# noinspection PyPackageRequirements
from pyspark import SparkContext
_iter = data if include_key else enumerate(data)
rdd = SparkContext.getOrCreate().parallelize(_iter, partition)
return from_rdd(rdd)
@property
def session_id(self):
return self._session_id
def cleanup(self, name, namespace):
pass
def stop(self):
pass
def kill(self):
pass
def destroy(self):
pass
| 3,335 | 31.705882 | 89 |
py
|
FATE
|
FATE-master/python/fate_arch/session/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.computing import is_table
from fate_arch.common._parties import PartiesInfo, Role
from fate_arch.session._session import Session, computing_session, get_session, get_parties, get_computing_session
__all__ = [
'is_table',
'Session',
'PartiesInfo',
'computing_session',
'get_session',
'get_parties',
'get_computing_session',
'Role']
| 997 | 31.193548 | 114 |
py
|
FATE
|
FATE-master/python/fate_arch/session/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
import uuid
import peewee
from fate_arch.abc import CSessionABC, FederationABC, CTableABC, StorageSessionABC, StorageTableABC, StorageTableMetaABC
from fate_arch.common import engine_utils, EngineType, Party
from fate_arch.common import log, base_utils
from fate_arch.common import remote_status
from fate_arch.common._parties import PartiesInfo
from fate_arch.computing import ComputingEngine
from fate_arch.federation import FederationEngine
from fate_arch.metastore.db_models import DB, SessionRecord, init_database_tables
from fate_arch.storage import StorageEngine, StorageSessionBase
LOGGER = log.getLogger()
class Session(object):
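    """
    Manager session: lazily creates and tracks the computing, federation and
    storage sessions of a job, records them in the meta database, and can
    restore or destroy them all through the session records.
    """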
__GLOBAL_SESSION = None
@classmethod
def get_global(cls):
return cls.__GLOBAL_SESSION
@classmethod
def _as_global(cls, sess):
cls.__GLOBAL_SESSION = sess
def as_global(self):
self._as_global(self)
return self
def __init__(self, session_id: str = None, options=None):
if options is None:
options = {}
engines = engine_utils.get_engines()
LOGGER.info(f"using engines: {engines}")
computing_type = engines.get(EngineType.COMPUTING, None)
if computing_type is None:
raise RuntimeError(f"must set default engines on conf/service_conf.yaml")
self._computing_type = engines.get(EngineType.COMPUTING, None)
self._federation_type = engines.get(EngineType.FEDERATION, None)
self._storage_engine = engines.get(EngineType.STORAGE, None)
self._computing_session: typing.Optional[CSessionABC] = None
self._federation_session: typing.Optional[FederationABC] = None
        self._storage_session: typing.Dict[str, StorageSessionABC] = {}
self._parties_info: typing.Optional[PartiesInfo] = None
self._all_party_info: typing.List[Party] = []
self._session_id = str(uuid.uuid1()) if not session_id else session_id
self._logger = LOGGER if options.get("logger", None) is None else options.get("logger", None)
self._logger.info(f"create manager session {self._session_id}")
# init meta db
init_database_tables()
@property
def session_id(self) -> str:
return self._session_id
def _open(self):
return self
def _close(self):
self.destroy_all_sessions()
def __enter__(self):
return self._open()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_tb:
self._logger.exception("", exc_info=(exc_type, exc_val, exc_tb))
return self._close()
def init_computing(self,
computing_session_id: str = None,
record: bool = True,
**kwargs):
computing_session_id = f"{self._session_id}_computing_{uuid.uuid1()}" if not computing_session_id else computing_session_id
if self.is_computing_valid:
raise RuntimeError(f"computing session already valid")
if record:
self.save_record(engine_type=EngineType.COMPUTING,
engine_name=self._computing_type,
engine_session_id=computing_session_id)
if self._computing_type == ComputingEngine.STANDALONE:
from fate_arch.computing.standalone import CSession
options = kwargs.get("options", {})
self._computing_session = CSession(session_id=computing_session_id, options=options)
self._computing_type = ComputingEngine.STANDALONE
return self
if self._computing_type == ComputingEngine.EGGROLL:
from fate_arch.computing.eggroll import CSession
options = kwargs.get("options", {})
self._computing_session = CSession(
session_id=computing_session_id, options=options
)
return self
if self._computing_type == ComputingEngine.SPARK:
from fate_arch.computing.spark import CSession
self._computing_session = CSession(session_id=computing_session_id)
self._computing_type = ComputingEngine.SPARK
return self
if self._computing_type == ComputingEngine.LINKIS_SPARK:
from fate_arch.computing.spark import CSession
self._computing_session = CSession(session_id=computing_session_id)
self._computing_type = ComputingEngine.LINKIS_SPARK
return self
raise RuntimeError(f"{self._computing_type} not supported")
def init_federation(
self,
federation_session_id: str,
*,
runtime_conf: typing.Optional[dict] = None,
parties_info: typing.Optional[PartiesInfo] = None,
service_conf: typing.Optional[dict] = None,
record: bool = True,
):
if record:
self.save_record(engine_type=EngineType.FEDERATION,
engine_name=self._federation_type,
engine_session_id=federation_session_id,
engine_runtime_conf={"runtime_conf": runtime_conf, "service_conf": service_conf})
if parties_info is None:
if runtime_conf is None:
raise RuntimeError(f"`party_info` and `runtime_conf` are both `None`")
parties_info = PartiesInfo.from_conf(runtime_conf)
self._parties_info = parties_info
self._all_party_info = [Party(k, p) for k, v in runtime_conf['role'].items() for p in v]
if self.is_federation_valid:
raise RuntimeError("federation session already valid")
if self._federation_type == FederationEngine.STANDALONE:
from fate_arch.computing.standalone import CSession
from fate_arch.federation.standalone import Federation
if not self.is_computing_valid or not isinstance(
self._computing_session, CSession
):
raise RuntimeError(
f"require computing with type {ComputingEngine.STANDALONE} valid"
)
self._federation_session = Federation(
standalone_session=self._computing_session.get_standalone_session(),
federation_session_id=federation_session_id,
party=parties_info.local_party,
)
return self
if self._federation_type == FederationEngine.EGGROLL:
from fate_arch.computing.eggroll import CSession
from fate_arch.federation.eggroll import Federation
if not self.is_computing_valid or not isinstance(
self._computing_session, CSession
):
raise RuntimeError(
f"require computing with type {ComputingEngine.EGGROLL} valid"
)
self._federation_session = Federation(
rp_ctx=self._computing_session.get_rpc(),
rs_session_id=federation_session_id,
party=parties_info.local_party,
proxy_endpoint=f"{service_conf['host']}:{service_conf['port']}",
)
return self
if self._federation_type == FederationEngine.RABBITMQ:
from fate_arch.federation.rabbitmq import Federation
self._federation_session = Federation.from_conf(
federation_session_id=federation_session_id,
party=parties_info.local_party,
runtime_conf=runtime_conf,
rabbitmq_config=service_conf,
)
return self
# Add pulsar support
if self._federation_type == FederationEngine.PULSAR:
from fate_arch.federation.pulsar import Federation
self._federation_session = Federation.from_conf(
federation_session_id=federation_session_id,
party=parties_info.local_party,
runtime_conf=runtime_conf,
pulsar_config=service_conf,
)
return self
raise RuntimeError(f"{self._federation_type} not supported")
def _get_or_create_storage(self,
storage_session_id=None,
storage_engine=None,
record: bool = True,
**kwargs) -> StorageSessionABC:
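        # reuse an existing storage session with the same id, or any session
        # already created for the same storage engine; otherwise create a new
        # engine-specific session and optionally record it in the meta db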
storage_session_id = f"{self._session_id}_storage_{uuid.uuid1()}" if not storage_session_id else storage_session_id
if storage_session_id in self._storage_session:
return self._storage_session[storage_session_id]
else:
if storage_engine is None:
storage_engine = self._storage_engine
for session in self._storage_session.values():
if storage_engine == session.engine:
return session
if record:
self.save_record(engine_type=EngineType.STORAGE,
engine_name=storage_engine,
engine_session_id=storage_session_id)
if storage_engine == StorageEngine.EGGROLL:
from fate_arch.storage.eggroll import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.STANDALONE:
from fate_arch.storage.standalone import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.MYSQL:
from fate_arch.storage.mysql import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.HDFS:
from fate_arch.storage.hdfs import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.HIVE:
from fate_arch.storage.hive import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.LINKIS_HIVE:
from fate_arch.storage.linkis_hive import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.PATH:
from fate_arch.storage.path import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.LOCALFS:
from fate_arch.storage.localfs import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
elif storage_engine == StorageEngine.API:
from fate_arch.storage.api import StorageSession
storage_session = StorageSession(session_id=storage_session_id, options=kwargs.get("options", {}))
else:
raise NotImplementedError(f"can not be initialized with storage engine: {storage_engine}")
self._storage_session[storage_session_id] = storage_session
return storage_session
def get_table(self, name, namespace, ignore_disable=False) -> typing.Union[StorageTableABC, None]:
meta = Session.get_table_meta(name=name, namespace=namespace)
if meta is None:
return None
if meta.get_disable() and not ignore_disable:
raise Exception(f"table {namespace} {name} disable: {meta.get_disable()}")
engine = meta.get_engine()
storage_session = self._get_or_create_storage(storage_engine=engine)
table = storage_session.get_table(name=name, namespace=namespace)
return table
@classmethod
def get_table_meta(cls, name, namespace) -> typing.Union[StorageTableMetaABC, None]:
meta = StorageSessionBase.get_table_meta(name=name, namespace=namespace)
return meta
@classmethod
def persistent(cls, computing_table: CTableABC, namespace, name, schema=None, part_of_data=None,
engine=None, engine_address=None, store_type=None, token: typing.Dict = None) -> StorageTableMetaABC:
return StorageSessionBase.persistent(computing_table=computing_table,
namespace=namespace,
name=name,
schema=schema,
part_of_data=part_of_data,
engine=engine,
engine_address=engine_address,
store_type=store_type,
token=token)
@property
def computing(self) -> CSessionABC:
return self._computing_session
@property
def federation(self) -> FederationABC:
return self._federation_session
def storage(self, **kwargs):
return self._get_or_create_storage(**kwargs)
@property
def parties(self):
return self._parties_info
@property
def is_computing_valid(self):
return self._computing_session is not None
@property
def is_federation_valid(self):
return self._federation_session is not None
@DB.connection_context()
def save_record(self, engine_type, engine_name, engine_session_id, engine_runtime_conf=None):
self._logger.info(
f"try to save session record for manager {self._session_id}, {engine_type} {engine_name}"
f" {engine_session_id}")
session_record = SessionRecord()
session_record.f_manager_session_id = self._session_id
session_record.f_engine_type = engine_type
session_record.f_engine_name = engine_name
session_record.f_engine_session_id = engine_session_id
session_record.f_engine_address = engine_runtime_conf if engine_runtime_conf else {}
session_record.f_create_time = base_utils.current_timestamp()
msg = f"save storage session record for manager {self._session_id}, {engine_type} {engine_name} " \
f"{engine_session_id}"
try:
effect_count = session_record.save(force_insert=True)
if effect_count != 1:
raise RuntimeError(f"{msg} failed")
except peewee.IntegrityError as e:
LOGGER.warning(e)
except Exception as e:
raise RuntimeError(f"{msg} exception", e)
self._logger.info(
f"save session record for manager {self._session_id}, {engine_type} {engine_name} "
f"{engine_session_id} successfully")
@DB.connection_context()
def delete_session_record(self, engine_session_id, manager_session_id=None):
if not manager_session_id:
rows = SessionRecord.delete().where(SessionRecord.f_engine_session_id == engine_session_id).execute()
else:
rows = SessionRecord.delete().where(SessionRecord.f_engine_session_id == engine_session_id,
SessionRecord.f_manager_session_id == manager_session_id).execute()
if rows > 0:
self._logger.info(f"delete session {engine_session_id} record successfully")
else:
self._logger.warning(f"delete session {engine_session_id} record failed")
@classmethod
@DB.connection_context()
def query_sessions(cls, reverse=None, order_by=None, **kwargs):
try:
session_records = SessionRecord.query(reverse=reverse, order_by=order_by, **kwargs)
return session_records
except BaseException:
return []
@DB.connection_context()
def get_session_from_record(self, **kwargs):
self._logger.info(f"query by manager session id {self._session_id}")
session_records = self.query_sessions(manager_session_id=self.session_id, **kwargs)
self._logger.info([session_record.f_engine_session_id for session_record in session_records])
for session_record in session_records:
try:
engine_session_id = session_record.f_engine_session_id
if session_record.f_engine_type == EngineType.COMPUTING:
self._init_computing_if_not_valid(computing_session_id=engine_session_id)
elif session_record.f_engine_type == EngineType.STORAGE:
self._get_or_create_storage(storage_session_id=engine_session_id,
storage_engine=session_record.f_engine_name,
record=False)
elif session_record.f_engine_type == EngineType.FEDERATION:
self._logger.info(f"engine runtime conf: {session_record.f_engine_address}")
self._init_federation_if_not_valid(federation_session_id=engine_session_id,
engine_runtime_conf=session_record.f_engine_address)
except Exception as e:
self._logger.info(e)
self.delete_session_record(engine_session_id=session_record.f_engine_session_id)
def _init_computing_if_not_valid(self, computing_session_id):
if not self.is_computing_valid:
self.init_computing(computing_session_id=computing_session_id, record=False)
return True
elif self._computing_session.session_id != computing_session_id:
self._logger.warning(
f"manager session had computing session {self._computing_session.session_id} "
f"different with query from db session {computing_session_id}")
return False
else:
# already exists
return True
def _init_federation_if_not_valid(self, federation_session_id, engine_runtime_conf):
if not self.is_federation_valid:
try:
self._logger.info(f"init federation session {federation_session_id} type {self._federation_type}")
self.init_federation(federation_session_id=federation_session_id,
runtime_conf=engine_runtime_conf.get("runtime_conf"),
service_conf=engine_runtime_conf.get("service_conf"),
record=False)
self._logger.info(f"init federation session {federation_session_id} type {self._federation_type} done")
return True
except Exception as e:
self._logger.warning(
f"init federation session {federation_session_id} type {self._federation_type} failed: {e}")
return False
elif self._federation_session.session_id != federation_session_id:
self._logger.warning(
f"manager session had federation session {self._federation_session.session_id} different with query from db session {federation_session_id}")
return False
else:
# already exists
return True
def destroy_all_sessions(self, **kwargs):
self._logger.info(f"start destroy manager session {self._session_id} all sessions")
self.get_session_from_record(**kwargs)
self.destroy_federation_session()
self.destroy_storage_session()
self.destroy_computing_session()
self._logger.info(f"finish destroy manager session {self._session_id} all sessions")
def destroy_computing_session(self):
if self.is_computing_valid:
try:
self._logger.info(f"try to destroy computing session {self._computing_session.session_id}")
self._computing_session.destroy()
except Exception as e:
self._logger.info(f"destroy computing session {self._computing_session.session_id} failed", e)
self.delete_session_record(engine_session_id=self._computing_session.session_id)
self._computing_session = None
def destroy_storage_session(self):
for session_id, session in self._storage_session.items():
try:
self._logger.info(f"try to destroy storage session {session_id}")
session.destroy()
self._logger.info(f"destroy storage session {session_id} successfully")
except Exception as e:
self._logger.exception(f"destroy storage session {session_id} failed", e)
self.delete_session_record(engine_session_id=session_id)
self._storage_session = {}
def destroy_federation_session(self):
if self.is_federation_valid:
try:
if self._parties_info.local_party.role != "local":
self._logger.info(
f"try to destroy federation session {self._federation_session.session_id} type"
f" {EngineType.FEDERATION} role {self._parties_info.local_party.role}")
self._federation_session.destroy(parties=self._all_party_info)
self._logger.info(f"destroy federation session {self._federation_session.session_id} done")
except Exception as e:
self._logger.info(f"destroy federation failed: {e}")
self.delete_session_record(engine_session_id=self._federation_session.session_id,
manager_session_id=self.session_id)
self._federation_session = None
def wait_remote_all_done(self, timeout=None):
LOGGER.info(f"remote futures: {remote_status._remote_futures}, waiting...")
remote_status.wait_all_remote_done(timeout)
LOGGER.info(f"remote futures: {remote_status._remote_futures}, all done")
def get_session() -> Session:
return Session.get_global()
def get_parties() -> PartiesInfo:
return get_session().parties
def get_computing_session() -> CSessionABC:
return get_session().computing
# noinspection PyPep8Naming
class computing_session(object):
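    """
    Convenience wrapper around the global Session's computing session.

    Typical usage (sketch):
        computing_session.init(session_id="some_session_id")
        table = computing_session.parallelize([("k", 1)], partition=1, include_key=True)
        computing_session.stop()
    """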
@staticmethod
def init(session_id, options=None):
Session(options=options).as_global().init_computing(session_id)
@staticmethod
def parallelize(data: typing.Iterable, partition: int, include_key: bool, **kwargs) -> CTableABC:
return get_computing_session().parallelize(data, partition=partition, include_key=include_key, **kwargs)
@staticmethod
def stop():
return get_computing_session().stop()
| 23,317 | 43.5 | 157 |
py
|
FATE
|
FATE-master/python/fate_arch/tests/test_arch_api.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import numpy as np
from fate_arch import session
# build a manager session and start a computing session with a generated id
sess = session.Session()
sess.init_computing()
data = []
for i in range(10):
features = np.random.random(10)
features = ",".join([str(x) for x in features])
data.append((i, features))
c_table = sess.computing.parallelize(data, include_key=True, partition=4)
for k, v in c_table.collect():
print(v)
print()
# persist the computing table as a storage table and register its metadata
table_meta = sess.persistent(computing_table=c_table, namespace="experiment", name=str(uuid.uuid1()))
storage_session = sess.storage()
s_table = storage_session.get_table(namespace=table_meta.get_namespace(), name=table_meta.get_name())
for k, v in s_table.collect():
print(v)
print()
# reload the persisted table back into the computing engine via its metadata
t2 = sess.computing.load(
table_meta.get_address(),
partitions=table_meta.get_partitions(),
schema=table_meta.get_schema())
for k, v in t2.collect():
print(v)
sess.destroy_all_sessions()
| 1,500 | 27.865385 | 101 |
py
|
FATE
|
FATE-master/python/fate_arch/tests/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/tests/computing/spark_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
sc = SparkContext('local', 'test')
a = []
for i in range(10):
a.append((i, str(i)))
rdd1 = sc.parallelize(a)
rdd2 = rdd1.mapValues(lambda x: x + "1")
for k, v in rdd2.collect():
print(f"{type(k)}: {k} {type(v)} {v}")
| 877 | 32.769231 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/tests/storage/metastore_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from fate_arch.metastore import base_model
class TestBaseModel(unittest.TestCase):
def test_auto_date_timestamp_field(self):
self.assertEqual(
base_model.auto_date_timestamp_field(), {
'write_access_time', 'create_time', 'read_access_time', 'end_time', 'update_time', 'start_time'})
def test(self):
from peewee import IntegerField, FloatField, AutoField, BigAutoField, BigIntegerField, BitField
from peewee import CharField, TextField, BooleanField, BigBitField
from fate_arch.metastore.base_model import JSONField, LongTextField
for f in {IntegerField, FloatField, AutoField, BigAutoField, BigIntegerField, BitField}:
self.assertEqual(base_model.is_continuous_field(f), True)
for f in {CharField, TextField, BooleanField, BigBitField}:
self.assertEqual(base_model.is_continuous_field(f), False)
for f in {JSONField, LongTextField}:
self.assertEqual(base_model.is_continuous_field(f), False)
if __name__ == '__main__':
unittest.main()
| 1,704 | 41.625 | 113 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/_types.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
DEFAULT_ID_DELIMITER = ","
class StorageTableOrigin(object):
TABLE_BIND = "table_bind"
READER = "reader"
UPLOAD = "upload"
OUTPUT = "output"
class StorageEngine(object):
STANDALONE = 'STANDALONE'
EGGROLL = 'EGGROLL'
HDFS = 'HDFS'
MYSQL = 'MYSQL'
SIMPLE = 'SIMPLE'
PATH = 'PATH'
HIVE = 'HIVE'
LINKIS_HIVE = 'LINKIS_HIVE'
LOCALFS = 'LOCALFS'
API = 'API'
class StandaloneStoreType(object):
ROLLPAIR_IN_MEMORY = 'IN_MEMORY'
ROLLPAIR_LMDB = 'LMDB'
DEFAULT = ROLLPAIR_LMDB
class EggRollStoreType(object):
ROLLPAIR_IN_MEMORY = 'IN_MEMORY'
ROLLPAIR_LMDB = 'LMDB'
ROLLPAIR_LEVELDB = 'LEVEL_DB'
ROLLFRAME_FILE = 'ROLL_FRAME_FILE'
ROLLPAIR_ROLLSITE = 'ROLL_SITE'
ROLLPAIR_FILE = 'ROLL_PAIR_FILE'
ROLLPAIR_MMAP = 'ROLL_PAIR_MMAP'
ROLLPAIR_CACHE = 'ROLL_PAIR_CACHE'
ROLLPAIR_QUEUE = 'ROLL_PAIR_QUEUE'
DEFAULT = ROLLPAIR_LMDB
class HDFSStoreType(object):
RAM_DISK = 'RAM_DISK'
SSD = 'SSD'
DISK = 'DISK'
ARCHIVE = 'ARCHIVE'
DEFAULT = None
class PathStoreType(object):
PICTURE = 'PICTURE'
class FileStoreType(object):
CSV = 'CSV'
class ApiStoreType(object):
EXTERNAL = 'EXTERNAL'
class MySQLStoreType(object):
InnoDB = "InnoDB"
MyISAM = "MyISAM"
ISAM = "ISAM"
HEAP = "HEAP"
DEFAULT = None
class HiveStoreType(object):
DEFAULT = "HDFS"
class LinkisHiveStoreType(object):
DEFAULT = "HDFS"
class LocalFSStoreType(object):
RAM_DISK = 'RAM_DISK'
SSD = 'SSD'
DISK = 'DISK'
ARCHIVE = 'ARCHIVE'
DEFAULT = None
class StorageTableMetaType(object):
ENGINE = "engine"
TYPE = "type"
SCHEMA = "schema"
PART_OF_DATA = "part_of_data"
COUNT = "count"
PARTITIONS = "partitions"
| 2,398 | 21.009174 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/_utils.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/fate_arch/storage/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
from typing import Iterable
import peewee
from fate_arch.abc import StorageTableMetaABC, StorageTableABC, AddressABC
from fate_arch.common.base_utils import current_timestamp
from fate_arch.common.log import getLogger
from fate_arch.relation_ship import Relationship
from fate_arch.metastore.db_models import DB, StorageTableMetaModel
LOGGER = getLogger()
class StorageTableBase(StorageTableABC):
def __init__(self, name, namespace, address, partitions, options, engine, store_type):
self._name = name
self._namespace = namespace
self._address = address
self._partitions = partitions
self._options = options if options else {}
self._engine = engine
self._store_type = store_type
self._meta = None
self._read_access_time = None
self._write_access_time = None
@property
def name(self):
return self._name
@property
def namespace(self):
return self._namespace
@property
def address(self):
return self._address
@property
def partitions(self):
return self._partitions
@property
def options(self):
return self._options
@property
def engine(self):
return self._engine
@property
def store_type(self):
return self._store_type
@property
def meta(self):
return self._meta
@meta.setter
def meta(self, meta):
self._meta = meta
@property
def read_access_time(self):
return self._read_access_time
@property
def write_access_time(self):
return self._write_access_time
def update_meta(self,
schema=None,
count=None,
part_of_data=None,
description=None,
partitions=None,
**kwargs):
self._meta.update_metas(schema=schema,
count=count,
part_of_data=part_of_data,
description=description,
partitions=partitions,
**kwargs)
def create_meta(self, **kwargs):
table_meta = StorageTableMeta(name=self._name, namespace=self._namespace, new=True)
table_meta.set_metas(**kwargs)
table_meta.address = self._address
table_meta.partitions = self._partitions
table_meta.engine = self._engine
table_meta.store_type = self._store_type
table_meta.options = self._options
table_meta.create()
self._meta = table_meta
return table_meta
def check_address(self):
return True
def put_all(self, kv_list: Iterable, **kwargs):
self._update_write_access_time()
self._put_all(kv_list, **kwargs)
def collect(self, **kwargs) -> list:
self._update_read_access_time()
return self._collect(**kwargs)
def count(self):
self._update_read_access_time()
count = self._count()
self.meta.update_metas(count=count)
return count
def read(self):
self._update_read_access_time()
return self._read()
def destroy(self):
self.meta.destroy_metas()
self._destroy()
def save_as(self, address, name, namespace, partitions=None, **kwargs):
table = self._save_as(address, name, namespace, partitions, **kwargs)
table.create_meta(**kwargs)
return table
def _update_read_access_time(self, read_access_time=None):
read_access_time = current_timestamp() if not read_access_time else read_access_time
self._meta.update_metas(read_access_time=read_access_time)
def _update_write_access_time(self, write_access_time=None):
write_access_time = current_timestamp() if not write_access_time else write_access_time
self._meta.update_metas(write_access_time=write_access_time)
# to be implemented
def _put_all(self, kv_list: Iterable, **kwargs):
raise NotImplementedError()
def _collect(self, **kwargs) -> list:
raise NotImplementedError()
def _count(self):
raise NotImplementedError()
def _read(self):
raise NotImplementedError()
def _destroy(self):
raise NotImplementedError()
def _save_as(self, address, name, namespace, partitions=None, schema=None, **kwargs):
raise NotImplementedError()
class StorageTableMeta(StorageTableMetaABC):
def __init__(self, name, namespace, new=False, create_address=True):
self.name = name
self.namespace = namespace
self.address = None
self.engine = None
self.store_type = None
self.options = None
self.partitions = None
self.in_serialized = None
self.have_head = None
self.id_delimiter = None
self.extend_sid = False
self.auto_increasing_sid = None
self.schema = None
self.count = None
self.part_of_data = None
self.description = None
self.origin = None
self.disable = None
self.create_time = None
self.update_time = None
self.read_access_time = None
self.write_access_time = None
if self.options is None:
self.options = {}
if self.schema is None:
self.schema = {}
if self.part_of_data is None:
self.part_of_data = []
if not new:
self.build(create_address)
def build(self, create_address):
for k, v in self.table_meta.__dict__["__data__"].items():
setattr(self, k.lstrip("f_"), v)
if create_address:
self.address = self.create_address(storage_engine=self.engine, address_dict=self.address)
def __new__(cls, *args, **kwargs):
if not kwargs.get("new", False):
name, namespace = kwargs.get("name"), kwargs.get("namespace")
if not name or not namespace:
return None
tables_meta = cls.query_table_meta(filter_fields=dict(name=name, namespace=namespace))
if not tables_meta:
return None
self = super().__new__(cls)
setattr(self, "table_meta", tables_meta[0])
return self
else:
return super().__new__(cls)
def exists(self):
if hasattr(self, "table_meta"):
return True
else:
return False
@DB.connection_context()
def create(self):
table_meta = StorageTableMetaModel()
table_meta.f_create_time = current_timestamp()
table_meta.f_schema = {}
table_meta.f_part_of_data = []
for k, v in self.to_dict().items():
attr_name = 'f_%s' % k
if hasattr(StorageTableMetaModel, attr_name):
setattr(table_meta, attr_name, v if not issubclass(type(v), AddressABC) else v.__dict__)
try:
rows = table_meta.save(force_insert=True)
if rows != 1:
raise Exception("create table meta failed")
except peewee.IntegrityError as e:
            if e.args[0] == 1062:
                # MySQL duplicate entry: the meta record already exists
                pass
            elif isinstance(e.args[0], str) and "UNIQUE constraint failed" in e.args[0]:
                # SQLite duplicate entry: the meta record already exists
                pass
else:
raise e
except Exception as e:
raise e
def set_metas(self, **kwargs):
for k, v in kwargs.items():
if hasattr(self, k):
setattr(self, k, v)
@classmethod
@DB.connection_context()
def query_table_meta(cls, filter_fields, query_fields=None):
filters = []
querys = []
for f_n, f_v in filter_fields.items():
attr_name = 'f_%s' % f_n
if hasattr(StorageTableMetaModel, attr_name):
filters.append(operator.attrgetter('f_%s' % f_n)(StorageTableMetaModel) == f_v)
if query_fields:
for f_n in query_fields:
attr_name = 'f_%s' % f_n
if hasattr(StorageTableMetaModel, attr_name):
querys.append(operator.attrgetter('f_%s' % f_n)(StorageTableMetaModel))
if filters:
if querys:
tables_meta = StorageTableMetaModel.select(querys).where(*filters)
else:
tables_meta = StorageTableMetaModel.select().where(*filters)
return [table_meta for table_meta in tables_meta]
else:
# not allow query all table
return []
@DB.connection_context()
def update_metas(self, schema=None, count=None, part_of_data=None, description=None, partitions=None,
in_serialized=None, **kwargs):
meta_info = {}
for k, v in locals().items():
if k not in ["self", "kwargs", "meta_info"] and v is not None:
meta_info[k] = v
meta_info.update(kwargs)
meta_info["name"] = meta_info.get("name", self.name)
meta_info["namespace"] = meta_info.get("namespace", self.namespace)
update_filters = []
primary_keys = StorageTableMetaModel._meta.primary_key.field_names
for p_k in primary_keys:
update_filters.append(operator.attrgetter(p_k)(StorageTableMetaModel) == meta_info[p_k.lstrip("f_")])
table_meta = StorageTableMetaModel()
update_fields = {}
for k, v in meta_info.items():
attr_name = 'f_%s' % k
if hasattr(StorageTableMetaModel, attr_name) and attr_name not in primary_keys:
if k == "part_of_data":
if len(v) < 100:
tmp = v
else:
tmp = v[:100]
update_fields[operator.attrgetter(attr_name)(StorageTableMetaModel)] = tmp
else:
update_fields[operator.attrgetter(attr_name)(StorageTableMetaModel)] = v
if update_filters:
operate = table_meta.update(update_fields).where(*update_filters)
else:
operate = table_meta.update(update_fields)
if count:
self.count = count
_return = operate.execute()
_meta = StorageTableMeta(name=self.name, namespace=self.namespace)
return _return > 0, _meta
@DB.connection_context()
def destroy_metas(self):
StorageTableMetaModel \
.delete() \
.where(StorageTableMetaModel.f_name == self.name,
StorageTableMetaModel.f_namespace == self.namespace) \
.execute()
@classmethod
def create_address(cls, storage_engine, address_dict):
address_class = Relationship.EngineToAddress.get(storage_engine)
kwargs = {}
for k in address_class.__init__.__code__.co_varnames:
if k == "self":
continue
if address_dict.get(k, None):
kwargs[k] = address_dict[k]
return address_class(**kwargs)
def get_name(self):
return self.name
def get_namespace(self):
return self.namespace
def get_address(self):
return self.address
def get_engine(self):
return self.engine
def get_store_type(self):
return self.store_type
def get_options(self):
return self.options
def get_partitions(self):
return self.partitions
def get_in_serialized(self):
return self.in_serialized
def get_id_delimiter(self):
return self.id_delimiter
def get_extend_sid(self):
return self.extend_sid
def get_auto_increasing_sid(self):
return self.auto_increasing_sid
def get_have_head(self):
return self.have_head
def get_origin(self):
return self.origin
def get_disable(self):
return self.disable
def get_schema(self):
return self.schema
def get_count(self):
return self.count
def get_part_of_data(self):
return self.part_of_data
def get_description(self):
return self.description
def to_dict(self) -> dict:
d = {}
for k, v in self.__dict__.items():
if v is None or k == "table_meta":
continue
d[k] = v
return d
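# --- Illustrative usage sketch, not part of the original module ---
# StorageTableMeta.__new__ returns None when no metadata record exists, so a
# lookup can be guarded as below; the table name and namespace are placeholders.
def _example_meta_lookup(name="breast_hetero_guest", namespace="experiment"):
    meta = StorageTableMeta(name=name, namespace=namespace)
    if meta and meta.exists():
        return meta.get_engine(), meta.get_partitions(), meta.get_count()
    return None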
| 12,935 | 30.862069 | 113 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/__init__.py
|
from fate_arch.storage._types import StorageTableMetaType, StorageEngine
from fate_arch.storage._types import StandaloneStoreType, EggRollStoreType, \
HDFSStoreType, MySQLStoreType, \
PathStoreType, HiveStoreType, LinkisHiveStoreType, LocalFSStoreType, ApiStoreType
from fate_arch.storage._types import DEFAULT_ID_DELIMITER, StorageTableOrigin
from fate_arch.storage._session import StorageSessionBase
from fate_arch.storage._table import StorageTableBase, StorageTableMeta
| 483 | 59.5 | 85 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import typing
from fate_arch.abc import StorageSessionABC, CTableABC
from fate_arch.common import EngineType, engine_utils
from fate_arch.common.data_utils import default_output_fs_path
from fate_arch.common.log import getLogger
from fate_arch.storage._table import StorageTableMeta
from fate_arch.storage._types import StorageEngine, EggRollStoreType, StandaloneStoreType, HDFSStoreType, HiveStoreType, \
LinkisHiveStoreType, LocalFSStoreType, PathStoreType, StorageTableOrigin
from fate_arch.relation_ship import Relationship
from fate_arch.common.base_utils import current_timestamp
LOGGER = getLogger()
class StorageSessionBase(StorageSessionABC):
def __init__(self, session_id, engine):
self._session_id = session_id
self._engine = engine
def create_table(self, address, name, namespace, partitions=None, **kwargs):
table = self.table(address=address, name=name, namespace=namespace, partitions=partitions, **kwargs)
table.create_meta(**kwargs)
return table
def get_table(self, name, namespace):
meta = StorageTableMeta(name=name, namespace=namespace)
if meta and meta.exists():
table = self.table(name=meta.get_name(),
namespace=meta.get_namespace(),
address=meta.get_address(),
partitions=meta.get_partitions(),
store_type=meta.get_store_type(),
options=meta.get_options())
table.meta = meta
return table
else:
return None
@classmethod
def get_table_meta(cls, name, namespace):
meta = StorageTableMeta(name=name, namespace=namespace)
if meta and meta.exists():
return meta
else:
return None
@classmethod
def persistent(cls, computing_table: CTableABC, namespace, name, schema=None,
part_of_data=None, engine=None, engine_address=None,
store_type=None, token: typing.Dict = None) -> StorageTableMeta:
if engine:
if engine != StorageEngine.PATH and engine not in Relationship.Computing.get(
computing_table.engine, {}).get(EngineType.STORAGE, {}).get("support", []):
raise Exception(f"storage engine {engine} not supported with computing engine {computing_table.engine}")
else:
engine = Relationship.Computing.get(
computing_table.engine,
{}).get(
EngineType.STORAGE,
{}).get(
"default",
None)
if not engine:
raise Exception(f"can not found {computing_table.engine} default storage engine")
if engine_address is None:
# find engine address from service_conf.yaml
engine_address = engine_utils.get_engines_config_from_conf().get(EngineType.STORAGE, {}).get(engine, {})
address_dict = engine_address.copy()
partitions = computing_table.partitions
if engine == StorageEngine.STANDALONE:
address_dict.update({"name": name, "namespace": namespace})
store_type = StandaloneStoreType.ROLLPAIR_LMDB if store_type is None else store_type
elif engine == StorageEngine.EGGROLL:
address_dict.update({"name": name, "namespace": namespace})
store_type = EggRollStoreType.ROLLPAIR_LMDB if store_type is None else store_type
elif engine == StorageEngine.HIVE:
address_dict.update({"database": namespace, "name": f"{name}"})
store_type = HiveStoreType.DEFAULT if store_type is None else store_type
elif engine == StorageEngine.LINKIS_HIVE:
address_dict.update({"database": None, "name": f"{namespace}_{name}",
"username": token.get("username", "")})
store_type = LinkisHiveStoreType.DEFAULT if store_type is None else store_type
elif engine == StorageEngine.HDFS:
if not address_dict.get("path"):
address_dict.update({"path": default_output_fs_path(
name=name, namespace=namespace, prefix=address_dict.get("path_prefix"))})
store_type = HDFSStoreType.DISK if store_type is None else store_type
elif engine == StorageEngine.LOCALFS:
if not address_dict.get("path"):
address_dict.update({"path": default_output_fs_path(
name=name, namespace=namespace, storage_engine=StorageEngine.LOCALFS)})
store_type = LocalFSStoreType.DISK if store_type is None else store_type
elif engine == StorageEngine.PATH:
store_type = PathStoreType.PICTURE if store_type is None else store_type
else:
raise RuntimeError(f"{engine} storage is not supported")
address = StorageTableMeta.create_address(storage_engine=engine, address_dict=address_dict)
schema = schema if schema else {}
computing_table.save(address, schema=schema, partitions=partitions, store_type=store_type)
table_count = computing_table.count()
table_meta = StorageTableMeta(name=name, namespace=namespace, new=True)
table_meta.address = address
table_meta.partitions = computing_table.partitions
table_meta.engine = engine
table_meta.store_type = store_type
table_meta.schema = schema
        table_meta.part_of_data = part_of_data if part_of_data else []
table_meta.count = table_count
table_meta.write_access_time = current_timestamp()
table_meta.origin = StorageTableOrigin.OUTPUT
table_meta.create()
return table_meta
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.destroy()
def destroy(self):
try:
self.stop()
except Exception as e:
LOGGER.warning(f"stop storage session {self._session_id} failed, try to kill", e)
self.kill()
def table(self, name, namespace, address, store_type, partitions=None, **kwargs):
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
def kill(self):
raise NotImplementedError()
def cleanup(self, name, namespace):
raise NotImplementedError()
@property
def session_id(self):
return self._session_id
@property
def engine(self):
return self._engine
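# --- Illustrative sketch, not part of the original module ---
# Concrete storage engines subclass StorageSessionBase and provide table(),
# stop(), kill() and cleanup(); the skeleton below only shows the required
# overrides and does not implement a real backend.
class _ExampleStorageSession(StorageSessionBase):
    def __init__(self, session_id, options=None):
        super().__init__(session_id=session_id, engine="EXAMPLE")
    def table(self, name, namespace, address, store_type, partitions=None, **kwargs):
        raise NotImplementedError("example skeleton only")
    def stop(self):
        pass
    def kill(self):
        pass
    def cleanup(self, name, namespace):
        pass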
| 7,181 | 40.755814 | 122 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/linkis_hive/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import requests
from fate_arch.storage import StorageEngine, LinkisHiveStoreType
from fate_arch.storage import StorageTableBase
from fate_arch.storage.linkis_hive._settings import (
Token_Code,
Token_User,
STATUS_URI,
EXECUTE_URI,
)
class StorageTable(StorageTableBase):
def __init__(
self,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
storage_type: LinkisHiveStoreType = LinkisHiveStoreType.DEFAULT,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.LINKIS_HIVE,
store_type=storage_type,
)
def _count(self, **kwargs):
sql = "select count(*) from {}".format(self._address.name)
try:
count = self.execute(sql)
except BaseException:
count = 0
return count
def _collect(self, **kwargs):
if kwargs.get("is_spark"):
from pyspark.sql import SparkSession
session = SparkSession.builder.enableHiveSupport().getOrCreate()
data = session.sql(
f"select * from {self._address.database}.{self._address.name}"
)
return data
else:
sql = "select * from {}.{}".format(
self._address.database, self._address.name
)
data = self.execute(sql)
for i in data:
yield i[0], self.meta.get_id_delimiter().join(list(i[1:]))
def _put_all(self, kv_pd, **kwargs):
from pyspark.sql import SparkSession
session = SparkSession.builder.enableHiveSupport().getOrCreate()
session.sql("use {}".format(self._address.database))
spark_df = session.createDataFrame(kv_pd)
spark_df.write.saveAsTable(self._address.name, format="orc")
def _destroy(self):
sql = "drop table {}.{}".format(self._address.database, self._address.name)
return self.execute(sql)
def _save_as(self, address, name, namespace, partitions, **kwargs):
pass
def execute(self, sql):
exec_id = self._execute_entrance(sql)
while True:
status = self._status_entrance(exec_id)
if status:
break
time.sleep(1)
return self._result_entrance()
def _execute_entrance(self, sql):
execute_url = f"http://{self._address.host}:{self._address.port}{EXECUTE_URI}"
data = {
"method": EXECUTE_URI,
"params": self._address.params,
"executeApplicationName": self._address.execute_application_name,
"executionCode": sql,
"runType": self._address.run_type,
"source": self._address.source,
}
# token
headers = {
"Token-Code": Token_Code,
"Token-User": Token_User,
"Content-Type": "application/json",
}
execute_response = requests.post(url=execute_url, headers=headers, json=data)
if execute_response.json().get("status") == 0:
return execute_response.json()["data"]["execID"]
else:
raise SystemError(
f"request linkis hive execue entrance failed, status: {execute_response.json().get('status')},"
f" message: {execute_response.json().get('message')}"
)
def _status_entrance(self, exec_id):
execute_url = (
f"http://{self._address.host}:{self._address.port}{STATUS_URI}".replace(
"exec_id", exec_id
)
)
headers = {
"Token-Code": "MLSS",
"Token-User": "alexwu",
"Content-Type": "application/json",
}
execute_response = requests.Session().get(url=execute_url, headers=headers)
if execute_response.json().get("status") == 0:
execute_status = execute_response.json()["data"]["status"]
if execute_status == "Success":
return True
elif execute_status == "Failed":
raise Exception(
f"request linkis hive status entrance failed, status: {execute_status}"
)
else:
return False
else:
raise SystemError(
f"request linkis hive status entrance failed, status: {execute_response.json().get('status')},"
f" message: {execute_response.json().get('message')}"
)
def _result_entrance(self):
pass
| 5,327 | 33.597403 | 111 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/linkis_hive/__init__.py
|
from fate_arch.storage.linkis_hive._table import StorageTable
from fate_arch.storage.linkis_hive._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 174 | 34 | 65 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/linkis_hive/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common.address import LinkisHiveAddress
from fate_arch.storage import StorageSessionBase, StorageEngine, LinkisHiveStoreType
from fate_arch.abc import AddressABC
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.LINKIS_HIVE)
self.con = None
self.cur = None
self.address = None
def table(self, name, namespace, address: AddressABC, partitions,
storage_type: LinkisHiveStoreType = LinkisHiveStoreType.DEFAULT, options=None, **kwargs):
self.address = address
if isinstance(address, LinkisHiveAddress):
from fate_arch.storage.linkis_hive._table import StorageTable
return StorageTable(
address=address,
name=name,
namespace=namespace,
storage_type=storage_type,
partitions=partitions,
options=options)
raise NotImplementedError(f"address type {type(address)} not supported with eggroll storage")
def cleanup(self, name, namespace):
pass
def stop(self):
pass
def kill(self):
pass
| 1,865 | 36.32 | 103 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/linkis_hive/_settings.py
|
# token
Token_Code = ""
Token_User = "fate"
# uri
EXECUTE_URI = "/api/rest_j/v1/entrance/execute"
STATUS_URI = "/api/rest_j/v1/entrance/exec_id/status"
| 153 | 18.25 | 53 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/api/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from contextlib import closing
import requests
import os
from fate_arch.common.log import getLogger
from fate_arch.storage import StorageEngine, ApiStoreType
from fate_arch.storage import StorageTableBase
LOGGER = getLogger()
class StorageTable(StorageTableBase):
def __init__(
self,
path,
address=None,
name: str = None,
namespace: str = None,
partitions: int = None,
store_type: ApiStoreType = ApiStoreType.EXTERNAL,
options=None,
):
self.path = path
self.data_count = 0
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.API,
store_type=store_type,
)
def _collect(self, **kwargs) -> list:
self.request = getattr(requests, self.address.method.lower(), None)
id_delimiter = self._meta.get_id_delimiter()
with closing(self.request(url=self.address.url, json=self.address.body, headers=self.address.header,
stream=True)) as response:
if response.status_code == 200:
os.makedirs(os.path.dirname(self.path), exist_ok=True)
with open(self.path, 'wb') as fw:
for chunk in response.iter_content(1024):
if chunk:
fw.write(chunk)
with open(self.path, "r") as f:
while True:
lines = f.readlines(1024 * 1024 * 1024)
if lines:
for line in lines:
self.data_count += 1
id = line.split(id_delimiter)[0]
feature = id_delimiter.join(line.split(id_delimiter)[1:])
yield id, feature
else:
_, self._meta = self._meta.update_metas(count=self.data_count)
break
else:
raise Exception(response.status_code, response.text)
def _read(self) -> list:
return []
def _destroy(self):
pass
def _save_as(self, **kwargs):
pass
def _count(self):
return self.data_count
| 3,030 | 33.443182 | 108 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/api/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage.api._table import StorageTable
from fate_arch.storage.api._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 774 | 37.75 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/api/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import traceback
from fate_arch.common import file_utils
from fate_arch.storage import StorageSessionBase, StorageEngine
from fate_arch.abc import AddressABC
from fate_arch.common.address import ApiAddress
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
        super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.API)
self.base_dir = os.path.join(file_utils.get_project_base_directory(), "api_data", session_id)
def table(self, address: AddressABC, name, namespace, partitions, store_type=None, options=None, **kwargs):
if isinstance(address, ApiAddress):
from fate_arch.storage.api._table import StorageTable
return StorageTable(path=os.path.join(self.base_dir, namespace, name),
address=address,
name=name,
namespace=namespace,
partitions=partitions, store_type=store_type, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with api storage")
def cleanup(self, name, namespace):
# path = os.path.join(self.base_dir, namespace, name)
# try:
# os.remove(path)
# except Exception as e:
# traceback.print_exc()
pass
def stop(self):
# try:
# shutil.rmtree(self.base_dir)
# except Exception as e:
# traceback.print_exc()
pass
def kill(self):
# try:
# shutil.rmtree(self.base_dir)
# except Exception as e:
# traceback.print_exc()
pass
| 2,334 | 36.66129 | 111 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/standalone/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Iterable
from fate_arch._standalone import Session
from fate_arch.storage import StorageEngine, StandaloneStoreType
from fate_arch.storage import StorageTableBase
class StorageTable(StorageTableBase):
def __init__(
self,
session: Session,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
store_type: StandaloneStoreType = StandaloneStoreType.ROLLPAIR_LMDB,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.STANDALONE,
store_type=store_type,
)
self._session = session
self._table = self._session.create_table(
namespace=self._namespace,
name=self._name,
partitions=partitions,
need_cleanup=self._store_type == StandaloneStoreType.ROLLPAIR_IN_MEMORY,
error_if_exist=False,
)
def _put_all(self, kv_list: Iterable, **kwargs):
return self._table.put_all(kv_list)
def _collect(self, **kwargs):
return self._table.collect(**kwargs)
def _count(self):
return self._table.count()
def _destroy(self):
return self._table.destroy()
def _save_as(self, address, name, namespace, partitions=None, **kwargs):
self._table.save_as(name=name, namespace=namespace)
table = StorageTable(
session=self._session,
address=address,
partitions=partitions,
name=name,
namespace=namespace,
**kwargs,
)
return table
| 2,383 | 30.368421 | 84 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/standalone/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage.standalone._table import StorageTable
from fate_arch.storage.standalone._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 788 | 38.45 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/standalone/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.abc import AddressABC
from fate_arch.common.address import StandaloneAddress
from fate_arch.storage import StorageSessionBase, StorageEngine
from fate_arch._standalone import Session
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.STANDALONE)
self._options = options if options else {}
self._session = Session(session_id=self._session_id)
def table(self, address: AddressABC, name, namespace, partitions, store_type=None, options=None, **kwargs):
if isinstance(address, StandaloneAddress):
from fate_arch.storage.standalone._table import StorageTable
return StorageTable(session=self._session, name=name, namespace=namespace, address=address,
partitions=partitions, store_type=store_type, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with standalone storage")
def cleanup(self, name, namespace):
self._session.cleanup(name=name, namespace=namespace)
def stop(self):
self._session.stop()
def kill(self):
self._session.kill()
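# --- Illustrative usage sketch, not part of the original module ---
# A minimal example of opening a standalone storage session and binding a table;
# the address fields are placeholders and table metadata still has to be created
# before the base-class read/write helpers can be used.
def _example_standalone_table(session_id="example_storage_session"):
    address = StandaloneAddress(name="demo", namespace="experiment")
    with StorageSession(session_id=session_id) as storage:
        table = storage.table(address=address, name="demo",
                              namespace="experiment", partitions=4)
        return table.engine, table.partitions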
| 1,865 | 42.395349 | 111 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/eggroll/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Iterable
from fate_arch.storage import StorageTableBase, StorageEngine, EggRollStoreType
class StorageTable(StorageTableBase):
def __init__(
self,
context,
name,
namespace,
address,
partitions: int = 1,
store_type: EggRollStoreType = EggRollStoreType.ROLLPAIR_LMDB,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.EGGROLL,
store_type=store_type,
)
self._context = context
self._options["store_type"] = self._store_type
self._options["total_partitions"] = partitions
self._options["create_if_missing"] = True
self._table = self._context.load(
namespace=self._namespace, name=self._name, options=self._options
)
def _save_as(self, address, name, namespace, partitions=None, **kwargs):
self._table.save_as(name=name, namespace=namespace)
table = StorageTable(
context=self._context,
address=address,
partitions=partitions,
name=name,
namespace=namespace
)
return table
def _put_all(self, kv_list: Iterable, **kwargs):
return self._table.put_all(kv_list)
def _collect(self, **kwargs) -> list:
return self._table.get_all(**kwargs)
def _destroy(self):
return self._table.destroy()
def _count(self, **kwargs):
return self._table.count()
| 2,265 | 30.472222 | 79 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/eggroll/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage.eggroll._table import StorageTable
from fate_arch.storage.eggroll._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 782 | 38.15 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/eggroll/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage import StorageSessionBase, StorageEngine, EggRollStoreType
from fate_arch.abc import AddressABC
from fate_arch.common.address import EggRollAddress
from eggroll.core.session import session_init
from eggroll.roll_pair.roll_pair import RollPairContext
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.EGGROLL)
self._options = options if options else {}
self._options['eggroll.session.deploy.mode'] = "cluster"
self._rp_session = session_init(session_id=self._session_id, options=self._options)
self._rpc = RollPairContext(session=self._rp_session)
self._session_id = self._rp_session.get_session_id()
def table(self, name, namespace,
address: AddressABC, partitions,
store_type: EggRollStoreType = EggRollStoreType.ROLLPAIR_LMDB, options=None, **kwargs):
if isinstance(address, EggRollAddress):
from fate_arch.storage.eggroll._table import StorageTable
return StorageTable(context=self._rpc, name=name, namespace=namespace, address=address,
partitions=partitions, store_type=store_type, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with eggroll storage")
def cleanup(self, name, namespace):
self._rpc.cleanup(name=name, namespace=namespace)
def stop(self):
return self._rp_session.stop()
def kill(self):
return self._rp_session.kill()
| 2,234 | 43.7 | 101 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/localfs/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
from typing import Iterable
from pyarrow import fs
from fate_arch.common import hdfs_utils
from fate_arch.common.log import getLogger
from fate_arch.storage import StorageEngine, LocalFSStoreType
from fate_arch.storage import StorageTableBase
LOGGER = getLogger()
class StorageTable(StorageTableBase):
def __init__(
self,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
storage_type: LocalFSStoreType = LocalFSStoreType.DISK,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.LOCALFS,
store_type=storage_type,
)
self._local_fs_client = fs.LocalFileSystem()
@property
def path(self):
return self._address.path
def _put_all(
self, kv_list: Iterable, append=True, assume_file_exist=False, **kwargs
):
LOGGER.info(f"put in file: {self.path}")
# always create the directory first, otherwise the following creation of file will fail.
self._local_fs_client.create_dir("/".join(self.path.split("/")[:-1]))
if append and (assume_file_exist or self._exist()):
stream = self._local_fs_client.open_append_stream(
path=self.path, compression=None
)
else:
stream = self._local_fs_client.open_output_stream(
path=self.path, compression=None
)
counter = self._meta.get_count() if self._meta.get_count() else 0
with io.TextIOWrapper(stream) as writer:
for k, v in kv_list:
writer.write(hdfs_utils.serialize(k, v))
writer.write(hdfs_utils.NEWLINE)
counter = counter + 1
self._meta.update_metas(count=counter)
def _collect(self, **kwargs) -> list:
for line in self._as_generator():
yield hdfs_utils.deserialize(line.rstrip())
def _read(self) -> list:
for line in self._as_generator():
yield line
def _destroy(self):
# use try/catch to avoid stop while deleting an non-exist file
try:
self._local_fs_client.delete_file(self.path)
except Exception as e:
LOGGER.debug(e)
def _count(self):
count = 0
for _ in self._as_generator():
count += 1
return count
def _save_as(
self, address, partitions=None, name=None, namespace=None, **kwargs
):
self._local_fs_client.copy_file(src=self.path, dst=address.path)
return StorageTable(
address=address,
partitions=partitions,
name=name,
namespace=namespace,
**kwargs,
)
def close(self):
pass
def _exist(self):
info = self._local_fs_client.get_file_info([self.path])[0]
return info.type != fs.FileType.NotFound
def _as_generator(self):
info = self._local_fs_client.get_file_info([self.path])[0]
if info.type == fs.FileType.NotFound:
raise FileNotFoundError(f"file {self.path} not found")
elif info.type == fs.FileType.File:
for line in self._read_buffer_lines():
yield line
else:
selector = fs.FileSelector(self.path)
file_infos = self._local_fs_client.get_file_info(selector)
for file_info in file_infos:
if file_info.base_name.startswith(".") or file_info.base_name.startswith("_"):
continue
assert (
file_info.is_file
), f"{self.path} is directory contains a subdirectory: {file_info.path}"
with io.TextIOWrapper(
buffer=self._local_fs_client.open_input_stream(
f"{self._address.file_path:}/{file_info.path}"
),
encoding="utf-8",
) as reader:
for line in reader:
yield line
def _read_buffer_lines(self, path=None):
if not path:
path = self.path
        buffer = self._local_fs_client.open_input_file(path)
offset = 0
block_size = 1024 * 1024 * 10
size = buffer.size()
while offset < size:
block_index = 1
buffer_block = buffer.read_at(block_size, offset)
if offset + block_size >= size:
for line in self._read_lines(buffer_block):
yield line
break
if buffer_block.endswith(b"\n"):
for line in self._read_lines(buffer_block):
yield line
offset += block_size
continue
end_index = -1
buffer_len = len(buffer_block)
while not buffer_block[:end_index].endswith(b"\n"):
if offset + block_index * block_size >= size:
break
end_index -= 1
if abs(end_index) == buffer_len:
block_index += 1
buffer_block = buffer.read_at(block_index * block_size, offset)
end_index = block_index * block_size
for line in self._read_lines(buffer_block[:end_index]):
yield line
offset += len(buffer_block[:end_index])
def _read_lines(self, buffer_block):
with io.TextIOWrapper(buffer=io.BytesIO(buffer_block), encoding="utf-8") as reader:
for line in reader:
yield line
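# --- Illustrative usage sketch, not part of the original module ---
# Assuming a LocalFSAddress whose path points at a writable location and table
# metadata created beforehand, rows are appended and read back as below; the
# local_fs_address variable is a placeholder.
#
#   table = StorageTable(address=local_fs_address, name="demo", namespace="experiment")
#   table.create_meta()
#   table.put_all([("1", "0.1,0.2"), ("2", "0.3,0.4")])
#   for key, value in table.collect():
#       print(key, value)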
| 6,373 | 33.454054 | 96 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/localfs/__init__.py
|
from fate_arch.storage.localfs._table import StorageTable
from fate_arch.storage.localfs._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 166 | 32.4 | 61 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/localfs/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage import StorageSessionBase, StorageEngine
from fate_arch.abc import AddressABC
from fate_arch.common.address import LocalFSAddress
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.LOCALFS)
def table(self, address: AddressABC, name, namespace, partitions, storage_type=None, options=None, **kwargs):
if isinstance(address, LocalFSAddress):
from fate_arch.storage.localfs._table import StorageTable
return StorageTable(address=address, name=name, namespace=namespace,
partitions=partitions, storage_type=storage_type, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with hdfs storage")
def cleanup(self, name, namespace):
pass
def stop(self):
pass
def kill(self):
pass
| 1,596 | 37.95122 | 113 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/path/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Iterable
from fate_arch.common import path_utils
from fate_arch.common.log import getLogger
from fate_arch.storage import StorageEngine, PathStoreType
from fate_arch.storage import StorageTableBase
LOGGER = getLogger()
class StorageTable(StorageTableBase):
def __init__(
self,
address=None,
name: str = None,
namespace: str = None,
partitions: int = None,
store_type: PathStoreType = PathStoreType.PICTURE,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.PATH,
store_type=store_type,
)
def _collect(self, **kwargs) -> list:
return []
def _read(self) -> list:
return []
def _destroy(self):
pass
def _save_as(self, **kwargs):
pass
def _count(self):
return path_utils.get_data_table_count(self._address.path)
| 1,689 | 27.166667 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/path/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage.path._table import StorageTable
from fate_arch.storage.path._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 776 | 37.85 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/path/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage import StorageSessionBase, StorageEngine
from fate_arch.abc import AddressABC
from fate_arch.common.address import PathAddress
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.PATH)
def table(self, address: AddressABC, name, namespace, partitions, store_type=None, options=None, **kwargs):
if isinstance(address, PathAddress):
from fate_arch.storage.path._table import StorageTable
return StorageTable(address=address, name=name, namespace=namespace,
partitions=partitions, store_type=store_type, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with hdfs storage")
def cleanup(self, name, namespace):
pass
def stop(self):
pass
def kill(self):
pass
| 1,578 | 37.512195 | 111 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/mysql/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage import StorageEngine, MySQLStoreType
from fate_arch.storage import StorageTableBase
class StorageTable(StorageTableBase):
def __init__(
self,
cur,
con,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
store_type: MySQLStoreType = MySQLStoreType.InnoDB,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.MYSQL,
store_type=store_type,
)
self._cur = cur
self._con = con
def check_address(self):
schema = self.meta.get_schema()
if schema:
if schema.get("sid") and schema.get("header"):
sql = "SELECT {},{} FROM {}".format(
schema.get("sid"), schema.get("header"), self._address.name
)
else:
sql = "SELECT {} FROM {}".format(
schema.get("sid"), self._address.name
)
feature_data = self.execute(sql)
for feature in feature_data:
if feature:
break
return True
@staticmethod
def get_meta_header(feature_name_list):
create_features = ""
feature_list = []
feature_size = "varchar(255)"
for feature_name in feature_name_list:
create_features += "{} {},".format(feature_name, feature_size)
feature_list.append(feature_name)
return create_features, feature_list
def _count(self):
sql = "select count(*) from {}".format(self._address.name)
try:
self._cur.execute(sql)
# self.con.commit()
ret = self._cur.fetchall()
count = ret[0][0]
except BaseException:
count = 0
return count
def _collect(self, **kwargs) -> list:
id_name, feature_name_list, _ = self._get_id_feature_name()
id_feature_name = [id_name]
id_feature_name.extend(feature_name_list)
sql = "select {} from {}".format(",".join(id_feature_name), self._address.name)
data = self.execute(sql)
for line in data:
feature_list = [str(feature) for feature in list(line[1:])]
yield line[0], self.meta.get_id_delimiter().join(feature_list)
def _put_all(self, kv_list, **kwargs):
id_name, feature_name_list, id_delimiter = self._get_id_feature_name()
feature_sql, feature_list = StorageTable.get_meta_header(feature_name_list)
id_size = "varchar(100)"
create_table = (
"create table if not exists {}({} {} NOT NULL, {} PRIMARY KEY({}))".format(
self._address.name, id_name, id_size, feature_sql, id_name
)
)
self._cur.execute(create_table)
sql = "REPLACE INTO {}({}, {}) VALUES".format(
self._address.name, id_name, ",".join(feature_list)
)
for kv in kv_list:
sql += '("{}", "{}"),'.format(kv[0], '", "'.join(kv[1].split(id_delimiter)))
sql = ",".join(sql.split(",")[:-1]) + ";"
self._cur.execute(sql)
self._con.commit()
def _destroy(self):
sql = "drop table {}".format(self._address.name)
self._cur.execute(sql)
self._con.commit()
def _save_as(self, address, name, namespace, partitions=None, **kwargs):
sql = "create table {}.{} select * from {};".format(namespace, name, self._address.name)
self._cur.execute(sql)
self._con.commit()
def execute(self, sql, select=True):
self._cur.execute(sql)
if select:
while True:
result = self._cur.fetchone()
if result:
yield result
else:
break
else:
result = self._cur.fetchall()
return result
def _get_id_feature_name(self):
id = self.meta.get_schema().get("sid", "id")
header = self.meta.get_schema().get("header", [])
id_delimiter = self.meta.get_id_delimiter()
if not header:
feature_list = []
elif isinstance(header, str):
feature_list = header.split(id_delimiter)
elif isinstance(header, list):
feature_list = header
else:
feature_list = [header]
if self.meta.get_extend_sid():
id = feature_list[0]
if len(feature_list) > 1:
feature_list = feature_list[1:]
return id, feature_list, id_delimiter
| 5,363 | 34.289474 | 96 |
py
|
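The `_put_all` method in the MySQL table above builds one multi-row `REPLACE INTO` statement from the key/value pairs before executing it. Below is a minimal standalone sketch of that string-building step; the table name, column names, and `kv_list` values are made up for illustration and no database connection is needed.
# Standalone sketch of the REPLACE INTO construction used by _put_all above.
# Table and column names are illustrative placeholders, not from a real deployment.
id_delimiter = ","
feature_list = ["x0", "x1"]
kv_list = [("id1", "0.1,0.2"), ("id2", "0.3,0.4")]

sql = "REPLACE INTO {}({}, {}) VALUES".format("demo_table", "id", ",".join(feature_list))
for k, v in kv_list:
    sql += '("{}", "{}"),'.format(k, '", "'.join(v.split(id_delimiter)))
sql = ",".join(sql.split(",")[:-1]) + ";"  # drop the trailing comma, as in _put_all

print(sql)
# REPLACE INTO demo_table(id, x0,x1) VALUES("id1", "0.1", "0.2"),("id2", "0.3", "0.4");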
FATE
|
FATE-master/python/fate_arch/storage/mysql/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage.mysql._table import StorageTable
from fate_arch.storage.mysql._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 778 | 37.95 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/mysql/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import traceback
import pymysql
from fate_arch.storage import StorageSessionBase, StorageEngine, MySQLStoreType
from fate_arch.abc import AddressABC
from fate_arch.common.address import MysqlAddress
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.MYSQL)
self._db_con = {}
def table(self, name, namespace, address: AddressABC, partitions,
store_type: MySQLStoreType = MySQLStoreType.InnoDB, options=None, **kwargs):
if isinstance(address, MysqlAddress):
from fate_arch.storage.mysql._table import StorageTable
address_key = MysqlAddress(user=None,
passwd=None,
host=address.host,
port=address.port,
db=address.db,
name=None)
if address_key in self._db_con:
con, cur = self._db_con[address_key]
else:
self._create_db_if_not_exists(address)
con = pymysql.connect(host=address.host,
user=address.user,
passwd=address.passwd,
port=address.port,
db=address.db)
cur = con.cursor()
self._db_con[address_key] = (con, cur)
return StorageTable(cur=cur, con=con, address=address, name=name, namespace=namespace,
store_type=store_type, partitions=partitions, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with eggroll storage")
def cleanup(self, name, namespace):
pass
def stop(self):
try:
for key, val in self._db_con.items():
con = val[0]
cur = val[1]
cur.close()
con.close()
except Exception as e:
traceback.print_exc()
def kill(self):
return self.stop()
def _create_db_if_not_exists(self, address):
connection = pymysql.connect(host=address.host,
user=address.user,
password=address.passwd,
port=address.port)
with connection:
with connection.cursor() as cursor:
cursor.execute("create database if not exists {}".format(address.db))
print('create db {} success'.format(address.db))
connection.commit()
| 3,378 | 38.290698 | 101 |
py
|
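A hedged usage sketch for the MySQL session above: host, credentials, and names are placeholders, and it assumes a reachable MySQL server plus the fate_arch package on the path. The calls follow the signatures shown in _session.py and _table.py; this is a sketch of the flow, not a definitive usage pattern.
# Illustrative only: connection details and names are placeholders; a reachable
# MySQL server and an importable fate_arch package are assumed.
from fate_arch.common.address import MysqlAddress
from fate_arch.storage.mysql._session import StorageSession

address = MysqlAddress(user="fate", passwd="fate_pass", host="127.0.0.1",
                       port=3306, db="fate_storage", name="demo_table")

session = StorageSession(session_id="demo_session")
table = session.table(name="demo_table", namespace="experiment",
                      address=address, partitions=1)
# ... use the table through the base-class wrappers around _put_all / _collect ...
session.stop()   # closes every cached (con, cur) pair held in _db_con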
FATE
|
FATE-master/python/fate_arch/storage/hive/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
from fate_arch.common import hive_utils
from fate_arch.common.file_utils import get_project_base_directory
from fate_arch.storage import StorageEngine, HiveStoreType
from fate_arch.storage import StorageTableBase
class StorageTable(StorageTableBase):
def __init__(
self,
cur,
con,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
storage_type: HiveStoreType = HiveStoreType.DEFAULT,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.HIVE,
store_type=storage_type,
)
self._cur = cur
self._con = con
def execute(self, sql, select=True):
self._cur.execute(sql)
if select:
while True:
result = self._cur.fetchone()
if result:
yield result
else:
break
else:
result = self._cur.fetchall()
return result
def _count(self, **kwargs):
sql = 'select count(*) from {}'.format(self._address.name)
try:
self._cur.execute(sql)
self._con.commit()
ret = self._cur.fetchall()
count = ret[0][0]
except BaseException:
count = 0
return count
def _collect(self, **kwargs) -> list:
sql = "select * from {}".format(self._address.name)
data = self.execute(sql)
for line in data:
yield hive_utils.deserialize_line(line)
def _read(self) -> list:
id_name, feature_name_list, _ = self._get_id_feature_name()
id_feature_name = [id_name]
id_feature_name.extend(feature_name_list)
sql = "select {} from {}".format(",".join(id_feature_name), self._address.name)
data = self.execute(sql)
for line in data:
yield hive_utils.read_line(line)
def _put_all(self, kv_list, **kwargs):
id_name, feature_name_list, id_delimiter = self.get_id_feature_name()
create_table = "create table if not exists {}(k varchar(128) NOT NULL, v string) row format delimited fields terminated by" \
" '{}'".format(self._address.name, id_delimiter)
self._cur.execute(create_table)
# load local file or hdfs file
temp_path = os.path.join(get_project_base_directory(), 'temp_data', uuid.uuid1().hex)
os.makedirs(os.path.dirname(temp_path), exist_ok=True)
with open(temp_path, 'w') as f:
for k, v in kv_list:
f.write(hive_utils.serialize_line(k, v))
sql = "load data local inpath '{}' into table {}".format(temp_path, self._address.name)
self._cur.execute(sql)
self._con.commit()
os.remove(temp_path)
def get_id_feature_name(self):
id = self.meta.get_schema().get('sid', 'id')
header = self.meta.get_schema().get('header')
id_delimiter = self.meta.get_id_delimiter()
if header:
if isinstance(header, str):
feature_list = header.split(id_delimiter)
elif isinstance(header, list):
feature_list = header
else:
feature_list = [header]
else:
raise Exception("hive table need data header")
return id, feature_list, id_delimiter
def _destroy(self):
sql = "drop table {}".format(self._name)
return self.execute(sql)
def _save_as(self, address, name, namespace, partitions=None, **kwargs):
sql = "create table {}.{} like {}.{};".format(namespace, name, self._namespace, self._name)
return self.execute(sql)
def check_address(self):
schema = self.meta.get_schema()
if schema:
sql = 'SELECT {},{} FROM {}'.format(schema.get('sid'), schema.get('header'), self._address.name)
feature_data = self.execute(sql)
for feature in feature_data:
if feature:
return True
return False
@staticmethod
def get_meta_header(feature_name_list):
create_features = ''
feature_list = []
feature_size = "varchar(255)"
for feature_name in feature_name_list:
create_features += '{} {},'.format(feature_name, feature_size)
feature_list.append(feature_name)
return create_features, feature_list
def _get_id_feature_name(self):
id = self.meta.get_schema().get("sid", "id")
header = self.meta.get_schema().get("header")
id_delimiter = self.meta.get_id_delimiter()
if header:
if isinstance(header, str):
feature_list = header.split(id_delimiter)
elif isinstance(header, list):
feature_list = header
else:
feature_list = [header]
else:
raise Exception("mysql table need data header")
return id, feature_list, id_delimiter
| 5,809 | 35.086957 | 133 |
py
|
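The hive `_put_all` above stages the key/value pairs in a local delimited file and then loads it with `load data local inpath`. A standalone sketch of that staging step follows; `serialize_line` here is a stand-in for `hive_utils.serialize_line` (assumed to join key and value with the delimiter and append a newline), and the table name is a placeholder.
# Standalone sketch of the temp-file staging used by the hive _put_all above.
# serialize_line is an assumed stand-in; the real hive_utils.serialize_line may differ.
import os
import tempfile
import uuid

def serialize_line(k, v, delimiter=","):
    return "{}{}{}\n".format(k, delimiter, v)

kv_list = [("id1", "0.1,0.2"), ("id2", "0.3,0.4")]
temp_path = os.path.join(tempfile.gettempdir(), "temp_data", uuid.uuid1().hex)
os.makedirs(os.path.dirname(temp_path), exist_ok=True)

with open(temp_path, "w") as f:
    for k, v in kv_list:
        f.write(serialize_line(k, v))

load_sql = "load data local inpath '{}' into table {}".format(temp_path, "demo_table")
print(load_sql)   # this string would be executed through the hive cursor in _put_all
os.remove(temp_path)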
FATE
|
FATE-master/python/fate_arch/storage/hive/__init__.py
|
from fate_arch.storage.hive._table import StorageTable
from fate_arch.storage.hive._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 160 | 31.2 | 58 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/hive/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import traceback
from impala.dbapi import connect
from fate_arch.common.address import HiveAddress
from fate_arch.storage import StorageSessionBase, StorageEngine, HiveStoreType
from fate_arch.abc import AddressABC
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.HIVE)
self._db_con = {}
def table(self, name, namespace, address: AddressABC, partitions,
storage_type: HiveStoreType = HiveStoreType.DEFAULT, options=None, **kwargs):
if isinstance(address, HiveAddress):
from fate_arch.storage.hive._table import StorageTable
address_key = HiveAddress(
host=address.host,
username=None,
port=address.port,
database=address.database,
auth_mechanism=None,
password=None,
name=None)
if address_key in self._db_con:
con, cur = self._db_con[address_key]
else:
self._create_db_if_not_exists(address)
con = connect(host=address.host,
port=address.port,
database=address.database,
auth_mechanism=address.auth_mechanism,
password=address.password,
user=address.username
)
cur = con.cursor()
self._db_con[address_key] = (con, cur)
return StorageTable(cur=cur, con=con, address=address, name=name, namespace=namespace,
storage_type=storage_type, partitions=partitions, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with eggroll storage")
def cleanup(self, name, namespace):
pass
def stop(self):
try:
for key, val in self._db_con.items():
con = val[0]
cur = val[1]
cur.close()
con.close()
except Exception as e:
traceback.print_exc()
def kill(self):
return self.stop()
def _create_db_if_not_exists(self, address):
connection = connect(host=address.host,
port=address.port,
user=address.username,
auth_mechanism=address.auth_mechanism,
password=address.password
)
with connection:
with connection.cursor() as cursor:
cursor.execute("create database if not exists {}".format(address.database))
print('create db {} success'.format(address.database))
connection.commit()
| 3,512 | 38.920455 | 101 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/hdfs/_table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
from typing import Iterable
from pyarrow import fs
from fate_arch.common import hdfs_utils
from fate_arch.common.log import getLogger
from fate_arch.storage import StorageEngine, HDFSStoreType
from fate_arch.storage import StorageTableBase
LOGGER = getLogger()
class StorageTable(StorageTableBase):
def __init__(
self,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
store_type: HDFSStoreType = HDFSStoreType.DISK,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.HDFS,
store_type=store_type,
)
# tricky way to load libhdfs
try:
from pyarrow import HadoopFileSystem
HadoopFileSystem(self.path)
except Exception as e:
LOGGER.warning(f"load libhdfs failed: {e}")
self._hdfs_client = fs.HadoopFileSystem.from_uri(self.path)
def check_address(self):
return self._exist()
def _put_all(
self, kv_list: Iterable, append=True, assume_file_exist=False, **kwargs
):
LOGGER.info(f"put in hdfs file: {self.file_path}")
if append and (assume_file_exist or self._exist()):
stream = self._hdfs_client.open_append_stream(
path=self.file_path, compression=None
)
else:
stream = self._hdfs_client.open_output_stream(
path=self.file_path, compression=None
)
counter = self._meta.get_count() if self._meta.get_count() else 0
with io.TextIOWrapper(stream) as writer:
for k, v in kv_list:
writer.write(hdfs_utils.serialize(k, v))
writer.write(hdfs_utils.NEWLINE)
counter = counter + 1
self._meta.update_metas(count=counter)
def _collect(self, **kwargs) -> list:
for line in self._as_generator():
yield hdfs_utils.deserialize(line.rstrip())
def _read(self) -> list:
for line in self._as_generator():
yield line
def _destroy(self):
self._hdfs_client.delete_file(self.file_path)
def _count(self):
count = 0
if self._meta.get_count():
return self._meta.get_count()
for _ in self._as_generator():
count += 1
return count
def _save_as(
self, address, partitions=None, name=None, namespace=None, **kwargs
):
self._hdfs_client.copy_file(src=self.file_path, dst=address.path)
table = StorageTable(
address=address,
partitions=partitions,
name=name,
namespace=namespace,
**kwargs,
)
return table
def close(self):
pass
@property
def path(self) -> str:
return f"{self._address.name_node}/{self._address.path}"
@property
def file_path(self) -> str:
return f"{self._address.path}"
def _exist(self):
info = self._hdfs_client.get_file_info([self.file_path])[0]
return info.type != fs.FileType.NotFound
def _as_generator(self):
LOGGER.info(f"as generator: {self.file_path}")
info = self._hdfs_client.get_file_info([self.file_path])[0]
if info.type == fs.FileType.NotFound:
raise FileNotFoundError(f"file {self.file_path} not found")
elif info.type == fs.FileType.File:
for line in self._read_buffer_lines():
yield line
else:
selector = fs.FileSelector(self.file_path)
file_infos = self._hdfs_client.get_file_info(selector)
for file_info in file_infos:
if file_info.base_name == "_SUCCESS":
continue
assert (
file_info.is_file
), f"{self.path} is directory contains a subdirectory: {file_info.path}"
with io.TextIOWrapper(
buffer=self._hdfs_client.open_input_stream(file_info.path),
encoding="utf-8",
) as reader:
for line in reader:
yield line
def _read_buffer_lines(self, path=None):
if not path:
path = self.file_path
buffer = self._hdfs_client.open_input_file(path)
offset = 0
block_size = 1024 * 1024 * 10
size = buffer.size()
while offset < size:
block_index = 1
buffer_block = buffer.read_at(block_size, offset)
if offset + block_size >= size:
for line in self._read_lines(buffer_block):
yield line
break
if buffer_block.endswith(b"\n"):
for line in self._read_lines(buffer_block):
yield line
offset += block_size
continue
end_index = -1
buffer_len = len(buffer_block)
while not buffer_block[:end_index].endswith(b"\n"):
if offset + block_index * block_size >= size:
break
end_index -= 1
if abs(end_index) == buffer_len:
block_index += 1
buffer_block = buffer.read_at(block_index * block_size, offset)
end_index = block_index * block_size
for line in self._read_lines(buffer_block[:end_index]):
yield line
offset += len(buffer_block[:end_index])
def _read_lines(self, buffer_block):
with io.TextIOWrapper(buffer=io.BytesIO(buffer_block), encoding="utf-8") as reader:
for line in reader:
yield line
| 6,540 | 33.246073 | 91 |
py
|
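`_read_buffer_lines` above reads the HDFS file in 10 MB blocks and adjusts each block so it ends on a newline, which keeps lines from being split across chunks. Below is a standalone sketch of the same alignment idea over an in-memory buffer that mimics the read_at/size interface; it is a simplified variant that grows the block forward to the next newline instead of trimming back, which keeps the example short.
import io

class FakeBuffer:
    """In-memory stand-in for the pyarrow input file used above (read_at/size only)."""
    def __init__(self, data: bytes):
        self._data = data
    def size(self):
        return len(self._data)
    def read_at(self, nbytes, offset):
        return self._data[offset:offset + nbytes]

def read_lines(buffer, block_size=16):
    offset = 0
    size = buffer.size()
    while offset < size:
        block = buffer.read_at(block_size, offset)
        # grow the block until it ends on a newline or the buffer is exhausted
        while not block.endswith(b"\n") and offset + len(block) < size:
            block = buffer.read_at(len(block) + block_size, offset)
        with io.TextIOWrapper(io.BytesIO(block), encoding="utf-8") as reader:
            for line in reader:
                yield line.rstrip("\n")
        offset += len(block)

data = b"id1,0.1\nid2,0.2\nid3,0.3\n"
print(list(read_lines(FakeBuffer(data), block_size=5)))
# ['id1,0.1', 'id2,0.2', 'id3,0.3']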
FATE
|
FATE-master/python/fate_arch/storage/hdfs/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage.hdfs._table import StorageTable
from fate_arch.storage.hdfs._session import StorageSession
__all__ = ["StorageTable", "StorageSession"]
| 776 | 37.85 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/storage/hdfs/_session.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage import StorageSessionBase, StorageEngine
from fate_arch.abc import AddressABC
from fate_arch.common.address import HDFSAddress
class StorageSession(StorageSessionBase):
def __init__(self, session_id, options=None):
super(StorageSession, self).__init__(session_id=session_id, engine=StorageEngine.HDFS)
def table(self, address: AddressABC, name, namespace, partitions, store_type=None, options=None, **kwargs):
if isinstance(address, HDFSAddress):
from fate_arch.storage.hdfs._table import StorageTable
return StorageTable(address=address, name=name, namespace=namespace,
partitions=partitions, store_type=store_type, options=options)
raise NotImplementedError(f"address type {type(address)} not supported with hdfs storage")
def cleanup(self, name, namespace):
pass
def stop(self):
pass
def kill(self):
pass
| 1,578 | 37.512195 | 111 |
py
|
FATE
|
FATE-master/python/fate_arch/metastore/db_utils.py
|
import operator
from fate_arch.common.base_utils import current_timestamp
from fate_arch.metastore.db_models import DB, StorageConnectorModel
class StorageConnector:
def __init__(self, connector_name, engine=None, connector_info=None):
self.name = connector_name
self.engine = engine
self.connector_info = connector_info
@DB.connection_context()
def create_or_update(self):
defaults = {
"f_name": self.name,
"f_engine": self.engine,
"f_connector_info": self.connector_info,
"f_create_time": current_timestamp(),
}
connector, status = StorageConnectorModel.get_or_create(
f_name=self.name,
defaults=defaults)
if status is False:
for key in defaults:
setattr(connector, key, defaults[key])
connector.save(force_insert=False)
@DB.connection_context()
def get_info(self):
connectors = [connector for connector in StorageConnectorModel.select().where(
operator.attrgetter("f_name")(StorageConnectorModel) == self.name)]
if connectors:
return connectors[0].f_connector_info
else:
return {}
| 1,238 | 31.605263 | 86 |
py
|
FATE
|
FATE-master/python/fate_arch/metastore/base_model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
import typing
from enum import IntEnum
from peewee import Field, IntegerField, FloatField, BigIntegerField, TextField, Model, CompositeKey, Metadata
from fate_arch.common import conf_utils, EngineType
from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, timestamp_to_date, date_string_to_timestamp, json_dumps, json_loads
from fate_arch.federation import FederationEngine
is_standalone = conf_utils.get_base_config("default_engines", {}).get(
EngineType.FEDERATION).upper() == FederationEngine.STANDALONE
if is_standalone:
from playhouse.apsw_ext import DateTimeField
else:
from peewee import DateTimeField
CONTINUOUS_FIELD_TYPE = {IntegerField, FloatField, DateTimeField}
AUTO_DATE_TIMESTAMP_FIELD_PREFIX = {"create", "start", "end", "update", "read_access", "write_access"}
class SerializedType(IntEnum):
PICKLE = 1
JSON = 2
class LongTextField(TextField):
field_type = 'LONGTEXT'
class JSONField(LongTextField):
default_value = {}
def __init__(self, object_hook=None, object_pairs_hook=None, **kwargs):
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
super().__init__(**kwargs)
def db_value(self, value):
if value is None:
value = self.default_value
return json_dumps(value)
def python_value(self, value):
if not value:
return self.default_value
return json_loads(value, object_hook=self._object_hook, object_pairs_hook=self._object_pairs_hook)
class ListField(JSONField):
default_value = []
class SerializedField(LongTextField):
def __init__(self, serialized_type=SerializedType.PICKLE, object_hook=None, object_pairs_hook=None, **kwargs):
self._serialized_type = serialized_type
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
super().__init__(**kwargs)
def db_value(self, value):
if self._serialized_type == SerializedType.PICKLE:
return serialize_b64(value, to_str=True)
elif self._serialized_type == SerializedType.JSON:
if value is None:
return None
return json_dumps(value, with_type=True)
else:
raise ValueError(f"the serialized type {self._serialized_type} is not supported")
def python_value(self, value):
if self._serialized_type == SerializedType.PICKLE:
return deserialize_b64(value)
elif self._serialized_type == SerializedType.JSON:
if value is None:
return {}
return json_loads(value, object_hook=self._object_hook, object_pairs_hook=self._object_pairs_hook)
else:
raise ValueError(f"the serialized type {self._serialized_type} is not supported")
def is_continuous_field(cls: typing.Type) -> bool:
if cls in CONTINUOUS_FIELD_TYPE:
return True
for p in cls.__bases__:
if p in CONTINUOUS_FIELD_TYPE:
return True
elif p != Field and p != object:
if is_continuous_field(p):
return True
else:
return False
def auto_date_timestamp_field():
return {f"{f}_time" for f in AUTO_DATE_TIMESTAMP_FIELD_PREFIX}
def auto_date_timestamp_db_field():
return {f"f_{f}_time" for f in AUTO_DATE_TIMESTAMP_FIELD_PREFIX}
def remove_field_name_prefix(field_name):
return field_name[2:] if field_name.startswith('f_') else field_name
class BaseModel(Model):
f_create_time = BigIntegerField(null=True)
f_create_date = DateTimeField(null=True)
f_update_time = BigIntegerField(null=True)
f_update_date = DateTimeField(null=True)
def to_json(self):
# This function is obsolete
return self.to_dict()
def to_dict(self):
return self.__dict__['__data__']
def to_human_model_dict(self, only_primary_with: list = None):
model_dict = self.__dict__['__data__']
if not only_primary_with:
return {remove_field_name_prefix(k): v for k, v in model_dict.items()}
human_model_dict = {}
for k in self._meta.primary_key.field_names:
human_model_dict[remove_field_name_prefix(k)] = model_dict[k]
for k in only_primary_with:
human_model_dict[k] = model_dict[f'f_{k}']
return human_model_dict
@property
def meta(self) -> Metadata:
return self._meta
@classmethod
def get_primary_keys_name(cls):
return cls._meta.primary_key.field_names if isinstance(cls._meta.primary_key, CompositeKey) else [
cls._meta.primary_key.name]
@classmethod
def getter_by(cls, attr):
return operator.attrgetter(attr)(cls)
@classmethod
def query(cls, reverse=None, order_by=None, **kwargs):
filters = []
for f_n, f_v in kwargs.items():
attr_name = 'f_%s' % f_n
if not hasattr(cls, attr_name) or f_v is None:
continue
if type(f_v) in {list, set}:
f_v = list(f_v)
if is_continuous_field(type(getattr(cls, attr_name))):
if len(f_v) == 2:
for i, v in enumerate(f_v):
if isinstance(v, str) and f_n in auto_date_timestamp_field():
# time type: %Y-%m-%d %H:%M:%S
f_v[i] = date_string_to_timestamp(v)
lt_value = f_v[0]
gt_value = f_v[1]
if lt_value is not None and gt_value is not None:
filters.append(cls.getter_by(attr_name).between(lt_value, gt_value))
elif lt_value is not None:
filters.append(operator.attrgetter(attr_name)(cls) >= lt_value)
elif gt_value is not None:
filters.append(operator.attrgetter(attr_name)(cls) <= gt_value)
else:
filters.append(operator.attrgetter(attr_name)(cls) << f_v)
else:
filters.append(operator.attrgetter(attr_name)(cls) == f_v)
if filters:
query_records = cls.select().where(*filters)
if reverse is not None:
if not order_by or not hasattr(cls, f"f_{order_by}"):
order_by = "create_time"
if reverse is True:
query_records = query_records.order_by(cls.getter_by(f"f_{order_by}").desc())
elif reverse is False:
query_records = query_records.order_by(cls.getter_by(f"f_{order_by}").asc())
return [query_record for query_record in query_records]
else:
return []
@classmethod
def insert(cls, __data=None, **insert):
if isinstance(__data, dict) and __data:
__data[cls._meta.combined["f_create_time"]] = current_timestamp()
if insert:
insert["f_create_time"] = current_timestamp()
return super().insert(__data, **insert)
# update and insert will call this method
@classmethod
def _normalize_data(cls, data, kwargs):
normalized = super()._normalize_data(data, kwargs)
if not normalized:
return {}
normalized[cls._meta.combined["f_update_time"]] = current_timestamp()
for f_n in AUTO_DATE_TIMESTAMP_FIELD_PREFIX:
if {f"f_{f_n}_time", f"f_{f_n}_date"}.issubset(cls._meta.combined.keys()) and \
cls._meta.combined[f"f_{f_n}_time"] in normalized and \
normalized[cls._meta.combined[f"f_{f_n}_time"]] is not None:
normalized[cls._meta.combined[f"f_{f_n}_date"]] = timestamp_to_date(
normalized[cls._meta.combined[f"f_{f_n}_time"]])
return normalized
| 8,513 | 36.506608 | 158 |
py
|
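`BaseModel.query` above turns keyword arguments into peewee filters: a two-element list on a continuous field becomes a range condition, other lists become an IN clause, and scalars become equality tests. A standalone sketch of that mapping with plain peewee and an in-memory SQLite database follows; the model and field names are made up for illustration.
from peewee import SqliteDatabase, Model, CharField, BigIntegerField

db = SqliteDatabase(":memory:")

class DemoRecord(Model):
    f_name = CharField()
    f_create_time = BigIntegerField()
    class Meta:
        database = db

db.create_tables([DemoRecord])
DemoRecord.create(f_name="a", f_create_time=100)
DemoRecord.create(f_name="b", f_create_time=200)

# scalar kwarg -> equality, like filters.append(attr == f_v)
print([r.f_name for r in DemoRecord.select().where(DemoRecord.f_name == "a")])

# two-element range on a continuous field -> between(), like the query() branch above
print([r.f_name for r in DemoRecord.select().where(DemoRecord.f_create_time.between(50, 150))])

# list on a non-continuous field -> IN clause, expressed with << in peewee
print([r.f_name for r in DemoRecord.select().where(DemoRecord.f_name << ["a", "b"])])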
FATE
|
FATE-master/python/fate_arch/metastore/db_models.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import os
import sys
from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BooleanField
from fate_arch.federation import FederationEngine
from fate_arch.metastore.base_model import DateTimeField
from fate_arch.common import file_utils, log, EngineType, conf_utils
from fate_arch.common.conf_utils import decrypt_database_config
from fate_arch.metastore.base_model import JSONField, SerializedField, BaseModel
LOGGER = log.getLogger()
DATABASE = decrypt_database_config()
is_standalone = conf_utils.get_base_config("default_engines", {}).get(EngineType.FEDERATION).upper() == \
FederationEngine.STANDALONE
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
key = str(cls) + str(os.getpid())
if key not in instances:
instances[key] = cls(*args, **kw)
return instances[key]
return _singleton
@singleton
class BaseDataBase(object):
def __init__(self):
database_config = DATABASE.copy()
db_name = database_config.pop("name")
if is_standalone and not bool(int(os.environ.get("FORCE_USE_MYSQL", 0))):
from playhouse.apsw_ext import APSWDatabase
self.database_connection = APSWDatabase(file_utils.get_project_base_directory("fate_sqlite.db"))
else:
from playhouse.pool import PooledMySQLDatabase
self.database_connection = PooledMySQLDatabase(db_name, **database_config)
DB = BaseDataBase().database_connection
def close_connection():
try:
if DB:
DB.close()
except Exception as e:
LOGGER.exception(e)
class DataBaseModel(BaseModel):
class Meta:
database = DB
@DB.connection_context()
def init_database_tables():
members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
table_objs = []
create_failed_list = []
for name, obj in members:
if obj != DataBaseModel and issubclass(obj, DataBaseModel):
table_objs.append(obj)
LOGGER.info(f"start create table {obj.__name__}")
try:
obj.create_table()
LOGGER.info(f"create table success: {obj.__name__}")
except Exception as e:
LOGGER.exception(e)
create_failed_list.append(obj.__name__)
if create_failed_list:
LOGGER.info(f"create tables failed: {create_failed_list}")
raise Exception(f"create tables failed: {create_failed_list}")
class StorageConnectorModel(DataBaseModel):
f_name = CharField(max_length=100, primary_key=True)
f_engine = CharField(max_length=100, index=True) # 'MYSQL'
f_connector_info = JSONField()
class Meta:
db_table = "t_storage_connector"
class StorageTableMetaModel(DataBaseModel):
f_name = CharField(max_length=100, index=True)
f_namespace = CharField(max_length=100, index=True)
f_address = JSONField()
f_engine = CharField(max_length=100) # 'EGGROLL', 'MYSQL'
f_store_type = CharField(max_length=50, null=True) # store type
f_options = JSONField()
f_partitions = IntegerField(null=True)
f_id_delimiter = CharField(null=True)
f_in_serialized = BooleanField(default=True)
f_have_head = BooleanField(default=True)
f_extend_sid = BooleanField(default=False)
f_auto_increasing_sid = BooleanField(default=False)
f_schema = SerializedField()
f_count = BigIntegerField(null=True)
f_part_of_data = SerializedField()
f_origin = CharField(max_length=50, default='')
f_disable = BooleanField(default=False)
f_description = TextField(default='')
f_read_access_time = BigIntegerField(null=True)
f_read_access_date = DateTimeField(null=True)
f_write_access_time = BigIntegerField(null=True)
f_write_access_date = DateTimeField(null=True)
class Meta:
db_table = "t_storage_table_meta"
primary_key = CompositeKey('f_name', 'f_namespace')
class SessionRecord(DataBaseModel):
f_engine_session_id = CharField(max_length=150, null=False)
f_manager_session_id = CharField(max_length=150, null=False)
f_engine_type = CharField(max_length=10, index=True)
f_engine_name = CharField(max_length=50, index=True)
f_engine_address = JSONField()
class Meta:
db_table = "t_session_record"
| 4,947 | 32.659864 | 108 |
py
|
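The `singleton` decorator above caches one instance per class and process id, so `BaseDataBase()` always returns the same connection holder within a process. A standalone sketch of that behavior, using a trivial counter class in place of the database wrapper:
import os

def singleton(cls, *args, **kw):
    instances = {}
    def _singleton():
        key = str(cls) + str(os.getpid())
        if key not in instances:
            instances[key] = cls(*args, **kw)
        return instances[key]
    return _singleton

@singleton
class Counter:
    def __init__(self):
        self.value = 0

a = Counter()
b = Counter()
a.value += 1
print(a is b, b.value)   # True 1 -- both names point at the single per-process instance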
FATE
|
FATE-master/python/fate_arch/metastore/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_arch/protobuf/python/default_empty_fill_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: default-empty-fill.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x64\x65\x66\x61ult-empty-fill.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\'\n\x17\x44\x65\x66\x61ultEmptyFillMessage\x12\x0c\n\x04\x66lag\x18\x01 \x01(\tB\x17\x42\x15\x44\x65\x66\x61ultEmptyFillProtob\x06proto3')
_DEFAULTEMPTYFILLMESSAGE = DESCRIPTOR.message_types_by_name['DefaultEmptyFillMessage']
DefaultEmptyFillMessage = _reflection.GeneratedProtocolMessageType('DefaultEmptyFillMessage', (_message.Message,), {
'DESCRIPTOR' : _DEFAULTEMPTYFILLMESSAGE,
'__module__' : 'default_empty_fill_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.DefaultEmptyFillMessage)
})
_sym_db.RegisterMessage(DefaultEmptyFillMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'B\025DefaultEmptyFillProto'
_DEFAULTEMPTYFILLMESSAGE._serialized_start=68
_DEFAULTEMPTYFILLMESSAGE._serialized_end=107
# @@protoc_insertion_point(module_scope)
| 1,532 | 41.583333 | 286 |
py
|
FATE
|
FATE-master/python/fate_arch/protobuf/python/inference_service_pb2.py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: inference_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17inference_service.proto\x12\x1e\x63om.webank.ai.fate.api.serving\"0\n\x10InferenceMessage\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\x32\xf6\x02\n\x10InferenceService\x12o\n\tinference\x12\x30.com.webank.ai.fate.api.serving.InferenceMessage\x1a\x30.com.webank.ai.fate.api.serving.InferenceMessage\x12w\n\x11startInferenceJob\x12\x30.com.webank.ai.fate.api.serving.InferenceMessage\x1a\x30.com.webank.ai.fate.api.serving.InferenceMessage\x12x\n\x12getInferenceResult\x12\x30.com.webank.ai.fate.api.serving.InferenceMessage\x1a\x30.com.webank.ai.fate.api.serving.InferenceMessageB\x17\x42\x15InferenceServiceProtob\x06proto3')
_INFERENCEMESSAGE = DESCRIPTOR.message_types_by_name['InferenceMessage']
InferenceMessage = _reflection.GeneratedProtocolMessageType('InferenceMessage', (_message.Message,), {
'DESCRIPTOR' : _INFERENCEMESSAGE,
'__module__' : 'inference_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.serving.InferenceMessage)
})
_sym_db.RegisterMessage(InferenceMessage)
_INFERENCESERVICE = DESCRIPTOR.services_by_name['InferenceService']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'B\025InferenceServiceProto'
_INFERENCEMESSAGE._serialized_start=59
_INFERENCEMESSAGE._serialized_end=107
_INFERENCESERVICE._serialized_start=110
_INFERENCESERVICE._serialized_end=484
# @@protoc_insertion_point(module_scope)
| 2,047 | 51.512821 | 724 |
py
|
FATE
|
FATE-master/python/fate_arch/protobuf/python/inference_service_pb2_grpc.py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import inference_service_pb2 as inference__service__pb2
class InferenceServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.inference = channel.unary_unary(
'/com.webank.ai.fate.api.serving.InferenceService/inference',
request_serializer=inference__service__pb2.InferenceMessage.SerializeToString,
response_deserializer=inference__service__pb2.InferenceMessage.FromString,
)
self.startInferenceJob = channel.unary_unary(
'/com.webank.ai.fate.api.serving.InferenceService/startInferenceJob',
request_serializer=inference__service__pb2.InferenceMessage.SerializeToString,
response_deserializer=inference__service__pb2.InferenceMessage.FromString,
)
self.getInferenceResult = channel.unary_unary(
'/com.webank.ai.fate.api.serving.InferenceService/getInferenceResult',
request_serializer=inference__service__pb2.InferenceMessage.SerializeToString,
response_deserializer=inference__service__pb2.InferenceMessage.FromString,
)
class InferenceServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def inference(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def startInferenceJob(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getInferenceResult(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InferenceServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'inference': grpc.unary_unary_rpc_method_handler(
servicer.inference,
request_deserializer=inference__service__pb2.InferenceMessage.FromString,
response_serializer=inference__service__pb2.InferenceMessage.SerializeToString,
),
'startInferenceJob': grpc.unary_unary_rpc_method_handler(
servicer.startInferenceJob,
request_deserializer=inference__service__pb2.InferenceMessage.FromString,
response_serializer=inference__service__pb2.InferenceMessage.SerializeToString,
),
'getInferenceResult': grpc.unary_unary_rpc_method_handler(
servicer.getInferenceResult,
request_deserializer=inference__service__pb2.InferenceMessage.FromString,
response_serializer=inference__service__pb2.InferenceMessage.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'com.webank.ai.fate.api.serving.InferenceService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class InferenceService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def inference(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.serving.InferenceService/inference',
inference__service__pb2.InferenceMessage.SerializeToString,
inference__service__pb2.InferenceMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def startInferenceJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.serving.InferenceService/startInferenceJob',
inference__service__pb2.InferenceMessage.SerializeToString,
inference__service__pb2.InferenceMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getInferenceResult(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.webank.ai.fate.api.serving.InferenceService/getInferenceResult',
inference__service__pb2.InferenceMessage.SerializeToString,
inference__service__pb2.InferenceMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 5,990 | 44.045113 | 132 |
py
|
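A hedged client-side sketch for the generated stub above: the serving address and message payloads are placeholders, and it assumes inference_service_pb2 and inference_service_pb2_grpc are importable and an InferenceService endpoint is listening.
# Illustrative client call; host/port and message contents are placeholders.
import grpc
import inference_service_pb2
import inference_service_pb2_grpc

with grpc.insecure_channel("127.0.0.1:8000") as channel:
    stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
    request = inference_service_pb2.InferenceMessage(header=b"{}", body=b"{}")
    response = stub.inference(request)
    print(response.header, response.body)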