repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-multi-cv.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1,
cv_param={
"need_cv": True,
"n_splits": 5,
"shuffle": False,
"random_seed": 103
}
)
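# NOTE: with need_cv=True this component runs 5-fold cross-validation over the
# single train input, which is why no validation reader is wired up in this
# example; the summary printed below reflects the CV run, not one fitted model.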
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,929 | 40.368421 | 104 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-layered-binary.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=10,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
boosting_strategy='layered',
validation_freqs=1)
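# NOTE: boosting_strategy='layered' selects FATE's Fast SecureBoost layered mode,
# in which (per the FATE docs) hosts build the upper layers of every tree on
# their own features and the guest finishes the lower layers; the related depth
# settings (e.g. guest_depth/host_depth) are left at their defaults here.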
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,018 | 43.026316 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-multi-mo.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1,
multi_mode='multi_output'
)
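# NOTE: multi_mode='multi_output' makes each tree emit one score per class
# (multi-output leaves) instead of training a separate one-vs-rest forest per
# class, which reduces the total tree count for multi-class tasks.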
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,089 | 43.26087 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-EINI-with-random-mask.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1,
EINI_inference=True,
EINI_random_mask=True
)
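# NOTE: EINI_inference=True switches prediction to the EINI secure inference
# protocol, and EINI_random_mask=True additionally masks intermediate scores
# with random numbers so hosts learn less about raw predictions; both are
# inference-time options and do not change training.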
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
print('start to predict')
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersect_0, hetero_secure_boost_0, evaluation_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
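# NOTE: predict_input maps the deployed pipeline's data_transform_0 input onto
# reader_0's output, so prediction reuses the reader configured for training
# above; only the components listed in deploy_component take part in predict.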
# run predict model
predict_pipeline.predict()
predict_result = predict_pipeline.get_component("hetero_secure_boost_0").get_output_data()
print("Showing 10 data of predict result")
print(predict_result.head(10))
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,930 | 41.978261 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-binary-cipher-compress.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "paillier"},
tree_param={"max_depth": 3},
cipher_compress_error=8,
validation_freqs=1)
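# NOTE: cipher compression packs several Paillier ciphertexts into one before
# transfer to cut host-to-guest communication; cipher_compress_error (an older
# FATE knob) roughly tunes the precision/compression trade-off of the packing.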
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,013 | 42.982456 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-regression.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "student_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "student_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "student_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "student_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest",
party_id=guest).component_param(
with_label=True,
output_format="dense",
label_type="float")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest",
party_id=guest).component_param(
with_label=True,
output_format="dense",
label_type="float")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="regression",
objective_param={"objective": "lse"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1)
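# NOTE: task_type='regression' with objective 'lse' trains on least-squared-error
# loss; label_type='float' in the DataTransform configs above ensures the label
# column is parsed as a continuous value rather than a class index.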
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,024 | 41.226891 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-binary-with-missing-value.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "ionosphere_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "ionosphere_scale_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "ionosphere_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "ionosphere_scale_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest",
party_id=guest).component_param(
with_label=True,
output_format="dense",
label_name="label",
label_type="int")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest",
party_id=guest).component_param(
with_label=True,
output_format="dense",
label_name="label",
label_type="int")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
use_missing=True, # use missing
validation_freqs=1)
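# NOTE: use_missing=True enables XGBoost-style missing-value handling, where
# each split learns a default direction for samples whose feature is absent.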
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,201 | 41.639344 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-binary-complete-secure.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
complete_secure=True,
validation_freqs=1)
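# NOTE: complete_secure=True makes the first tree split only on guest-side
# features, so hosts gain no label-derived split information from that tree
# (see FATE's complete_secure parameter docs).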
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,010 | 42.95614 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-binary-cv.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1,
cv_param={
"need_cv": True,
"n_splits": 5,
"shuffle": False,
"random_seed": 103
}
)
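# NOTE: as in the multi-class CV example above, need_cv=True turns this run
# into 5-fold cross-validation over the train input; shuffle=False keeps the
# fold assignment deterministic.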
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,915 | 40.221053 | 104 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/__init__.py | | 0 | 0 | 0 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-early-stop.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "student_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "student_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "student_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "student_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="regression",
objective_param={"objective": "lse"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1,
early_stopping_rounds=1)
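# NOTE: early_stopping_rounds=1 stops boosting as soon as the validation metric
# fails to improve for one round; it relies on the validate_data wired in below
# and on validation_freqs so metrics are computed every iteration.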
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,007 | 42.929825 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-binary.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1)
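# NOTE: validation_freqs=1 evaluates the model on the validate_data input after
# every boosting round, so validation metrics are logged per tree.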
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,942 | 42.743363 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-layered-multi.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "vehicle_scale_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "vehicle_scale_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1,
boosting_strategy='layered'
)
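# NOTE: same layered Fast SecureBoost strategy as the binary example above,
# here applied to a multi-class task (eval_type='multi' below).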
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,091 | 43.278261 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-mix-binary.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_validate_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=10,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
boosting_strategy='mix',
validation_freqs=1)
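# NOTE: boosting_strategy='mix' selects Fast SecureBoost mix mode, where (per
# FATE's docs) guest and hosts take turns building complete trees on their own
# features; the per-turn tree count is a separate parameter left at its default.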
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
print("fitting hetero secureboost done, result:")
print(pipeline.get_component("hetero_secure_boost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,014 | 42.991228 | 119 | py |
FATE | FATE-master/examples/pipeline/hetero_sbt/pipeline-hetero-sbt-warm-start.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroSecureBoost
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def prettify(response, verbose=True):
if verbose:
print(json.dumps(response, indent=4, ensure_ascii=False))
print()
return response


def main(config="../../config.yaml", namespace=""):
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=hosts, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=hosts).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0", output_format='dense')
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True)
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=hosts).component_param(with_label=False)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
# secure boost component
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1)
hetero_secure_boost_1 = HeteroSecureBoost(name="hetero_secure_boost_1",
num_trees=3,
task_type="classification",
objective_param={"objective": "cross_entropy"},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": 3},
validation_freqs=1)
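    # warm start: the Model(model=...) link below makes hetero_secure_boost_1 resume
    # training from hetero_secure_boost_0's fitted trees instead of starting from scratch.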
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_secure_boost_1, data=Data(train_data=intersection_0.output.data),
model=Model(model=hetero_secure_boost_0.output.model))
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_1.output.data))
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
prettify(pipeline.get_component("hetero_secure_boost_0").get_summary())
prettify(pipeline.get_component("hetero_secure_boost_1").get_summary())
prettify(pipeline.get_component("evaluation_0").get_summary())
return pipeline


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,192 | 40.544 | 109 |
py
|
FATE
|
FATE-master/examples/pipeline/secure_information_retrieval/secure-information-retrieval.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader
from pipeline.component import DataTransform
from pipeline.component import SecureInformationRetrieval
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role="guest", party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="datatransform_0")
data_transform_0.get_party_instance(
role="guest", party_id=guest).component_param(
with_label=False, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=True)
param = {
"security_level": 0.5,
"oblivious_transfer_protocol": "OT_Hauck",
"commutative_encryption": "CommutativeEncryptionPohligHellman",
"non_committing_encryption": "aes",
"dh_params": {
"key_length": 1024
},
"raw_retrieval": False,
"target_cols": ["x0", "x3"]
}
secure_information_retrieval_0 = SecureInformationRetrieval(name="secure_information_retrieval_0", **param)
# add components to pipeline, in order of task execution.
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(secure_information_retrieval_0, data=Data(data=data_transform_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,411 | 35.297872 | 111 |
py
|
FATE
|
FATE-master/examples/pipeline/multi_model/pipeline-homo-multi-model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.utils.tools import load_job_config
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HomoOneHotEncoder
from pipeline.component.homo_feature_binning import HomoFeatureBinning
from pipeline.component import FederatedSample
from pipeline.component import HomoLR
from pipeline.component import HomoSecureBoost
from pipeline.component import LocalBaseline
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0", with_label=True)
data_transform_1 = DataTransform(name="data_transform_1")
federated_sample_0 = FederatedSample(name="federated_sample_0", mode="stratified", method="downsample",
fractions=[[0, 1.0], [1, 1.0]], task_type="homo")
homo_binning_0 = HomoFeatureBinning(name='homo_binning_0', sample_bins=10, method="recursive_query")
homo_binning_1 = HomoFeatureBinning(name='homo_binning_1')
homo_onehot_0 = HomoOneHotEncoder(name='homo_onehot_0', need_alignment=True)
homo_onehot_1 = HomoOneHotEncoder(name='homo_onehot_1')
homo_lr_0 = HomoLR(name="homo_lr_0", penalty="L2", tol=0.0001, alpha=1.0,
optimizer="rmsprop", max_iter=5)
homo_lr_1 = HomoLR(name="homo_lr_1")
local_baseline_0 = LocalBaseline(name="local_baseline_0", model_name="LogisticRegression",
model_opts={"penalty": "l2", "tol": 0.0001, "C": 1.0, "fit_intercept": True,
"solver": "lbfgs", "max_iter": 5, "multi_class": "ovr"})
local_baseline_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True)
local_baseline_0.get_party_instance(role='host', party_id=host).component_param(need_run=True)
local_baseline_1 = LocalBaseline(name="local_baseline_1")
homo_secureboost_0 = HomoSecureBoost(name="homo_secureboost_0", num_trees=3)
homo_secureboost_1 = HomoSecureBoost(name="homo_secureboost_1", num_trees=3)
evaluation_0 = Evaluation(name="evaluation_0")
evaluation_1 = Evaluation(name="evaluation_1")
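    # pattern: every *_0 component is fitted on reader_0's data, and its matching *_1
    # component reuses the fitted model (via the Model(...) links below) to transform
    # or score reader_1's data.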
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1, data=Data(data=reader_1.output.data),
model=Model(model=data_transform_0.output.model))
pipeline.add_component(federated_sample_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(homo_binning_0, data=Data(data=federated_sample_0.output.data))
pipeline.add_component(homo_binning_1, data=Data(data=data_transform_1.output.data),
model=Model(model=homo_binning_0.output.model))
pipeline.add_component(homo_onehot_0, data=Data(data=homo_binning_0.output.data))
pipeline.add_component(homo_onehot_1, data=Data(data=homo_binning_1.output.data),
model=Model(model=homo_onehot_0.output.model))
pipeline.add_component(homo_lr_0, data=Data(data=homo_onehot_0.output.data))
pipeline.add_component(homo_lr_1, data=Data(data=homo_onehot_1.output.data),
model=Model(model=homo_lr_0.output.model))
pipeline.add_component(local_baseline_0, data=Data(data=homo_onehot_0.output.data))
pipeline.add_component(local_baseline_1, data=Data(data=homo_onehot_1.output.data),
model=Model(model=local_baseline_0.output.model))
pipeline.add_component(homo_secureboost_0, data=Data(data=homo_onehot_0.output.data))
pipeline.add_component(homo_secureboost_1, data=Data(data=homo_onehot_1.output.data),
model=Model(model=homo_secureboost_0.output.model))
pipeline.add_component(evaluation_0,
data=Data(
data=[homo_lr_0.output.data, homo_lr_1.output.data,
local_baseline_0.output.data, local_baseline_1.output.data]))
pipeline.add_component(evaluation_1,
data=Data(
data=[homo_secureboost_0.output.data, homo_secureboost_1.output.data]))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("evaluation_0").get_summary())
print(pipeline.get_component("evaluation_1").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 6,515 | 46.217391 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/multi_model/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/pipeline/multi_model/pipeline-hetero-multi-model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.utils.tools import load_job_config
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import FeatureScale
from pipeline.component import FederatedSample
from pipeline.component import HeteroFeatureBinning
from pipeline.component import HeteroFeatureSelection
from pipeline.component import HeteroLR
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import OneHotEncoder
from pipeline.component import Union
from pipeline.component import LocalBaseline
from pipeline.component import HeteroLinR
from pipeline.component import HeteroPoisson
from pipeline.component import HeteroSSHELR
from pipeline.component import HeteroSSHELinR
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
"""Note: This script is used for components regression only"""
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_2 = Reader(name="reader_2")
reader_2.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_2.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
missing_fill=True,
outlier_replace=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False, missing_fill=True,
outlier_replace=True)
data_transform_1 = DataTransform(name="data_transform_1")
data_transform_2 = DataTransform(name="data_transform_2")
intersection_0 = Intersection(name="intersection_0")
intersection_1 = Intersection(name="intersection_1")
intersection_2 = Intersection(name="intersection_2")
union_0 = Union(name="union_0")
federated_sample_0 = FederatedSample(name="federated_sample_0", mode="stratified", method="downsample",
fractions=[[0, 1.0], [1, 1.0]])
feature_scale_0 = FeatureScale(name="feature_scale_0")
feature_scale_1 = FeatureScale(name="feature_scale_1")
hetero_feature_binning_0 = HeteroFeatureBinning(name="hetero_feature_binning_0")
hetero_feature_binning_1 = HeteroFeatureBinning(name="hetero_feature_binning_1")
hetero_feature_selection_0 = HeteroFeatureSelection(name="hetero_feature_selection_0")
hetero_feature_selection_1 = HeteroFeatureSelection(name="hetero_feature_selection_1")
one_hot_0 = OneHotEncoder(name="one_hot_0")
one_hot_1 = OneHotEncoder(name="one_hot_1")
hetero_lr_0 = HeteroLR(name="hetero_lr_0", penalty="L2", optimizer="rmsprop", tol=1e-5,
init_param={"init_method": "random_uniform"},
alpha=0.01, max_iter=3, early_stop="diff", batch_size=320, learning_rate=0.15)
hetero_lr_1 = HeteroLR(name="hetero_lr_1")
hetero_lr_2 = HeteroLR(name="hetero_lr_2", penalty="L2", optimizer="rmsprop", tol=1e-5,
init_param={"init_method": "random_uniform"},
alpha=0.01, max_iter=3, early_stop="diff", batch_size=320, learning_rate=0.15,
cv_param={"n_splits": 5,
"shuffle": True,
"random_seed": 103,
"need_cv": True})
hetero_sshe_lr_0 = HeteroSSHELR(name="hetero_sshe_lr_0", reveal_every_iter=True, reveal_strategy="respectively",
penalty="L2", optimizer="rmsprop", tol=1e-5, batch_size=320, learning_rate=0.15,
init_param={"init_method": "random_uniform"}, alpha=0.01, max_iter=3)
hetero_sshe_lr_1 = HeteroSSHELR(name="hetero_sshe_lr_1")
local_baseline_0 = LocalBaseline(name="local_baseline_0", model_name="LogisticRegression",
model_opts={"penalty": "l2", "tol": 0.0001, "C": 1.0, "fit_intercept": True,
"solver": "lbfgs", "max_iter": 5, "multi_class": "ovr"})
local_baseline_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True)
local_baseline_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
local_baseline_1 = LocalBaseline(name="local_baseline_1")
hetero_secureboost_0 = HeteroSecureBoost(name="hetero_secureboost_0", num_trees=3)
hetero_secureboost_1 = HeteroSecureBoost(name="hetero_secureboost_1")
hetero_secureboost_2 = HeteroSecureBoost(name="hetero_secureboost_2", num_trees=3,
cv_param={"shuffle": False, "need_cv": True})
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=3, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
floating_point_precision=23)
hetero_linr_1 = HeteroLinR(name="hetero_linr_1")
hetero_sshe_linr_0 = HeteroSSHELinR(name="hetero_sshe_linr_0", max_iter=5, early_stop="weight_diff", batch_size=-1)
hetero_sshe_linr_1 = HeteroSSHELinR(name="hetero_sshe_linr_1")
hetero_poisson_0 = HeteroPoisson(name="hetero_poisson_0", early_stop="weight_diff", max_iter=10,
alpha=100.0, batch_size=-1, learning_rate=0.01, optimizer="rmsprop",
exposure_colname="exposure", decay_sqrt=False, tol=0.001,
init_param={"init_method": "zeros"}, penalty="L2")
hetero_poisson_1 = HeteroPoisson(name="hetero_poisson_1")
evaluation_0 = Evaluation(name="evaluation_0")
    evaluation_1 = Evaluation(name="evaluation_1", eval_type="regression")
    evaluation_2 = Evaluation(name="evaluation_2", eval_type="regression")
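    # evaluation_0 scores the binary classifiers (hetero LR, SSHE LR, local baseline),
    # evaluation_1 the linear regressors, and evaluation_2 the Poisson models.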
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(reader_2)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1, data=Data(data=reader_1.output.data),
model=Model(model=data_transform_0.output.model))
pipeline.add_component(data_transform_2, data=Data(data=reader_2.output.data),
model=Model(model=data_transform_0.output.model))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(intersection_2, data=Data(data=data_transform_2.output.data))
pipeline.add_component(union_0, data=Data(data=[intersection_0.output.data, intersection_2.output.data]))
pipeline.add_component(federated_sample_0, data=Data(data=intersection_1.output.data))
pipeline.add_component(feature_scale_0, data=Data(data=union_0.output.data))
pipeline.add_component(feature_scale_1, data=Data(data=federated_sample_0.output.data),
model=Model(model=feature_scale_0.output.model))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=feature_scale_0.output.data))
pipeline.add_component(hetero_feature_binning_1, data=Data(data=feature_scale_1.output.data),
model=Model(model=hetero_feature_binning_0.output.model))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=hetero_feature_binning_0.output.data))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=hetero_feature_binning_1.output.data),
model=Model(model=hetero_feature_selection_0.output.model))
pipeline.add_component(one_hot_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(one_hot_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(model=one_hot_0.output.model))
pipeline.add_component(hetero_lr_0, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(hetero_lr_1, data=Data(test_data=one_hot_1.output.data),
model=Model(model=hetero_lr_0.output.model))
pipeline.add_component(hetero_lr_2, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(local_baseline_0, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(local_baseline_1, data=Data(test_data=one_hot_1.output.data),
model=Model(model=local_baseline_0.output.model))
pipeline.add_component(hetero_sshe_lr_0, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(hetero_sshe_lr_1, data=Data(test_data=one_hot_1.output.data),
model=Model(model=hetero_sshe_lr_0.output.model))
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(hetero_secureboost_1, data=Data(test_data=one_hot_1.output.data),
model=Model(model=hetero_secureboost_0.output.model))
pipeline.add_component(hetero_secureboost_2, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(hetero_linr_1, data=Data(test_data=one_hot_1.output.data),
model=Model(model=hetero_linr_0.output.model))
pipeline.add_component(hetero_sshe_linr_0, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(hetero_sshe_linr_1, data=Data(test_data=one_hot_1.output.data),
model=Model(model=hetero_sshe_linr_0.output.model))
pipeline.add_component(hetero_poisson_0, data=Data(train_data=one_hot_0.output.data))
pipeline.add_component(hetero_poisson_1, data=Data(test_data=one_hot_1.output.data),
model=Model(model=hetero_poisson_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data, hetero_lr_1.output.data,
hetero_sshe_lr_0.output.data, hetero_sshe_lr_1.output.data,
local_baseline_0.output.data, local_baseline_1.output.data]))
pipeline.add_component(evaluation_1,
data=Data(
data=[hetero_linr_0.output.data, hetero_linr_1.output.data,
                                     hetero_sshe_linr_0.output.data, hetero_sshe_linr_1.output.data]))
pipeline.add_component(evaluation_2,
data=Data(
data=[hetero_poisson_0.output.data, hetero_poisson_1.output.data]))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("evaluation_0").get_summary())
print(pipeline.get_component("evaluation_1").get_summary())
print(pipeline.get_component("evaluation_2").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 13,231 | 52.788618 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_poisson/pipeline-hetero-poisson-sparse.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroPoisson
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0", output_format="sparse")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True,
label_name="doctorco",
label_type="float")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_poisson_0 = HeteroPoisson(name="hetero_poisson_0", early_stop="weight_diff", max_iter=2,
alpha=100.0, batch_size=-1, learning_rate=0.01,
exposure_colname="exposure", optimizer="rmsprop",
penalty="L2", decay_sqrt=False, tol=0.001,
init_param={"init_method": "zeros"},
)
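    # exposure_colname marks the exposure feature: Poisson regression uses
    # log(exposure) as an offset, so the model learns event rates per unit exposure.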
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_poisson_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_poisson_0.output.data))
pipeline.compile()
pipeline.fit()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,736 | 42.453488 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_poisson/pipeline-hetero-poisson-validate.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroPoisson
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = [{"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"},
{"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"}]
host_train_data = [{"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"},
{"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"}]
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data[0])
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data[0])
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data[1])
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data[1])
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_1 = DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="doctorco",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
hetero_poisson_0 = HeteroPoisson(name="hetero_poisson_0", early_stop="weight_diff", max_iter=20,
exposure_colname="exposure", optimizer="rmsprop", tol=0.001,
alpha=100.0, batch_size=-1, learning_rate=0.01, penalty="L2",
callback_param={"callbacks": ["EarlyStopping", "PerformanceEvaluate"],
"validation_freqs": 1,
"early_stopping_rounds": 5,
"metrics": [
"mean_absolute_error",
"root_mean_squared_error"
],
"use_first_metric_only": False,
"save_freq": 1
},
init_param={"init_method": "zeros"})
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_poisson_0, data=Data(train_data=intersection_0.output.data,
validate_data=intersect_1.output.data))
pipeline.compile()
pipeline.fit()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,891 | 43.472727 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_poisson/pipeline-hetero-poisson-warm-start.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroPoisson
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="doctorco",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_poisson_0 = HeteroPoisson(name="hetero_poisson_0", early_stop="weight_diff", max_iter=3,
alpha=100.0, batch_size=-1, learning_rate=0.01, optimizer="rmsprop",
exposure_colname="exposure", decay_sqrt=False, tol=0.001,
callback_param={"callbacks": ["ModelCheckpoint"]},
init_param={"init_method": "zeros"}, penalty="L2")
hetero_poisson_1 = HeteroPoisson(name="hetero_poisson_1", early_stop="weight_diff", max_iter=10,
alpha=100.0, batch_size=-1, learning_rate=0.01, optimizer="rmsprop",
exposure_colname="exposure", decay_sqrt=False, tol=0.001, penalty="L2")
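    # hetero_poisson_0 saves checkpoints through the ModelCheckpoint callback; the
    # Model(model=...) link below lets hetero_poisson_1 warm-start from that fitted
    # model rather than from zero-initialized weights.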
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_poisson_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_poisson_1, data=Data(train_data=intersection_0.output.data),
model=Model(model=hetero_poisson_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_poisson_1.output.data))
pipeline.compile()
pipeline.fit()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,120 | 42.840426 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_poisson/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/pipeline/hetero_poisson/pipeline-hetero-poisson-cv.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroPoisson
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="doctorco",
label_type="float",
output_format="dense",
missing_fill=True,
outlier_replace=False)
data_transform_0.get_party_instance(
role='host',
party_id=host).component_param(
with_label=False,
output_format="dense",
outlier_replace=False)
intersection_0 = Intersection(name="intersection_0")
hetero_poisson_0 = HeteroPoisson(name="hetero_poisson_0", early_stop="weight_diff", max_iter=10,
optimizer="rmsprop", tol=0.001, penalty="L2",
alpha=100.0, batch_size=-1, learning_rate=0.01,
exposure_colname="exposure", decay_sqrt=False,
init_param={"init_method": "zeros"},
cv_param={
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": True
})
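    # need_cv=True makes the component run 5-fold cross-validation internally, so no
    # separate Evaluation component is wired up; metrics are reported per fold.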
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_poisson_0, data=Data(train_data=intersection_0.output.data))
pipeline.compile()
pipeline.fit()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,726 | 38.231579 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_poisson/pipeline-hetero-poisson.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroPoisson
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "dvisits_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "dvisits_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="doctorco",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_poisson_0 = HeteroPoisson(name="hetero_poisson_0", early_stop="weight_diff", max_iter=10,
alpha=100.0, batch_size=-1, learning_rate=0.01, optimizer="rmsprop",
exposure_colname="exposure", decay_sqrt=False, tol=0.001,
init_param={"init_method": "zeros"}, penalty="L2")
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_poisson_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_poisson_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, hetero_poisson_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,117 | 38.980583 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/data_split/pipeline-hetero-data-split.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroDataSplit
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_data_split_0 = HeteroDataSplit(name="hetero_data_split_0", stratified=True,
test_size=0.3, split_points=[0.0, 0.2])
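    # stratified split over a continuous label: split_points [0.0, 0.2] bin the label
    # into intervals so each interval is sampled proportionally into train and test.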
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=10, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"})
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_data_split_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=hetero_data_split_0.output.data.train_data,
validate_data=hetero_data_split_0.output.data.test_data))
pipeline.compile()
pipeline.fit()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,599 | 40.37931 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/data_split/pipeline-hetero-data-split-multi-model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroDataSplit
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_data_split_0 = HeteroDataSplit(name="hetero_data_split_0", stratified=False,
test_size=0.3, validate_size=0.2)
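    # non-stratified three-way split: 30% test, 20% validate, and the remaining 50%
    # serves as training data below.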
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=10, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"})
    hetero_linr_1 = HeteroLinR(name="hetero_linr_1")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_data_split_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=hetero_data_split_0.output.data.train_data,
validate_data=hetero_data_split_0.output.data.validate_data))
pipeline.add_component(hetero_linr_1, data=Data(test_data=hetero_data_split_0.output.data.test_data),
model=Model(model=hetero_linr_0.output.model))
pipeline.compile()
pipeline.fit()


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,848 | 41.296703 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/data_split/pipeline-homo-data-split-validate.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HomoDataSplit
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
output_format="dense",
label_name="y",
label_type="int")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=True)
homo_data_split_0 = HomoDataSplit(name="homo_data_split_0", stratified=True, validate_size=0.2)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(homo_data_split_0, data=Data(data=data_transform_0.output.data))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("homo_data_split_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 2,745 | 34.205128 | 103 |
py
|
FATE
|
FATE-master/examples/pipeline/data_split/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/pipeline/data_split/pipeline-homo-data-split.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HomoDataSplit
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
output_format="dense",
label_name="y",
label_type="int")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=True)
homo_data_split_0 = HomoDataSplit(name="homo_data_split_0", stratified=False, test_size=0.3, validate_size=0.2)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(homo_data_split_0, data=Data(data=data_transform_0.output.data))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("homo_data_split_0").get_summary())


if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 2,761 | 34.410256 | 115 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_kmeans/pipeline-kmeans-validate.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroKmeans
from pipeline.component import Intersection
from pipeline.component import Evaluation
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
guest_eval_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_eval_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_eval_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_eval_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
data_transform_1 = DataTransform(name="data_transform_1")
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
intersection_1 = Intersection(name="intersection_1")
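    # k-means hyperparameters: number of clusters k and the cap on centroid-update iterations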
param = {
"k": 3,
"max_iter": 10
}
hetero_kmeans_0 = HeteroKmeans(name='hetero_kmeans_0', **param)
hetero_kmeans_1 = HeteroKmeans(name='hetero_kmeans_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type='clustering')
evaluation_1 = Evaluation(name='evaluation_1', eval_type='clustering')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
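    # data_transform_1 reuses the model fitted by data_transform_0 so validate data is transformed consistently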
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
    # set train data of the hetero_kmeans components
pipeline.add_component(hetero_kmeans_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_kmeans_1, data=Data(train_data=intersection_1.output.data))
# print(f"data: {hetero_kmeans_0.output.data.data[0]}")
pipeline.add_component(evaluation_0, data=Data(data=hetero_kmeans_0.output.data.data[0]))
pipeline.add_component(evaluation_1, data=Data(data=hetero_kmeans_1.output.data.data[0]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_kmeans_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,285 | 41.288 | 109 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_kmeans/pipeline-kmeans-multi-host.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroKmeans
from pipeline.component import Intersection
from pipeline.component import Evaluation
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = [{"name": "breast_hetero_host", "namespace": f"experiment{namespace}"},
{"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}]
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=hosts, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_train_data[0])
reader_0.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_train_data[1])
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=hosts[0]).component_param(with_label=False)
data_transform_0.get_party_instance(role='host', party_id=hosts[1]).component_param(with_label=False)
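    # each host holds unlabeled features only, so with_label=False is configured per host party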
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
param = {
"k": 3,
"max_iter": 10
}
hetero_kmeans_0 = HeteroKmeans(name='hetero_kmeans_0', **param)
evaluation_0 = Evaluation(name='evaluation_0', eval_type='clustering')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_kmeans_0,
data=Data(train_data=intersection_0.output.data))
# print(f"data: {hetero_kmeans_0.output.data.data[0]}")
pipeline.add_component(evaluation_0, data=Data(data=hetero_kmeans_0.output.data.data[0]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_kmeans_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,365 | 39.055046 | 109 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_kmeans/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 614 | 40 | 75 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_kmeans/pipeline-kmeans.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroKmeans
from pipeline.component import Intersection
from pipeline.component import Evaluation
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
param = {
"k": 3,
"max_iter": 10
}
hetero_kmeans_0 = HeteroKmeans(name='hetero_kmeans_0', **param)
evaluation_0 = Evaluation(name='evaluation_0', eval_type='clustering')
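    # note: HeteroKmeans exposes more than one data output; the clustering evaluation below
    # consumes the first one via output.data.data[0]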
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    # set train data of hetero_kmeans_0 component
pipeline.add_component(hetero_kmeans_0, data=Data(train_data=intersection_0.output.data))
print(f"data: {hetero_kmeans_0.output.data.data[0]}")
pipeline.add_component(evaluation_0, data=Data(data=hetero_kmeans_0.output.data.data[0]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_kmeans_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,094 | 37.632075 | 109 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_kmeans/pipeline-kmeans-with-feature-engineering.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroKmeans
from pipeline.component import Intersection
from pipeline.component import HeteroFeatureBinning
from pipeline.component import HeteroFeatureSelection
from pipeline.component import Evaluation
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
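    # bin all features (bin_indexes=-1) with the optimal-binning method, which searches for
    # split points that maximize information value (IV)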
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
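    # selection combines a manual filter (drop feature index 1) with an IV filter that first
    # keeps the top-10 features by IV, then drops any whose IV falls below 0.001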
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["manually", "iv_filter"],
"manually_param": {
"filter_out_indexes": [1]
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.001]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
param = {
"k": 3,
"max_iter": 10
}
hetero_kmeans_0 = HeteroKmeans(name='hetero_kmeans_0', **param)
evaluation_0 = Evaluation(name='evaluation_0', eval_type='clustering')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    # feed intersected data into the feature engineering components
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=hetero_feature_binning_0.output.model))
pipeline.add_component(hetero_kmeans_0, data=Data(train_data=hetero_feature_selection_0.output.data))
print(f"data: {hetero_kmeans_0.output.data.data[0]}")
pipeline.add_component(evaluation_0, data=Data(data=hetero_kmeans_0.output.data.data[0]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_kmeans_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,292 | 37.635036 | 109 |
py
|
FATE
|
FATE-master/examples/pipeline/sample_weight/pipeline-sample-weight-transform.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import SampleWeight
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="y",
label_type="int",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
sample_weight_0 = SampleWeight(name="sample_weight_0")
sample_weight_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True,
class_weight="balanced")
sample_weight_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
sample_weight_1 = SampleWeight(name="sample_weight_1")
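    # sample_weight_1 runs in transform mode: it takes sample_weight_0's fitted model (the
    # balanced class weights) via the Model interface and applies the same weights to its input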
hetero_lr_0 = HeteroLR(name="hetero_lr_0", optimizer="nesterov_momentum_sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15,
init_param={"init_method": "zeros"})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(sample_weight_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(sample_weight_1,
data=Data(data=intersection_0.output.data),
model=Model(model=sample_weight_0.output.model))
pipeline.add_component(hetero_lr_0, data=Data(train_data=sample_weight_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, sample_weight_0, hetero_lr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,769 | 40.12069 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/sample_weight/pipeline-sample-weight-balanced.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import SampleWeight
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="y",
label_type="int",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
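    # class_weight="balanced" (guest side only) derives per-class weights inversely proportional
    # to label frequency; the host skips this step since it holds no labels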
sample_weight_0 = SampleWeight(name="sample_weight_0")
sample_weight_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True,
class_weight="balanced")
sample_weight_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
hetero_lr_0 = HeteroLR(name="hetero_lr_0", optimizer="nesterov_momentum_sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15,
init_param={"init_method": "zeros"})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(sample_weight_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_lr_0, data=Data(train_data=sample_weight_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, sample_weight_0, hetero_lr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,511 | 39.648649 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/sample_weight/pipeline-sample-weight-name.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import SampleWeight, FederatedSample
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.component import FeatureScale
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest_sid", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host_sid", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0", with_match_id=True)
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="y",
label_type="int",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
scale_0 = FeatureScale(name="scale_0", method="min_max_scale", mode="normal", scale_names=["x0"])
sample_weight_0 = SampleWeight(name="sample_weight_0")
sample_weight_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True,
sample_weight_name="x0")
sample_weight_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
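    # exact_by_weight resamples instances according to the weight column, so after min-max
    # scaling "x0" into [0, 1] it roughly controls each row's replication count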
federated_sampler_0 = FederatedSample(name="federated_sampler_0", mode="exact_by_weight")
hetero_lr_0 = HeteroLR(name="hetero_lr_0", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.1,
init_param={"init_method": "random_uniform"})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(scale_0, data=Data(intersection_0.output.data))
pipeline.add_component(sample_weight_0, data=Data(data=scale_0.output.data))
pipeline.add_component(federated_sampler_0, data=Data(sample_weight_0.output.data))
pipeline.add_component(hetero_lr_0, data=Data(train_data=federated_sampler_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, hetero_lr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,930 | 41.145299 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/sample_weight/pipeline-sample-weight-multi-host.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import SampleWeight
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(
role='guest', party_id=guest).set_roles(
guest=guest, host=hosts, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=hosts).component_param(table=host_train_data)
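    # passing the full hosts list as party_id applies the same table config to every host party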
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="y",
label_type="int",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=hosts).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
sample_weight_0 = SampleWeight(name="sample_weight_0")
sample_weight_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True,
class_weight="balanced")
sample_weight_0.get_party_instance(role='host', party_id=hosts).component_param(need_run=False)
hetero_lr_0 = HeteroLR(name="hetero_lr_0", optimizer="nesterov_momentum_sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15,
init_param={"init_method": "zeros"})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(sample_weight_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_lr_0, data=Data(train_data=sample_weight_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,939 | 39.618557 | 109 |
py
|
FATE
|
FATE-master/examples/pipeline/sample_weight/pipeline-sample-weight-class-dict-feature-engineering.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import SampleWeight
from pipeline.component import Intersection
from pipeline.component import HeteroFeatureBinning
from pipeline.component import HeteroFeatureSelection
from pipeline.component import FeatureScale
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="y",
label_type="int",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
binning_param = {
"name": 'hetero_feature_binning_0',
"method": "quantile",
"compress_thres": 10000,
"head_size": 10000,
"error": 0.001,
"bin_num": 10,
"bin_indexes": -1,
"bin_names": None,
"category_indexes": None,
"category_names": None,
"adjustment_factor": 0.5,
"local_only": False,
"transform_param": {
"transform_cols": -1,
"transform_names": None,
"transform_type": "bin_num"
}
}
selection_param = {
"name": "hetero_feature_selection_0",
"select_col_indexes": -1,
"select_names": [],
"filter_methods": [
"iv_value_thres"
],
"iv_value_param": {
"value_threshold": 0.1
}}
hetero_feature_binning_0 = HeteroFeatureBinning(**binning_param)
hetero_feature_selection_0 = HeteroFeatureSelection(**selection_param)
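    # explicit class weights: label "0" keeps weight 1, label "1" is upweighted to 2 (guest only)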
sample_weight_0 = SampleWeight(name="sample_weight_0")
sample_weight_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True,
class_weight={"0": 1, "1": 2})
sample_weight_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
feature_scale_0 = FeatureScale(name="feature_scale_0", method="standard_scale", need_run=True)
hetero_lr_0 = HeteroLR(name="hetero_lr_0", optimizer="nesterov_momentum_sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15,
init_param={"init_method": "zeros"})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(sample_weight_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=sample_weight_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=hetero_feature_binning_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model]))
pipeline.add_component(feature_scale_0, data=Data(hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_lr_0, data=Data(train_data=feature_scale_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,546 | 38.906475 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-cv.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
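    # with need_cv=True the component runs 5-fold cross-validation internally instead of a
    # single fit, so no evaluation or predict stage is wired downstream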
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="None", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
cv_param={"n_splits": 5,
"shuffle": False,
"random_seed": 42,
"need_cv": True
})
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,444 | 38.597701 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-sqn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
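    # optimizer="sqn" selects stochastic quasi-Newton: every update_interval_L mini-batch steps
    # a curvature pair is estimated on sample_size rows, keeping the last memory_M pairs
    # (L-BFGS style); floating_point_precision=23 encodes floats as fixed-point integers to
    # speed up encrypted arithmetic (per FATE docs)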
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sqn", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
floating_point_precision=23,
sqn_param={
"update_interval_L": 3,
"memory_M": 5,
"sample_size": 5000,
"random_seed": None
})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, hetero_linr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,413 | 39.127273 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-sparse-sqn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0", output_format="sparse",
missing_fill=True, outlier_replace=False)
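    # output_format="sparse" keeps instances in sparse representation; missing values are
    # imputed (missing_fill=True) while outlier replacement stays off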
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True,
label_name="motor_speed",
label_type="float")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sqn", tol=0.001,
alpha=0.01, max_iter=2, early_stop="weight_diff", batch_size=100,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
sqn_param={
"update_interval_L": 3,
"memory_M": 5,
"sample_size": 5000,
"random_seed": None
})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,077 | 43.813187 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-sparse.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(
name="data_transform_0",
output_format="sparse",
missing_fill=True,
outlier_replace=False)
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True,
label_name="motor_speed",
label_type="float")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=2, early_stop="weight_diff", batch_size=100,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,776 | 41.920455 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-warm-start.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0", only_output_key=False)
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=5, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
callback_param={"callbacks": ["ModelCheckpoint"]},
init_param={"init_method": "zeros"},
floating_point_precision=23)
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
hetero_linr_1 = HeteroLinR(name="hetero_linr_1", max_iter=15,
penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
floating_point_precision=23
)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_linr_1, data=Data(train_data=intersection_0.output.data),
model=Model(hetero_linr_0.output.model))
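    # warm start: hetero_linr_1 is initialized from hetero_linr_0's weights (saved by the
    # ModelCheckpoint callback) and continues training with max_iter=15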
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_1.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,108 | 41.360825 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-sample-weight.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import SampleWeight
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
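    # weight guest instances by their "pm" column; the host holds no labels,
    # so the component is skipped there (need_run=False)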
sample_weight_0 = SampleWeight(name="sample_weight_0")
sample_weight_0.get_party_instance(role='guest', party_id=guest).component_param(need_run=True,
sample_weight_name="pm")
sample_weight_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
floating_point_precision=23)
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(sample_weight_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=sample_weight_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, hetero_linr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,617 | 40.981818 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-validate.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = [{"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"},
{"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}]
host_train_data = [{"name": "motor_hetero_host", "namespace": f"experiment{namespace}"},
{"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}]
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data[0])
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data[0])
reader_1 = Reader(name="reader_1")
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data[1])
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data[1])
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_1 = DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_1.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
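    # train with a validate set: evaluate every iteration and stop early if
    # neither metric improves for 5 consecutive rounds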
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
callback_param={"callbacks": ["EarlyStopping", "PerformanceEvaluate"],
"validation_freqs": 1,
"early_stopping_rounds": 5,
"metrics": [
"mean_absolute_error",
"root_mean_squared_error"
],
"use_first_metric_only": False,
"save_freq": 1
}
)
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(
data_transform_1, data=Data(
data=reader_1.output.data), model=Model(
data_transform_0.output.model))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data,
validate_data=intersect_1.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 5,144 | 41.172131 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
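    # floating_point_precision=23 rounds floats to 23-bit fixed point during
    # the encrypted computation, trading a little precision for speed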
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
floating_point_precision=23)
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, hetero_linr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,116 | 38.586538 | 120 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-multi-host-cv.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = [{"name": "motor_hetero_host", "namespace": f"experiment{namespace}"},
{"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}]
pipeline = PipeLine().set_initiator(
role='guest', party_id=guest).set_roles(
guest=guest, host=hosts, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_train_data[0])
reader_0.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_train_data[1])
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=hosts).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
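    # need_cv=True switches the component to 5-fold cross-validation instead
    # of fitting a single reusable model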
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="None", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"},
cv_param={"n_splits": 5,
"shuffle": False,
"random_seed": 42,
"need_cv": True
}
)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,703 | 38.827957 | 105 |
py
|
FATE
|
FATE-master/examples/pipeline/hetero_linear_regression/pipeline-hetero-linr-multi-host.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
arbiter = parties.arbiter[0]
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = [{"name": "motor_hetero_host", "namespace": f"experiment{namespace}"},
{"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}]
pipeline = PipeLine().set_initiator(
role='guest', party_id=guest).set_roles(
guest=guest, host=hosts, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_train_data[0])
reader_0.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_train_data[1])
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True,
label_name="motor_speed",
label_type="float",
output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=hosts).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
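    # with two hosts, intersection keeps only the ids shared by the guest and
    # both hosts before training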
hetero_linr_0 = HeteroLinR(name="hetero_linr_0", penalty="L2", optimizer="sgd", tol=0.001,
alpha=0.01, max_iter=20, early_stop="weight_diff", batch_size=-1,
learning_rate=0.15, decay=0.0, decay_sqrt=False,
init_param={"init_method": "zeros"})
evaluation_0 = Evaluation(name="evaluation_0", eval_type="regression", pos_label=1)
# evaluation_0.get_party_instance(role='host', party_id=hosts[0]).component_param(need_run=False)
# evaluation_0.get_party_instance(role='host', party_id=hosts[1]).component_param(need_run=False)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_0.output.data))
pipeline.compile()
pipeline.fit()
# predict
# deploy required components
pipeline.deploy_component([data_transform_0, intersection_0, hetero_linr_0])
predict_pipeline = PipeLine()
# add data reader onto predict pipeline
predict_pipeline.add_component(reader_0)
# add selected components from train pipeline onto predict pipeline
# specify data source
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
# run predict model
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,260 | 39.971154 | 105 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/homo_nn/local-homo_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pathlib
import numpy as np
import torch as t
from torch.utils.data import DataLoader, TensorDataset
import pandas
from pipeline.utils.tools import JobConfig
from federatedml.nn.backend.utils.common import global_seed
dataset = {
"vehicle": {
"guest": "examples/data/vehicle_scale_homo_guest.csv",
"host": "examples/data/vehicle_scale_homo_host.csv",
},
"breast": {
"guest": "examples/data/breast_homo_guest.csv",
"host": "examples/data/breast_homo_host.csv",
},
}
def fit(epoch, model, optimizer, loss, batch_size, dataset):
print(
'model is {}, loss is {}, optimizer is {}'.format(
model,
loss,
optimizer))
dl = DataLoader(dataset, batch_size=batch_size)
for i in range(epoch):
epoch_loss = 0
for feat, label in dl:
optimizer.zero_grad()
pred = model(feat)
l = loss(pred, label)
epoch_loss += l.detach().numpy()
l.backward()
optimizer.step()
print('epoch is {}, epoch loss is {}'.format(i, epoch_loss))
def compute_acc(pred, label, is_multy):
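    # multi-class: argmax over class scores; binary: threshold sigmoid at 0.5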
if is_multy:
pred = pred.argmax(axis=1)
else:
pred = (pred > 0.5) + 0
return float((pred == label).sum() / len(label))
def main(config="../../config.yaml", param="param_conf.yaml"):
if isinstance(param, str):
param = JobConfig.load_from_file(param)
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
epoch = param["epoch"]
lr = param["lr"]
batch_size = param.get("batch_size", -1)
is_multy = param["is_multy"]
data = dataset[param.get("dataset", "vehicle")]
global_seed(123)
if is_multy:
loss = t.nn.CrossEntropyLoss()
else:
loss = t.nn.BCELoss()
data_path = pathlib.Path(data_base_dir)
data_with_label = pandas.concat(
[
pandas.read_csv(data_path.joinpath(data["guest"]), index_col=0),
pandas.read_csv(data_path.joinpath(data["host"]), index_col=0),
]
).values
data = t.Tensor(data_with_label[:, 1:])
labels = t.Tensor(data_with_label[:, 0])
if is_multy:
labels = labels.type(t.int64)
else:
labels = labels.reshape((-1, 1))
ds = TensorDataset(data, labels)
input_shape = data.shape[1]
output_shape = 4 if is_multy else 1
out_act = t.nn.Softmax(dim=1) if is_multy else t.nn.Sigmoid()
model = t.nn.Sequential(
t.nn.Linear(input_shape, 16),
t.nn.ReLU(),
t.nn.Linear(16, output_shape),
out_act
)
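    # batch_size=-1 means full-batch training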
if batch_size < 0:
batch_size = len(data_with_label)
optimizer = t.optim.Adam(model.parameters(), lr=lr)
fit(epoch, model, optimizer, loss, batch_size, ds)
pred_rs = model(data)
acc = compute_acc(pred_rs, labels, is_multy)
metric_summary = {"accuracy": acc}
print(metric_summary)
data_summary = {}
return data_summary, metric_summary
| 3,738 | 27.761538 | 76 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_nn/fate-homo_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform, HomoNN, Evaluation
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
from pipeline.component.nn import TrainerParam, DatasetParam
fate_torch_hook(t)
class dataset(object):
breast = {
"guest": {"name": "breast_homo_guest", "namespace": "experiment"},
"host": [
{"name": "breast_homo_host", "namespace": "experiment"},
{"name": "breast_homo_host", "namespace": "experiment"}
]
}
vehicle = {
"guest": {"name": "vehicle_scale_homo_guest", "namespace": "experiment"},
"host": [
{"name": "vehicle_scale_homo_host", "namespace": "experiment"},
{"name": "vehicle_scale_homo_host", "namespace": "experiment"}
]
}
def main(config="../../config.yaml", param="param_conf.yaml", namespace=""):
num_host = 1
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
epoch = param["epoch"]
lr = param["lr"]
batch_size = param.get("batch_size", -1)
is_multy = param["is_multy"]
data = getattr(dataset, param.get("dataset", "vehicle"))
if is_multy:
loss = t.nn.CrossEntropyLoss()
else:
loss = t.nn.BCELoss()
input_shape = 18 if is_multy else 30
output_shape = 4 if is_multy else 1
out_act = t.nn.Softmax(dim=1) if is_multy else t.nn.Sigmoid()
model = t.nn.Sequential(
t.nn.Linear(input_shape, 16),
t.nn.ReLU(),
t.nn.Linear(16, output_shape),
out_act
)
optimizer = t.optim.Adam(model.parameters(), lr=lr)
guest_train_data = data["guest"]
host_train_data = data["host"][:num_host]
for d in [guest_train_data, *host_train_data]:
d["namespace"] = f"{d['namespace']}{namespace}"
hosts = config.parties.host[:num_host]
    pipeline = PipeLine().set_initiator(
        role='guest',
        party_id=config.parties.guest[0]).set_roles(
guest=config.parties.guest[0],
host=hosts,
arbiter=config.parties.arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(
role='guest',
party_id=config.parties.guest[0]).component_param(
table=guest_train_data)
for i in range(num_host):
reader_0.get_party_instance(role='host', party_id=hosts[i]) \
.component_param(table=host_train_data[i])
data_transform_0 = DataTransform(name="data_transform_0", with_label=True)
data_transform_0.get_party_instance(
role='guest', party_id=config.parties.guest[0]) .component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(
role='host',
party_id=hosts).component_param(
with_label=True)
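    # CrossEntropyLoss expects 1-D integer class labels, hence flatten_label
    # and label_dtype='long' for the multi-class dataset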
if is_multy:
ds_param = DatasetParam(
dataset_name='table',
flatten_label=True,
label_dtype='long')
else:
ds_param = DatasetParam(dataset_name='table')
homo_nn_0 = HomoNN(
name="homo_nn_0",
trainer=TrainerParam(
trainer_name='fedavg_trainer',
epochs=epoch,
batch_size=batch_size,
),
dataset=ds_param,
torch_seed=100,
optimizer=optimizer,
loss=loss,
model=model)
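    # homo_nn_1 carries no trainer config: it only loads homo_nn_0's trained
    # model and runs prediction on the same transformed data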
homo_nn_1 = HomoNN(name="homo_nn_1")
if is_multy:
eval_type = "multi"
else:
eval_type = "binary"
evaluation_0 = Evaluation(
name='evaluation_0',
eval_type=eval_type,
metrics=[
"accuracy",
"precision",
"recall"])
pipeline.add_component(reader_0)
pipeline.add_component(
data_transform_0, data=Data(
data=reader_0.output.data))
pipeline.add_component(homo_nn_0, data=Data(
train_data=data_transform_0.output.data))
pipeline.add_component(
homo_nn_1, data=Data(
test_data=data_transform_0.output.data), model=Model(
homo_nn_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=homo_nn_0.output.data))
pipeline.compile()
pipeline.fit()
metric_summary = parse_summary_result(
pipeline.get_component("evaluation_0").get_summary())
nn_0_data = pipeline.get_component("homo_nn_0").get_output_data()
nn_1_data = pipeline.get_component("homo_nn_1").get_output_data()
nn_0_score = extract_data(nn_0_data, "predict_result")
nn_0_label = extract_data(nn_0_data, "label")
nn_1_score = extract_data(nn_1_data, "predict_result")
nn_1_label = extract_data(nn_1_data, "label")
nn_0_score_label = extract_data(nn_0_data, "predict_result", keep_id=True)
nn_1_score_label = extract_data(nn_1_data, "predict_result", keep_id=True)
if eval_type == "binary":
# metric_nn = {
# "score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label),
# "ks_2samp": classification_metric.KSTest.compute(nn_0_score, nn_1_score),
# "mAP_D_value": classification_metric.AveragePrecisionScore().compute(nn_0_score, nn_1_score, nn_0_label,
# nn_1_label)}
# metric_summary["distribution_metrics"] = {"homo_nn": metric_nn}
if metric_summary is None:
metric_summary = {}
metric_summary["accuracy"] = (
nn_0_score == nn_0_label).sum() / len(nn_0_label)
# elif eval_type == "multi":
# metric_nn = {
# "score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label)}
# metric_summary["distribution_metrics"] = {"homo_nn": metric_nn}
data_summary = dict(
train={"guest": guest_train_data["name"], **{f"host_{i}": host_train_data[i]["name"] for i in range(num_host)}},
test={"guest": guest_train_data["name"], **{f"host_{i}": host_train_data[i]["name"] for i in range(num_host)}}
)
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main(args.param)
| 7,460 | 34.870192 | 120 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_nn/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/homo_sbt/gbdt-regression.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_multi.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
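    # homo setting: guest and host share the same feature space, so their rows
    # are stacked to form the pooled training set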
df = pd.concat([df_guest, df_host], axis=0)
y = df[label_name]
X = df.drop(label_name, axis=1)
X_guest = df_guest.drop(label_name, axis=1)
y_guest = df_guest[label_name]
clf = GradientBoostingRegressor(n_estimators=40)
clf.fit(X, y)
y_predict = clf.predict(X_guest)
result = {"mean_squared_error": mean_squared_error(y_guest, y_predict),
"mean_absolute_error": mean_absolute_error(y_guest, y_predict)
}
print(result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,429 | 32.287671 | 82 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_sbt/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/homo_sbt/gbdt-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.ensemble import GradientBoostingClassifier
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_multi.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = pd.concat([df_guest, df_host], axis=0)
y = df[label_name]
X = df.drop(label_name, axis=1)
X_guest = df_guest.drop(label_name, axis=1)
y_guest = df_guest[label_name]
clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.3,)
clf.fit(X, y)
y_pred = clf.predict(X_guest)
acc = accuracy_score(y_guest, y_pred)
result = {"accuracy": acc}
print(result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,344 | 32.985507 | 88 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_sbt/fate-sbt.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HomoSecureBoost
from pipeline.component import Reader
from pipeline.interface.data import Data
from pipeline.component import Evaluation
from pipeline.interface.model import Model
from pipeline.utils.tools import JobConfig
from pipeline.utils.tools import load_job_config
from federatedml.evaluation.metrics import regression_metric, classification_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param='./xgb_config_binary.yaml', namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": param['data_guest_train'], "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": param['data_guest_val'], "namespace": f"experiment{namespace}"}
host_train_data = {"name": param['data_host_train'], "namespace": f"experiment{namespace}"}
host_validate_data = {"name": param['data_host_val'], "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name='data_transform_1')
reader_0, reader_1 = Reader(name="reader_0"), Reader(name='reader_1')
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0.get_party_instance(role='guest', party_id=guest).\
component_param(with_label=True, output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).\
component_param(with_label=True, output_format="dense")
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_validate_data)
data_transform_1.get_party_instance(role='guest', party_id=guest).\
component_param(with_label=True, output_format="dense")
data_transform_1.get_party_instance(role='host', party_id=host).\
component_param(with_label=True, output_format="dense")
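    # homo SBT: every party holds labels; trees are trained collaboratively and
    # evaluated on the validate set every iteration (validation_freqs=1)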
homo_secureboost_0 = HomoSecureBoost(name="homo_secureboost_0",
num_trees=param['tree_num'],
task_type=param['task_type'],
objective_param={"objective": param['loss_func']},
tree_param={"max_depth": param['tree_depth']},
validation_freqs=1,
subsample_feature_rate=1,
learning_rate=param['learning_rate'],
bin_num=50
)
homo_secureboost_1 = HomoSecureBoost(name="homo_secureboost_1")
evaluation_0 = Evaluation(name='evaluation_0', eval_type=param['eval_type'])
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
pipeline.add_component(homo_secureboost_0, data=Data(train_data=data_transform_0.output.data,
validate_data=data_transform_1.output.data))
pipeline.add_component(homo_secureboost_1, data=Data(test_data=data_transform_1.output.data),
model=Model(homo_secureboost_0.output.model))
    pipeline.add_component(evaluation_0, data=Data(data=homo_secureboost_0.output.data))
pipeline.compile()
pipeline.fit()
sbt_0_data = pipeline.get_component("homo_secureboost_0").get_output_data()
sbt_1_data = pipeline.get_component("homo_secureboost_1").get_output_data()
sbt_0_score = extract_data(sbt_0_data, "predict_result")
sbt_0_label = extract_data(sbt_0_data, "label")
sbt_1_score = extract_data(sbt_1_data, "predict_result")
sbt_1_label = extract_data(sbt_1_data, "label")
sbt_0_score_label = extract_data(sbt_0_data, "predict_result", keep_id=True)
sbt_1_score_label = extract_data(sbt_1_data, "predict_result", keep_id=True)
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
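    # augment the evaluation summary with metrics comparing the training
    # component's scores against the standalone predict component's scores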
if param['eval_type'] == "regression":
desc_sbt_0 = regression_metric.Describe().compute(sbt_0_score)
desc_sbt_1 = regression_metric.Describe().compute(sbt_1_score)
metric_summary["script_metrics"] = {"sbt_train": desc_sbt_0,
"sbt_validate": desc_sbt_1}
elif param['eval_type'] == "binary":
metric_sbt = {
"score_diversity_ratio": classification_metric.Distribution.compute(sbt_0_score_label, sbt_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(sbt_0_score, sbt_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(sbt_0_score, sbt_1_score, sbt_0_label,
sbt_1_label)}
metric_summary["distribution_metrics"] = {"homo_sbt": metric_sbt}
elif param['eval_type'] == "multi":
metric_sbt = {
"score_diversity_ratio": classification_metric.Distribution.compute(sbt_0_score_label, sbt_1_score_label)}
metric_summary["distribution_metrics"] = {"homo_sbt": metric_sbt}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_validate_data["name"], "host": host_validate_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 7,207 | 50.120567 | 120 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_sbt/gbdt-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pandas as pd
import os
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import GradientBoostingClassifier
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_binary.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = pd.concat([df_guest, df_host], axis=0)
y = df[label_name]
X = df.drop(label_name, axis=1)
X_guest = df_guest.drop(label_name, axis=1)
y_guest = df_guest[label_name]
clf = GradientBoostingClassifier(n_estimators=120 if 'epsilon' in data_guest else 50, learning_rate=0.1)
clf.fit(X, y)
    # AUC needs the positive-class probability, not hard class predictions
    y_prob = clf.predict_proba(X_guest)[:, 1]
try:
auc_score = roc_auc_score(y_guest, y_prob)
except BaseException:
print(f"no auc score available")
return
result = {"auc": auc_score}
import time
print(result)
print(data_guest)
time.sleep(3)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,492 | 30.961538 | 108 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_nn/local-hetero_nn.py
|
import argparse
import numpy as np
import os
import pandas
from sklearn import metrics
from pipeline.utils.tools import JobConfig
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from torch.utils.data import DataLoader, TensorDataset
from federatedml.nn.backend.utils.common import global_seed
fate_torch_hook(t)
class HeteroLocalModel(t.nn.Module):
def __init__(self, guest_btn, host_btn, interactive, top):
super().__init__()
self.guest_btn = guest_btn
self.host_btn = host_btn
self.inter = interactive
self.top = top
def forward(self, x1, x2):
return self.top(self.inter(self.guest_btn(x1), self.host_btn(x2)))
def build(param, shape1, shape2, lr):
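    # shape1: guest-side feature count, shape2: host-side feature count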
global_seed(101)
guest_bottom = t.nn.Sequential(
nn.Linear(shape1, param["bottom_layer_units"]),
nn.ReLU()
)
host_bottom = t.nn.Sequential(
nn.Linear(shape2, param["bottom_layer_units"]),
nn.ReLU()
)
interactive_layer = t.nn.InteractiveLayer(
guest_dim=param["bottom_layer_units"],
host_dim=param["bottom_layer_units"],
host_num=1,
out_dim=param["interactive_layer_units"])
act = nn.Sigmoid() if param["top_layer_units"] == 1 else nn.Softmax(dim=1)
top_layer = t.nn.Sequential(
t.nn.Linear(
param["interactive_layer_units"],
param["top_layer_units"]),
act)
model = HeteroLocalModel(
guest_bottom,
host_bottom,
interactive_layer,
top_layer)
opt = t.optim.Adam(model.parameters(), lr=lr)
return model, opt
def fit(epoch, model, optimizer, loss, batch_size, dataset):
print(
'model is {}, loss is {}, optimizer is {}'.format(
model,
loss,
optimizer))
dl = DataLoader(dataset, batch_size=batch_size)
for i in range(epoch):
epoch_loss = 0
        for x_guest, x_host, label in dl:
            optimizer.zero_grad()
            pred = model(x_guest, x_host)
l = loss(pred, label)
epoch_loss += l.detach().numpy()
l.backward()
optimizer.step()
print('epoch is {}, epoch loss is {}'.format(i, epoch_loss))
def predict(model, x_host, x_guest):
    # the model takes the guest tensor first (see HeteroLocalModel.forward)
    pred_rs = model(x_guest, x_host)
    return pred_rs.detach().numpy()
def main(config="../../config.yaml", param="./hetero_nn_breast_config.yaml"):
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
# prepare data
Xb = pandas.read_csv(
os.path.join(
data_base_dir,
data_guest),
index_col=idx)
Xa = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
y = Xb[label_name]
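    # keep only the sample ids present on both sides, mimicking the PSI
    # intersection step of the federated pipeline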
out = Xa.drop(Xb.index)
Xa = Xa.drop(out.index)
Xb = Xb.drop(label_name, axis=1)
Xa = t.Tensor(Xa.values)
Xb = t.Tensor(Xb.values)
y = t.Tensor(y.values)
if param["loss"] == "categorical_crossentropy":
loss = t.nn.CrossEntropyLoss()
y = y.type(t.int64).flatten()
else:
loss = t.nn.BCELoss()
y = y.reshape((-1, 1))
model, opt = build(
param, Xb.shape[1], Xa.shape[1], lr=param['learning_rate'])
dataset = TensorDataset(Xb, Xa, y)
fit(epoch=param['epochs'], model=model, optimizer=opt,
batch_size=param['batch_size'], dataset=dataset, loss=loss)
eval_result = {}
for metric in param["metrics"]:
if metric.lower() == "auc":
predict_y = predict(model, Xa, Xb)
auc = metrics.roc_auc_score(y, predict_y)
eval_result["auc"] = auc
elif metric == "accuracy":
predict_y = np.argmax(predict(model, Xa, Xb), axis=1)
acc = metrics.accuracy_score(
y_true=y.detach().numpy(), y_pred=predict_y)
eval_result["accuracy"] = acc
print(eval_result)
data_summary = {}
return data_summary, eval_result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
main()
| 4,651 | 27.365854 | 79 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_nn/fate-hetero_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component.nn import DatasetParam
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
fate_torch_hook(t)
def build(param, shape1, shape2):
guest_bottom = t.nn.Sequential(
nn.Linear(shape1, param["bottom_layer_units"]),
nn.ReLU()
)
host_bottom = t.nn.Sequential(
nn.Linear(shape2, param["bottom_layer_units"]),
nn.ReLU()
)
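    # the interactive layer fuses the guest and host bottom outputs into one
    # out_dim-wide hidden representation consumed by the guest-side top model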
interactive_layer = t.nn.InteractiveLayer(
guest_dim=param["bottom_layer_units"],
host_dim=param["bottom_layer_units"],
host_num=1,
out_dim=param["interactive_layer_units"])
act = nn.Sigmoid() if param["top_layer_units"] == 1 else nn.Softmax(dim=1)
top_layer = t.nn.Sequential(
t.nn.Linear(
param["interactive_layer_units"],
param["top_layer_units"]),
act)
return guest_bottom, host_bottom, interactive_layer, top_layer
def main(
config="../../config.yaml",
param="./hetero_nn_breast_config.yaml",
namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {
"name": param["guest_table_name"],
"namespace": f"experiment{namespace}"}
host_train_data = {
"name": param["host_table_name"],
"namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(
role='guest', party_id=guest).set_roles(
guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(
role='guest',
party_id=guest).component_param(
table=guest_train_data)
reader_0.get_party_instance(
role='host',
party_id=host).component_param(
table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest',
party_id=guest).component_param(
with_label=True)
data_transform_0.get_party_instance(
role='host', party_id=host).component_param(
with_label=False)
intersection_0 = Intersection(name="intersection_0")
guest_bottom, host_bottom, interactive_layer, top_layer = build(
param, param['shape1'], param['shape2'])
if param["loss"] == "categorical_crossentropy":
loss = t.nn.CrossEntropyLoss()
ds_param = DatasetParam(
dataset_name='table',
flatten_label=True,
label_dtype='long')
else:
loss = t.nn.BCELoss()
ds_param = DatasetParam(dataset_name='table')
hetero_nn_0 = HeteroNN(
name="hetero_nn_0",
epochs=param["epochs"],
interactive_layer_lr=param["learning_rate"],
batch_size=param["batch_size"],
seed=100,
dataset=ds_param)
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
guest_nn_0.add_bottom_model(guest_bottom)
guest_nn_0.add_top_model(top_layer)
host_nn_0.add_bottom_model(host_bottom)
hetero_nn_0.set_interactive_layer(interactive_layer)
hetero_nn_0.compile(
optimizer=t.optim.Adam(
lr=param['learning_rate']),
loss=loss)
hetero_nn_1 = HeteroNN(name="hetero_nn_1")
if param["loss"] == "categorical_crossentropy":
eval_type = "multi"
else:
eval_type = "binary"
evaluation_0 = Evaluation(name="evaluation_0", eval_type=eval_type)
pipeline.add_component(reader_0)
pipeline.add_component(
data_transform_0, data=Data(
data=reader_0.output.data))
pipeline.add_component(
intersection_0, data=Data(
data=data_transform_0.output.data))
pipeline.add_component(
hetero_nn_0, data=Data(
train_data=intersection_0.output.data))
pipeline.add_component(
hetero_nn_1, data=Data(
test_data=intersection_0.output.data), model=Model(
hetero_nn_0.output.model))
pipeline.add_component(
evaluation_0, data=Data(
data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
nn_0_data = pipeline.get_component("hetero_nn_0").get_output_data()
nn_1_data = pipeline.get_component("hetero_nn_1").get_output_data()
nn_0_score = extract_data(nn_0_data, "predict_result")
nn_0_label = extract_data(nn_0_data, "label")
nn_1_score = extract_data(nn_1_data, "predict_result")
nn_1_label = extract_data(nn_1_data, "label")
nn_0_score_label = extract_data(nn_0_data, "predict_result", keep_id=True)
nn_1_score_label = extract_data(nn_1_data, "predict_result", keep_id=True)
metric_summary = parse_summary_result(
pipeline.get_component("evaluation_0").get_summary())
if eval_type == "binary":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(
nn_0_score_label,
nn_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(
nn_0_score,
nn_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(
nn_0_score,
nn_1_score,
nn_0_label,
nn_1_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
elif eval_type == "multi":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(
nn_0_score_label, nn_1_score_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
data_summary = {
"train": {
"guest": guest_train_data["name"],
"host": host_train_data["name"]},
"test": {
"guest": guest_train_data["name"],
"host": host_train_data["name"]}}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 7,650 | 32.853982 | 81 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_nn/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_nn_pytorch/local-hetero_nn.py
|
import argparse
import numpy as np
import os
import pandas
from sklearn import metrics
from pipeline.utils.tools import JobConfig
import torch as t
from torch import nn
from torch.utils.data import Dataset, DataLoader
import tqdm
from pipeline import fate_torch_hook
fate_torch_hook(t)
class TestModel(t.nn.Module):
def __init__(self, guest_input_shape, host_input_shape):
super(TestModel, self).__init__()
self.guest_bottom = t.nn.Sequential(
nn.Linear(guest_input_shape, 10, True),
nn.ReLU(),
nn.Linear(10, 8, True),
nn.ReLU()
)
self.host_bottom = t.nn.Sequential(
nn.Linear(host_input_shape, 10, True),
nn.ReLU(),
nn.Linear(10, 8, True),
nn.ReLU()
)
self.inter_a, self.inter_b = t.nn.Linear(8, 4, True), t.nn.Linear(8, 4, True)
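        # inter_a / inter_b emulate FATE's interactive layer locally: each
        # party's bottom output is projected to 4 dims and the two are summed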
self.top_model_guest = t.nn.Sequential(
nn.Linear(4, 1, True),
nn.Sigmoid()
)
def forward(self, data):
x_guest, x_host = data[0].type(t.float), data[1].type(t.float)
guest_fw = self.inter_a(self.guest_bottom(x_guest))
host_fw = self.inter_b(self.host_bottom(x_host))
out = self.top_model_guest(guest_fw + host_fw)
return out
def predict(self, data):
rs = self.forward(data)
return rs.detach().numpy()
class TestDataset(Dataset):
def __init__(self, guest_data, host_data, label):
super(TestDataset, self).__init__()
self.g = guest_data
self.h = host_data
self.l = label
def __getitem__(self, idx):
return self.g[idx], self.h[idx], self.l[idx]
def __len__(self):
return len(self.l)
def build(param, shape1, shape2):
return TestModel(shape1, shape2)
def main(config="./config.yaml", param="./hetero_nn_breast_config.yaml"):
    data_summary, eval_result = {}, {}
    try:
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
# prepare data
Xb = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
Xa = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
y = Xb[label_name]
out = Xa.drop(Xb.index)
Xa = Xa.drop(out.index)
Xb = Xb.drop(label_name, axis=1)
# torch model
model = build(param, Xb.shape[1], Xa.shape[1])
Xb = t.Tensor(Xb.values)
Xa = t.Tensor(Xa.values)
y = t.Tensor(y.values)
dataset = TestDataset(Xb, Xa, y)
batch_size = len(dataset) if param['batch_size'] == -1 else param['batch_size']
dataloader = DataLoader(dataset, batch_size=batch_size)
optimizer = t.optim.Adam(lr=param['learning_rate']).to_torch_instance(model.parameters())
if param['eval_type'] == 'binary':
loss_fn = t.nn.BCELoss()
for i in tqdm.tqdm(range(param['epochs'])):
for gd, hd, label in dataloader:
optimizer.zero_grad()
pred = model([gd, hd])
loss = loss_fn(pred.flatten(), label.type(t.float32))
loss.backward()
optimizer.step()
eval_result = {}
for metric in param["metrics"]:
if metric.lower() == "auc":
predict_y = model.predict([Xb, Xa])
auc = metrics.roc_auc_score(y, predict_y)
eval_result["auc"] = auc
            elif metric == "accuracy":
                predict_y = np.argmax(model.predict([Xb, Xa]), axis=1)
                acc = metrics.accuracy_score(y_true=y, y_pred=predict_y)
                eval_result["accuracy"] = acc
data_summary = {}
except Exception as e:
print(e)
return data_summary, eval_result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 4,792 | 30.741722 | 97 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_nn_pytorch/fate-hetero_nn.py
|
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.component import Evaluation
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config, JobConfig
from pipeline.interface import Model
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
from pipeline import fate_torch_hook
import torch as t
from torch import nn
from torch import optim
from pipeline import fate_torch as ft
fate_torch_hook(t)
def main(config="./config.yaml", param="./hetero_nn_breast_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": param["guest_table_name"], "namespace": f"experiment{namespace}"}
host_train_data = {"name": param["host_table_name"], "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
guest_input_shape = param['guest_input_shape']
host_input_shape = param['host_input_shape']
# define model structures
bottom_model_guest = t.nn.Sequential(
nn.Linear(guest_input_shape, 10, True),
nn.ReLU(),
nn.Linear(10, 8, True),
nn.ReLU()
)
bottom_model_host = t.nn.Sequential(
nn.Linear(host_input_shape, 10, True),
nn.ReLU(),
nn.Linear(10, 8, True),
nn.ReLU()
)
interactive_layer = t.nn.Linear(8, 4, True)
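    # the interactive layer merges the 8-dim guest and host bottom-model outputs
    # and maps them to the 4-dim input expected by the guest top model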
top_model_guest = t.nn.Sequential(
nn.Linear(4, 1, True),
nn.Sigmoid()
)
loss_fn = nn.BCELoss()
opt: ft.optim.Adam = optim.Adam(lr=param['learning_rate'])
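    # after fate_torch_hook, optimizers are declared without model parameters;
    # the pipeline binds them to each party's layers when the job actually runs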
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=param["epochs"],
interactive_layer_lr=param["learning_rate"], batch_size=param["batch_size"],
early_stop="diff")
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
guest_nn_0.add_bottom_model(bottom_model_guest)
guest_nn_0.add_top_model(top_model_guest)
guest_nn_0.set_interactve_layer(interactive_layer)
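    # note: "interactve" (without the second "i") is the method name as spelled
    # in the FATE pipeline API, not a typo in this script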
host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
host_nn_0.add_bottom_model(bottom_model_host)
# do remember to compile
hetero_nn_0.compile(opt, loss=loss_fn)
hetero_nn_1 = HeteroNN(name="hetero_nn_1")
evaluation_0 = Evaluation(name="evaluation_0", eval_type=param['eval_type'])
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_nn_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_nn_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
nn_0_data = pipeline.get_component("hetero_nn_0").get_output_data()
nn_1_data = pipeline.get_component("hetero_nn_1").get_output_data()
nn_0_score = extract_data(nn_0_data, "predict_result")
nn_0_label = extract_data(nn_0_data, "label")
nn_1_score = extract_data(nn_1_data, "predict_result")
nn_1_label = extract_data(nn_1_data, "label")
nn_0_score_label = extract_data(nn_0_data, "predict_result", keep_id=True)
nn_1_score_label = extract_data(nn_1_data, "predict_result", keep_id=True)
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
eval_type = param['eval_type']
if eval_type == "binary":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(nn_0_score, nn_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(nn_0_score, nn_1_score, nn_0_label,
nn_1_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
elif eval_type == "multi":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 6,139 | 40.768707 | 116 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_lr/pipeline-lr-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param="./vehicle_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_set = param.get("data_guest").split('/')[-1]
if data_set == "vehicle_scale_hetero_guest.csv":
guest_data_table = 'vehicle_scale_hetero_guest'
host_data_table = 'vehicle_scale_hetero_host'
else:
raise ValueError(f"Cannot recognized data_set: {data_set}")
guest_train_data = {"name": guest_data_table, "namespace": f"experiment{namespace}"}
host_train_data = {"name": host_data_table, "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection component
intersection_0 = Intersection(name="intersection_0")
lr_param = {
}
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": param["learning_rate"],
"optimizer": param["optimizer"],
"batch_size": param["batch_size"],
"masked_rate": 0,
"shuffle": False,
"early_stop": "diff",
"init_param": {
"init_method": param.get("init_method", 'random_uniform'),
"random_seed": param.get("random_seed", 103)
}
}
lr_param.update(config_param)
print(f"lr_param: {lr_param}, data_set: {data_set}")
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **lr_param)
hetero_lr_1 = HeteroLR(name='hetero_lr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="multi")
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_lr_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_lr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
result_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
lr_0_data = pipeline.get_component("hetero_lr_0").get_output_data()
lr_1_data = pipeline.get_component("hetero_lr_1").get_output_data()
lr_0_score_label = extract_data(lr_0_data, "predict_result", keep_id=True)
lr_1_score_label = extract_data(lr_1_data, "predict_result", keep_id=True)
metric_lr = {
"score_diversity_ratio": classification_metric.Distribution.compute(lr_0_score_label, lr_1_score_label)}
result_summary["distribution_metrics"] = {"hetero_lr": metric_lr}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, result_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-c", "--config", type=str,
help="config file", default="../../config.yaml")
parser.add_argument("-p", "--param", type=str,
help="config file for params", default="./vehicle_config.yaml")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 6,233 | 40.56 | 112 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_lr/pipeline-sshe-lr-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroSSHELR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from fate_test.utils import extract_data, parse_summary_result
from federatedml.evaluation.metrics import classification_metric
def main(config="../../config.yaml", param="./lr_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_set = param.get("data_guest").split('/')[-1]
if data_set == "default_credit_hetero_guest.csv":
guest_data_table = 'default_credit_hetero_guest'
host_data_table = 'default_credit_hetero_host'
elif data_set == 'breast_hetero_guest.csv':
guest_data_table = 'breast_hetero_guest'
host_data_table = 'breast_hetero_host'
elif data_set == 'give_credit_hetero_guest.csv':
guest_data_table = 'give_credit_hetero_guest'
host_data_table = 'give_credit_hetero_host'
elif data_set == 'epsilon_5k_hetero_guest.csv':
guest_data_table = 'epsilon_5k_hetero_guest'
host_data_table = 'epsilon_5k_hetero_host'
else:
raise ValueError(f"Cannot recognized data_set: {data_set}")
guest_train_data = {"name": guest_data_table, "namespace": f"experiment{namespace}"}
host_train_data = {"name": host_data_table, "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection component
intersection_0 = Intersection(name="intersection_0")
lr_param = {
}
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": param["learning_rate"],
"optimizer": param["optimizer"], # use sgd
"batch_size": param["batch_size"],
"early_stop": "diff",
"tol": 1e-4,
"init_param": {
"init_method": param.get("init_method", 'random_uniform'),
"random_seed": param.get("random_seed", 103),
"fit_intercept": True
},
"reveal_strategy": param.get("reveal_strategy", "respectively"),
"reveal_every_iter": True
}
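    # per the FATE SSHE-LR docs (an assumption here, not stated in this script):
    # reveal_every_iter=True reconstructs the secret-shared weights in plaintext
    # at each iteration, and reveal_strategy="respectively" lets each party
    # recover only its own model coefficients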
lr_param.update(config_param)
print(f"lr_param: {lr_param}, data_set: {data_set}")
hetero_sshe_lr_0 = HeteroSSHELR(name='hetero_sshe_lr_0', **lr_param)
hetero_sshe_lr_1 = HeteroSSHELR(name='hetero_sshe_lr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_sshe_lr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_sshe_lr_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_sshe_lr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_sshe_lr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
lr_0_data = pipeline.get_component("hetero_sshe_lr_0").get_output_data()
lr_1_data = pipeline.get_component("hetero_sshe_lr_1").get_output_data()
lr_0_score = extract_data(lr_0_data, "predict_result")
lr_0_label = extract_data(lr_0_data, "label")
lr_1_score = extract_data(lr_1_data, "predict_result")
lr_1_label = extract_data(lr_1_data, "label")
lr_0_score_label = extract_data(lr_0_data, "predict_result", keep_id=True)
lr_1_score_label = extract_data(lr_1_data, "predict_result", keep_id=True)
result_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
metric_lr = {
"score_diversity_ratio": classification_metric.Distribution.compute(lr_0_score_label, lr_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(lr_0_score, lr_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(lr_0_score, lr_1_score, lr_0_label,
lr_1_label)}
result_summary["distribution_metrics"] = {"hetero_lr": metric_lr}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
print(f"result_summary: {result_summary}; data_summary: {data_summary}")
return data_summary, result_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-c", "--config", type=str,
help="config file", default="../../config.yaml")
parser.add_argument("-p", "--param", type=str,
help="config file for params", default="./breast_config.yaml")
args = parser.parse_args()
main(args.config, args.param)
| 7,364 | 43.636364 | 112 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_lr/pipeline-sshe-lr-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroSSHELR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param="./vehicle_sshe_lr_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_set = param.get("data_guest").split('/')[-1]
if data_set == "vehicle_scale_hetero_guest.csv":
guest_data_table = 'vehicle_scale_hetero_guest'
host_data_table = 'vehicle_scale_hetero_host'
else:
raise ValueError(f"Cannot recognized data_set: {data_set}")
guest_train_data = {"name": guest_data_table, "namespace": f"experiment{namespace}"}
host_train_data = {"name": host_data_table, "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection component
intersection_0 = Intersection(name="intersection_0")
lr_param = {
}
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": param["learning_rate"],
"optimizer": param["optimizer"], # use sgd
"batch_size": param["batch_size"],
"early_stop": "diff",
"init_param": {
"init_method": param.get("init_method", 'random_uniform'),
"random_seed": param.get("random_seed", 103),
"fit_intercept": True
},
"reveal_strategy": param.get("reveal_strategy", "respectively"),
"reveal_every_iter": True
}
lr_param.update(config_param)
print(f"lr_param: {lr_param}, data_set: {data_set}")
hetero_sshe_lr_0 = HeteroSSHELR(name='hetero_sshe_lr_0', **lr_param)
hetero_sshe_lr_1 = HeteroSSHELR(name='hetero_sshe_lr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="multi")
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_sshe_lr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_sshe_lr_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_sshe_lr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_sshe_lr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
result_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
lr_0_data = pipeline.get_component("hetero_sshe_lr_0").get_output_data()
lr_1_data = pipeline.get_component("hetero_sshe_lr_1").get_output_data()
lr_0_score_label = extract_data(lr_0_data, "predict_result", keep_id=True)
lr_1_score_label = extract_data(lr_1_data, "predict_result", keep_id=True)
metric_lr = {
"score_diversity_ratio": classification_metric.Distribution.compute(lr_0_score_label, lr_1_score_label)}
result_summary["distribution_metrics"] = {"hetero_lr": metric_lr}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
print(f"result_summary: {result_summary}; data_summary: {data_summary}")
return data_summary, result_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-c", "--config", type=str,
help="config file", default="../../config.yaml")
parser.add_argument("-p", "--param", type=str,
help="config file for params", default="./vehicle_sshe_lr_config.yaml")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 6,491 | 41.431373 | 112 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_lr/sklearn-lr-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pandas
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score, roc_curve
import os
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./vehicle_config.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
if isinstance(config, str):
config = JobConfig.load_from_file(config)
print(f"config: {config}")
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
config_param = {
"penalty": param["penalty"],
"max_iter": 100,
"alpha": param["alpha"],
"learning_rate": "optimal",
"eta0": param["learning_rate"],
"random_state": 105
}
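    # note: with learning_rate="optimal", sklearn's SGDClassifier derives the
    # step size from alpha and ignores eta0, so the eta0 value above is inert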
# prepare data
df_guest = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix="host")
y = df[label_name]
X = df.drop(label_name, axis=1)
# x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
x_train, x_test, y_train, y_test = X, X, y, y
# lm = LogisticRegression(max_iter=20)
lm = SGDClassifier(loss="log", **config_param)
lm_fit = lm.fit(x_train, y_train)
y_pred = lm_fit.predict(x_test)
y_prob = lm_fit.predict_proba(x_test)[:, 1]
try:
auc_score = roc_auc_score(y_test, y_prob)
except BaseException:
print(f"no auc score available")
return
recall = recall_score(y_test, y_pred, average="macro")
pr = precision_score(y_test, y_pred, average="macro")
acc = accuracy_score(y_test, y_pred)
# y_predict_proba = est.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_prob)
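    # KS statistic: maximum gap between TPR and FPR across all thresholds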
ks = max(tpr - fpr)
result = {"auc": auc_score, "recall": recall, "precision": pr, "accuracy": acc}
print(result)
print(f"coef_: {lm_fit.coef_}, intercept_: {lm_fit.intercept_}, n_iter: {lm_fit.n_iter_}")
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-p", "--param", type=str, default="./breast_config.yaml",
help="config file for params")
args = parser.parse_args()
main(param=args.param)
| 3,244 | 34.271739 | 99 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_lr/pipeline-lr-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from pipeline.runtime.entity import JobParameters
from fate_test.utils import extract_data, parse_summary_result
from federatedml.evaluation.metrics import classification_metric
def main(config="../../config.yaml", param="./lr_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_set = param.get("data_guest").split('/')[-1]
if data_set == "default_credit_hetero_guest.csv":
guest_data_table = 'default_credit_hetero_guest'
host_data_table = 'default_credit_hetero_host'
elif data_set == 'breast_hetero_guest.csv':
guest_data_table = 'breast_hetero_guest'
host_data_table = 'breast_hetero_host'
elif data_set == 'give_credit_hetero_guest.csv':
guest_data_table = 'give_credit_hetero_guest'
host_data_table = 'give_credit_hetero_host'
elif data_set == 'epsilon_5k_hetero_guest.csv':
guest_data_table = 'epsilon_5k_hetero_guest'
host_data_table = 'epsilon_5k_hetero_host'
else:
raise ValueError(f"Cannot recognized data_set: {data_set}")
guest_train_data = {"name": guest_data_table, "namespace": f"experiment{namespace}"}
host_train_data = {"name": host_data_table, "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection component
intersection_0 = Intersection(name="intersection_0")
lr_param = {
}
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": param["learning_rate"],
"optimizer": param["optimizer"],
"batch_size": param["batch_size"],
"shuffle": False,
"masked_rate": 0,
"early_stop": "diff",
"tol": 1e-5,
"floating_point_precision": param.get("floating_point_precision"),
"init_param": {
"init_method": param.get("init_method", 'random_uniform'),
"random_seed": param.get("random_seed", 103)
}
}
lr_param.update(config_param)
print(f"lr_param: {lr_param}, data_set: {data_set}")
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **lr_param)
hetero_lr_1 = HeteroLR(name='hetero_lr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_lr_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_lr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
job_parameters = JobParameters()
pipeline.fit(job_parameters)
lr_0_data = pipeline.get_component("hetero_lr_0").get_output_data()
lr_1_data = pipeline.get_component("hetero_lr_1").get_output_data()
lr_0_score = extract_data(lr_0_data, "predict_result")
lr_0_label = extract_data(lr_0_data, "label")
lr_1_score = extract_data(lr_1_data, "predict_result")
lr_1_label = extract_data(lr_1_data, "label")
lr_0_score_label = extract_data(lr_0_data, "predict_result", keep_id=True)
lr_1_score_label = extract_data(lr_1_data, "predict_result", keep_id=True)
result_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
metric_lr = {
"score_diversity_ratio": classification_metric.Distribution.compute(lr_0_score_label, lr_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(lr_0_score, lr_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(lr_0_score, lr_1_score, lr_0_label,
lr_1_label)}
result_summary["distribution_metrics"] = {"hetero_lr": metric_lr}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, result_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-c", "--config", type=str,
help="config file", default="../../config.yaml")
parser.add_argument("-p", "--param", type=str,
help="config file for params", default="./breast_config.yaml")
args = parser.parse_args()
main(args.config, args.param)
| 7,300 | 42.718563 | 112 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_lr/sklearn-lr-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas
from pipeline.utils.tools import JobConfig
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import precision_score, accuracy_score, recall_score
def main(config="../../config.yaml", param="./vehicle_config.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": "optimal",
"eta0": param["learning_rate"],
"random_state": 105
}
# prepare data
df_guest = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix="host")
y = df[label_name]
X = df.drop(label_name, axis=1)
# lm = LogisticRegression(max_iter=20)
lm = SGDClassifier(loss="log", **config_param, shuffle=False)
lm_fit = lm.fit(X, y)
y_pred = lm_fit.predict(X)
recall = recall_score(y, y_pred, average="macro")
pr = precision_score(y, y_pred, average="macro")
acc = accuracy_score(y, y_pred)
result = {"accuracy": acc}
print(result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,554 | 30.9375 | 86 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_lr/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/homo_lr/pipeline-lr-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HomoLR
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from fate_test.utils import extract_data, parse_summary_result
from federatedml.evaluation.metrics import classification_metric
def main(config="../../config.yaml", param="./breast_lr_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_set = param.get("data_guest").split('/')[-1]
if data_set == "vehicle_scale_homo_guest.csv":
guest_data_table = 'vehicle_scale_homo_guest'
host_data_table = 'vehicle_scale_homo_host'
else:
raise ValueError(f"Cannot recognized data_set: {data_set}")
guest_train_data = {"name": guest_data_table, "namespace": f"experiment{namespace}"}
host_train_data = {"name": host_data_table, "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=True)
lr_param = {
}
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": param["learning_rate"],
"optimizer": param.get("optimizer", "sgd"),
"batch_size": param.get("batch_size", -1),
"init_param": {
"init_method": 'random_uniform'
}
}
lr_param.update(config_param)
print(f"lr_param: {lr_param}, data_set: {data_set}")
homo_lr_0 = HomoLR(name='homo_lr_0', **lr_param)
homo_lr_1 = HomoLR(name='homo_lr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="multi")
evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(homo_lr_0, data=Data(train_data=data_transform_0.output.data))
pipeline.add_component(homo_lr_1, data=Data(test_data=data_transform_0.output.data),
model=Model(homo_lr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=homo_lr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
result_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
lr_0_data = pipeline.get_component("homo_lr_0").get_output_data()
lr_1_data = pipeline.get_component("homo_lr_1").get_output_data()
lr_0_score_label = extract_data(lr_0_data, "predict_result", keep_id=True)
lr_1_score_label = extract_data(lr_1_data, "predict_result", keep_id=True)
metric_lr = {
"score_diversity_ratio":
classification_metric.Distribution.compute(lr_0_score_label, lr_1_score_label)}
result_summary["distribution_metrics"] = {"homo_lr": metric_lr}
print(result_summary)
return data_summary, result_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 5,887 | 40.174825 | 109 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_lr/sklearn-lr-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pandas
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score, roc_curve
from pipeline.utils.tools import JobConfig
import os
def main(config="../../config.yaml", param="./lr_config.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_guest = param["data_guest"]
data_host = param["data_host"]
data_test = param["data_test"]
idx = param["idx"]
label_name = param["label_name"]
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
config_param = {
"penalty": param["penalty"],
"max_iter": 100,
"alpha": param["alpha"],
"learning_rate": "optimal",
"eta0": param["learning_rate"]
}
# prepare data
df_guest = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
# df_test = pandas.read_csv(data_test, index_col=idx)
df = pandas.concat([df_guest, df_host], axis=0)
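    # homo (horizontal) setting: guest and host hold the same features for
    # different samples, so the local baseline stacks rows, whereas the hetero
    # scripts join columns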
# df = df_guest.join(df_host, rsuffix="host")
y_train = df[label_name]
x_train = df.drop(label_name, axis=1)
# y_test = df_test[label_name]
# x_test = df_test.drop(label_name, axis=1)
x_test, y_test = x_train, y_train
# lm = LogisticRegression(max_iter=20)
lm = SGDClassifier(loss="log", **config_param)
lm_fit = lm.fit(x_train, y_train)
y_pred = lm_fit.predict(x_test)
y_prob = lm_fit.predict_proba(x_test)[:, 1]
auc_score = roc_auc_score(y_test, y_prob)
recall = recall_score(y_test, y_pred, average="macro")
pr = precision_score(y_test, y_pred, average="macro")
acc = accuracy_score(y_test, y_pred)
# y_predict_proba = est.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_prob)
ks = max(tpr - fpr)
result = {"auc": auc_score}
print(f"result: {result}")
print(f"coef_: {lm_fit.coef_}, intercept_: {lm_fit.intercept_}, n_iter: {lm_fit.n_iter_}")
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-p", "--param", type=str, default="./lr_config.yaml",
help="config file for params")
args = parser.parse_args()
    main(param=args.param)
| 3,168 | 33.075269 | 99 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_lr/pipeline-lr-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HomoLR
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from fate_test.utils import extract_data, parse_summary_result
from federatedml.evaluation.metrics import classification_metric
def main(config="../../config.yaml", param="./breast_lr_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_set = param.get("data_guest").split('/')[-1]
if data_set == "default_credit_homo_guest.csv":
guest_data_table = 'default_credit_guest'
host_data_table = 'default_credit_host1'
elif data_set == 'breast_homo_guest.csv':
guest_data_table = 'breast_homo_guest'
host_data_table = 'breast_homo_host'
elif data_set == 'give_credit_homo_guest.csv':
guest_data_table = 'give_credit_homo_guest'
host_data_table = 'give_credit_homo_host'
elif data_set == 'epsilon_5k_homo_guest.csv':
guest_data_table = 'epsilon_5k_homo_guest'
host_data_table = 'epsilon_5k_homo_host'
else:
raise ValueError(f"Cannot recognized data_set: {data_set}")
guest_train_data = {"name": guest_data_table, "namespace": f"experiment{namespace}"}
host_train_data = {"name": host_data_table, "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=True)
lr_param = {
}
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": param["learning_rate"],
"optimizer": param.get("optimizer", "sgd"),
"batch_size": param.get("batch_size", -1),
"init_param": {
"init_method": param.get("init_method", 'random_uniform')
}
}
lr_param.update(config_param)
print(f"lr_param: {lr_param}, data_set: {data_set}")
homo_lr_0 = HomoLR(name='homo_lr_0', **lr_param)
homo_lr_1 = HomoLR(name='homo_lr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")
evaluation_0.get_party_instance(role='host', party_id=host).component_param(need_run=False)
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(homo_lr_0, data=Data(train_data=data_transform_0.output.data))
pipeline.add_component(homo_lr_1, data=Data(test_data=data_transform_0.output.data),
model=Model(homo_lr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=homo_lr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
result_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
lr_0_data = pipeline.get_component("homo_lr_0").get_output_data()
lr_1_data = pipeline.get_component("homo_lr_1").get_output_data()
lr_0_score = extract_data(lr_0_data, "predict_result")
lr_0_label = extract_data(lr_0_data, "label")
lr_1_score = extract_data(lr_1_data, "predict_result")
lr_1_label = extract_data(lr_1_data, "label")
lr_0_score_label = extract_data(lr_0_data, "predict_result", keep_id=True)
lr_1_score_label = extract_data(lr_1_data, "predict_result", keep_id=True)
metric_lr = {
"score_diversity_ratio": classification_metric.Distribution.compute(lr_0_score_label, lr_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(lr_0_score, lr_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(lr_0_score, lr_1_score, lr_0_label,
lr_1_label)}
result_summary["distribution_metrics"] = {"homo_lr": metric_lr}
return data_summary, result_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 6,816 | 41.874214 | 112 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_lr/sklearn-lr-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pandas
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score, roc_curve
from pipeline.utils.tools import JobConfig
import os
def main(config="../../config.yaml", param="./lr_config.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
assert isinstance(param, dict)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
config_param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"alpha": param["alpha"],
"learning_rate": "optimal",
"eta0": param["learning_rate"],
"random_state": 123
}
# prepare data
df_guest = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
# df_test = pandas.read_csv(data_test, index_col=idx)
df = pandas.concat([df_guest, df_host], axis=0)
# df = df_guest.join(df_host, rsuffix="host")
y_train = df[label_name]
x_train = df.drop(label_name, axis=1)
# y_test = df_test[label_name]
# x_test = df_test.drop(label_name, axis=1)
x_test, y_test = x_train, y_train
# lm = LogisticRegression(max_iter=20)
lm = SGDClassifier(loss="log", **config_param)
lm_fit = lm.fit(x_train, y_train)
y_pred = lm_fit.predict(x_test)
acc = accuracy_score(y_test, y_pred)
result = {"accuracy": acc}
print('multi result', result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-p", "--param", type=str, default="./lr_config.yaml",
help="config file for params")
args = parser.parse_args()
    main(param=args.param)
| 2,740 | 30.872093 | 99 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/homo_lr/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_fast_sbt/fate-fast-sbt.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroFastSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
from pipeline.utils.tools import JobConfig
from federatedml.evaluation.metrics import regression_metric, classification_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param="./xgb_config_binary.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": param['data_guest_train'], "namespace": f"experiment{namespace}"}
host_train_data = {"name": param['data_host_train'], "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": param['data_guest_val'], "namespace": f"experiment{namespace}"}
host_validate_data = {"name": param['data_host_val'], "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(
with_label=False)
data_transform_1.get_party_instance(role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(
with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
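    # assumption from the FATE fast-SBT docs: work_mode selects the variant
    # ("mix" or "layered"); guest_depth/host_depth apply in layered mode, while
    # tree_num_per_party applies in mix mode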
hetero_fast_sbt_0 = HeteroFastSecureBoost(name="hetero_fast_sbt_0",
num_trees=param['tree_num'],
task_type=param['task_type'],
objective_param={"objective": param['loss_func']},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": param['tree_depth']},
validation_freqs=1,
subsample_feature_rate=1,
learning_rate=param['learning_rate'],
guest_depth=param['guest_depth'],
host_depth=param['host_depth'],
tree_num_per_party=param['tree_num_per_party'],
work_mode=param['work_mode']
)
hetero_fast_sbt_1 = HeteroFastSecureBoost(name="hetero_fast_sbt_1")
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type=param['eval_type'])
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_fast_sbt_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(hetero_fast_sbt_1, data=Data(test_data=intersect_1.output.data),
model=Model(hetero_fast_sbt_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_fast_sbt_0.output.data))
pipeline.compile()
pipeline.fit()
sbt_0_data = pipeline.get_component("hetero_fast_sbt_0").get_output_data()
sbt_1_data = pipeline.get_component("hetero_fast_sbt_1").get_output_data()
sbt_0_score = extract_data(sbt_0_data, "predict_result")
sbt_0_label = extract_data(sbt_0_data, "label")
sbt_1_score = extract_data(sbt_1_data, "predict_result")
sbt_1_label = extract_data(sbt_1_data, "label")
sbt_0_score_label = extract_data(sbt_0_data, "predict_result", keep_id=True)
sbt_1_score_label = extract_data(sbt_1_data, "predict_result", keep_id=True)
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
if param['eval_type'] == "regression":
desc_sbt_0 = regression_metric.Describe().compute(sbt_0_score)
desc_sbt_1 = regression_metric.Describe().compute(sbt_1_score)
metric_summary["script_metrics"] = {"hetero_fast_sbt_train": desc_sbt_0,
"hetero_fast_sbt_validate": desc_sbt_1}
elif param['eval_type'] == "binary":
metric_sbt = {
"score_diversity_ratio": classification_metric.Distribution.compute(sbt_0_score_label, sbt_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(sbt_0_score, sbt_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(sbt_0_score, sbt_1_score, sbt_0_label,
sbt_1_label)}
metric_summary["distribution_metrics"] = {"hetero_fast_sbt": metric_sbt}
elif param['eval_type'] == "multi":
metric_sbt = {
"score_diversity_ratio": classification_metric.Distribution.compute(sbt_0_score_label, sbt_1_score_label)}
metric_summary["distribution_metrics"] = {"hetero_fast_sbt": metric_sbt}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 8,136 | 50.828025 | 119 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_fast_sbt/gbdt-regression.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_reg.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix='host')
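    # local baseline: join guest and host features on the shared index so the
    # centralized model sees the same feature space as the federated job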
y = df[label_name]
X = df.drop(label_name, axis=1)
clf = GradientBoostingRegressor(random_state=0, n_estimators=50)
clf.fit(X, y)
y_predict = clf.predict(X)
result = {"mean_absolute_error": mean_absolute_error(y, y_predict)}
print(result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
    args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,212 | 30.614286 | 82 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_fast_sbt/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_fast_sbt/gbdt-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score
from sklearn.ensemble import GradientBoostingClassifier
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_multi.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix='host')
y = df[label_name]
X = df.drop(label_name, axis=1)
clf = GradientBoostingClassifier(random_state=0, n_estimators=50, learning_rate=0.3)
clf.fit(X, y)
y_pred = clf.predict(X)
    try:
        # multiclass auc needs per-class probabilities rather than hard label predictions
        auc_score = roc_auc_score(y, clf.predict_proba(X), multi_class="ovr")
        print("ovr auc:", auc_score)
    except BaseException:
        print("no auc score available")
acc = accuracy_score(y, y_pred)
result = {"accuracy": acc}
print('multi result', result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
    args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,402 | 32.375 | 88 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_fast_sbt/gbdt-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score
from sklearn.ensemble import GradientBoostingClassifier
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_binary.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix='host')
y = df[label_name]
X = df.drop(label_name, axis=1)
clf = GradientBoostingClassifier(random_state=0, n_estimators=120 if 'epsilon' in data_guest else 50)
clf.fit(X, y)
    y_prob = clf.predict_proba(X)[:, 1]  # positive-class probabilities so auc is scored correctly
try:
auc_score = roc_auc_score(y, y_prob)
except BaseException:
print(f"no auc score available")
return
result = {"auc": auc_score}
print(result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
    args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,385 | 31.684932 | 105 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_sbt/gbdt-regression.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_reg.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix='host')
y = df[label_name]
X = df.drop(label_name, axis=1)
clf = GradientBoostingRegressor(random_state=0, n_estimators=50)
clf.fit(X, y)
y_predict = clf.predict(X)
result = {"mean_absolute_error": mean_absolute_error(y, y_predict)}
print(result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
    args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,212 | 30.614286 | 82 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_sbt/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_sbt/gbdt-multi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score
from sklearn.ensemble import GradientBoostingClassifier
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_multi.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix='host')
y = df[label_name]
X = df.drop(label_name, axis=1)
clf = GradientBoostingClassifier(random_state=0, n_estimators=50, learning_rate=0.3)
clf.fit(X, y)
y_pred = clf.predict(X)
    try:
        # multiclass auc needs per-class probabilities rather than hard label predictions
        auc_score = roc_auc_score(y, clf.predict_proba(X), multi_class="ovr")
        print("ovr auc:", auc_score)
    except BaseException:
        print("no auc score available")
acc = accuracy_score(y, y_pred)
result = {"accuracy": acc}
print('multi result', result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
    args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,402 | 32.375 | 88 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_sbt/fate-sbt.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroSecureBoost
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.component import Evaluation
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
from pipeline.utils.tools import JobConfig
from federatedml.evaluation.metrics import regression_metric, classification_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param="./xgb_config_binary.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
# data sets
guest_train_data = {"name": param['data_guest_train'], "namespace": f"experiment{namespace}"}
host_train_data = {"name": param['data_host_train'], "namespace": f"experiment{namespace}"}
guest_validate_data = {"name": param['data_guest_val'], "namespace": f"experiment{namespace}"}
host_validate_data = {"name": param['data_host_val'], "namespace": f"experiment{namespace}"}
# init pipeline
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host,)
# set data reader and data-io
reader_0, reader_1 = Reader(name="reader_0"), Reader(name="reader_1")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role="guest", party_id=guest).component_param(table=guest_validate_data)
reader_1.get_party_instance(role="host", party_id=host).component_param(table=host_validate_data)
data_transform_0, data_transform_1 = DataTransform(name="data_transform_0"), DataTransform(name="data_transform_1")
data_transform_0.get_party_instance(role="guest", party_id=guest).\
component_param(with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
data_transform_1.get_party_instance(role="guest", party_id=guest).\
component_param(with_label=True, output_format="dense")
data_transform_1.get_party_instance(role="host", party_id=host).component_param(with_label=False)
# data intersect component
intersect_0 = Intersection(name="intersection_0")
intersect_1 = Intersection(name="intersection_1")
# secure boost component
multi_mode = 'single_output'
if 'multi_mode' in param:
multi_mode = param['multi_mode']
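    # "single_output" (default) grows one tree per class each boosting round for
    # multiclass tasks; "multi_output" grows a single tree that emits all class scores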
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secure_boost_0",
num_trees=param['tree_num'],
task_type=param['task_type'],
objective_param={"objective": param['loss_func']},
encrypt_param={"method": "Paillier"},
tree_param={"max_depth": param['tree_depth']},
validation_freqs=1,
learning_rate=param['learning_rate'],
multi_mode=multi_mode
)
hetero_secure_boost_1 = HeteroSecureBoost(name="hetero_secure_boost_1")
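    # hetero_secure_boost_1 runs prediction only, reusing the trained model wired in below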
# evaluation component
evaluation_0 = Evaluation(name="evaluation_0", eval_type=param['eval_type'])
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
pipeline.add_component(intersect_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersect_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersect_0.output.data,
validate_data=intersect_1.output.data))
pipeline.add_component(hetero_secure_boost_1, data=Data(test_data=intersect_1.output.data),
model=Model(hetero_secure_boost_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
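    # sketch (assumed fate-client API) for persisting the fitted pipeline for reuse:
    #   pipeline.dump("fate_sbt_pipeline.pkl")
    #   restored = PipeLine.load_model_from_file("fate_sbt_pipeline.pkl")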
sbt_0_data = pipeline.get_component("hetero_secure_boost_0").get_output_data()
sbt_1_data = pipeline.get_component("hetero_secure_boost_1").get_output_data()
sbt_0_score = extract_data(sbt_0_data, "predict_result")
sbt_0_label = extract_data(sbt_0_data, "label")
sbt_1_score = extract_data(sbt_1_data, "predict_result")
sbt_1_label = extract_data(sbt_1_data, "label")
sbt_0_score_label = extract_data(sbt_0_data, "predict_result", keep_id=True)
sbt_1_score_label = extract_data(sbt_1_data, "predict_result", keep_id=True)
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
if param['eval_type'] == "regression":
desc_sbt_0 = regression_metric.Describe().compute(sbt_0_score)
desc_sbt_1 = regression_metric.Describe().compute(sbt_1_score)
metric_summary["script_metrics"] = {"hetero_sbt_train": desc_sbt_0,
"hetero_sbt_validate": desc_sbt_1}
elif param['eval_type'] == "binary":
metric_sbt = {
"score_diversity_ratio": classification_metric.Distribution.compute(sbt_0_score_label, sbt_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(sbt_0_score, sbt_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(sbt_0_score, sbt_1_score, sbt_0_label,
sbt_1_label)}
metric_summary["distribution_metrics"] = {"hetero_sbt": metric_sbt}
elif param['eval_type'] == "multi":
metric_sbt = {
"score_diversity_ratio": classification_metric.Distribution.compute(sbt_0_score_label, sbt_1_score_label)}
metric_summary["distribution_metrics"] = {"hetero_sbt": metric_sbt}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 7,905 | 50.337662 | 119 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_sbt/gbdt-binary.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import pandas as pd
from sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score
from sklearn.ensemble import GradientBoostingClassifier
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./gbdt_config_binary.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
print('config is {}'.format(config))
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
print('data base dir is', data_base_dir)
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix='host')
y = df[label_name]
X = df.drop(label_name, axis=1)
clf = GradientBoostingClassifier(random_state=0, n_estimators=120 if 'epsilon' in data_guest else 50)
clf.fit(X, y)
    y_prob = clf.predict_proba(X)[:, 1]  # positive-class probabilities so auc is scored correctly
try:
auc_score = roc_auc_score(y, y_prob)
except BaseException:
print(f"no auc score available")
return
result = {"auc": auc_score}
print(result)
return {}, result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-param", type=str,
help="config file for params")
    args = parser.parse_args()
    if args.param is not None:
        main(param=args.param)
    else:
        main()
| 2,385 | 31.684932 | 105 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_linear_regression/fate-linr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroLinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import regression_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param="./linr_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense",
label_name=param["label_name"], label_type="float")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection component
intersection_0 = Intersection(name="intersection_0")
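    # repack the training hyper-parameters read from the yaml config; note that this
    # rebinds `param`, so any config values must be pulled out before this point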
param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"optimizer": param["optimizer"],
"learning_rate": param["learning_rate"],
"init_param": param["init_param"],
"batch_size": param["batch_size"],
"alpha": param["alpha"]
}
hetero_linr_0 = HeteroLinR(name='hetero_linr_0', **param)
hetero_linr_1 = HeteroLinR(name='hetero_linr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="regression",
metrics=["r2_score",
"mean_squared_error",
"root_mean_squared_error",
"explained_variance"])
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_linr_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_linr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_linr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
data_linr_0 = extract_data(pipeline.get_component("hetero_linr_0").get_output_data(), "predict_result")
data_linr_1 = extract_data(pipeline.get_component("hetero_linr_1").get_output_data(), "predict_result")
desc_linr_0 = regression_metric.Describe().compute(data_linr_0)
desc_linr_1 = regression_metric.Describe().compute(data_linr_1)
metric_summary["script_metrics"] = {"linr_train": desc_linr_0,
"linr_validate": desc_linr_1}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY FATE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 5,832 | 41.576642 | 109 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_linear_regression/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_linear_regression/fate-sshe-linr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroSSHELinR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import regression_metric
from fate_test.utils import extract_data, parse_summary_result
def main(config="../../config.yaml", param="./sshe_linr_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
if isinstance(param, str):
param = JobConfig.load_from_file(param)
guest_train_data = {"name": "motor_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "motor_hetero_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense",
label_name=param["label_name"], label_type="float")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
# define Intersection component
intersection_0 = Intersection(name="intersection_0")
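    # besides the usual LinR knobs, SSHE adds secret-sharing options: reveal_every_iter
    # reconstructs plaintext weights each iteration, and reveal_strategy controls
    # whether the host's final weights are revealed in plaintext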
param = {
"penalty": param["penalty"],
"max_iter": param["max_iter"],
"optimizer": param["optimizer"],
"learning_rate": param["learning_rate"],
"init_param": param["init_param"],
"batch_size": param["batch_size"],
"alpha": param["alpha"],
"early_stop": param["early_stop"],
"reveal_strategy": param["reveal_strategy"],
"tol": 1e-6,
"reveal_every_iter": True
}
hetero_sshe_linr_0 = HeteroSSHELinR(name='hetero_sshe_linr_0', **param)
hetero_sshe_linr_1 = HeteroSSHELinR(name='hetero_sshe_linr_1')
evaluation_0 = Evaluation(name='evaluation_0', eval_type="regression",
metrics=["r2_score",
"mean_squared_error",
"root_mean_squared_error",
"explained_variance"])
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_sshe_linr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_sshe_linr_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_sshe_linr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_sshe_linr_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
data_linr_0 = extract_data(pipeline.get_component(
"hetero_sshe_linr_0").get_output_data().get("data"), "predict_result")
data_linr_1 = extract_data(pipeline.get_component(
"hetero_sshe_linr_1").get_output_data().get("data"), "predict_result")
desc_linr_0 = regression_metric.Describe().compute(data_linr_0)
desc_linr_1 = regression_metric.Describe().compute(data_linr_1)
metric_summary["script_metrics"] = {"linr_train": desc_linr_0,
"linr_validate": desc_linr_1}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY FATE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 6,093 | 41.615385 | 109 |
py
|
FATE
|
FATE-master/examples/benchmark_quality/hetero_linear_regression/local-linr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pandas
import numpy as np
import os
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score
from pipeline.utils.tools import JobConfig
def main(config="../../config.yaml", param="./linr_config.yaml"):
# obtain config
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
# prepare data
df_guest = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
df_host = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
df = df_guest.join(df_host, rsuffix="host")
y = df[label_name]
X = df.drop(label_name, axis=1)
lm = SGDRegressor(loss="squared_loss", penalty=param["penalty"], random_state=42,
fit_intercept=True, max_iter=param["max_iter"], average=param["batch_size"])
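    # note: "squared_loss" was renamed to "squared_error" in scikit-learn >= 1.0, and
    # an integer `average` starts weight averaging once that many samples have been seen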
lm_fit = lm.fit(X, y)
y_pred = lm_fit.predict(X)
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
r2 = r2_score(y, y_pred)
explained_var = explained_variance_score(y, y_pred)
metric_summary = {"r2_score": r2,
"mean_squared_error": mse,
"root_mean_squared_error": rmse,
"explained_variance": explained_var}
data_summary = {}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY LOCAL JOB")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.param is not None:
        main(param=args.param)
else:
main()
| 2,596 | 34.094595 | 98 |
py
|
FATE
|
FATE-master/examples/dsl/v2/feature_scale/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/dsl/v2/model_loader/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 614 | 40 | 75 |
py
|
FATE
|
FATE-master/examples/dsl/v2/homo_onehot/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 614 | 40 | 75 |
py
|
FATE
|
FATE-master/examples/dsl/v2/label_transform/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/data/upload_config/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/examples/experiment_template/pipeline/hetero_lr/pipeline_train_manually_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import FeatureScale
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
data_transform_1 = DataTransform(name="data_transform_1") # start component numbering at 1
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
    # apply the same guest-side configuration to data_transform_1
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
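    # "raw" intersection exchanges (optionally hashed) ids directly; use "rsa"
    # when the id sets themselves must stay private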
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["manually", "unique_value", "iv_filter", "statistic_filter"],
"manually_param": {
"filter_out_indexes": [1, 2],
"filter_out_names": ["x3", "x4"]
},
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
param = {
"name": "hetero_scale_0",
"method": "standard_scale"
}
hetero_scale_0 = FeatureScale(**param)
hetero_scale_1 = FeatureScale(name='hetero_scale_1')
param = {
"penalty": "L2",
"optimizer": "nesterov_momentum_sgd",
"tol": 1e-4,
"alpha": 0.01,
"max_iter": 5,
"early_stop": "diff",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"validation_freqs": None,
"early_stopping_rounds": None
}
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
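    # isometric_model feeds the binning IV values and column statistics into the
    # matching iv_filter / statistic_filter configured above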
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(hetero_scale_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_scale_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(hetero_scale_0.output.model))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_lr_0, data=Data(train_data=hetero_scale_0.output.data,
validate_data=hetero_scale_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_lr_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 8,577 | 40.043062 | 109 |
py
|
FATE
|
FATE-master/examples/experiment_template/pipeline/hetero_lr/pipeline_train_sample_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import FederatedSample
from pipeline.component import FeatureScale
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
data_transform_1 = DataTransform(name="data_transform_1") # start component numbering at 1
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
    # apply the same guest-side configuration to data_transform_1
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
'name': 'sample_0',
"mode": "stratified",
"method": "downsample",
"fractions": [[0, 0.5], [1, 0.8]],
"need_run": True
}
sample_0 = FederatedSample(**param)
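    # stratified downsample: keep 50% of label-0 and 80% of label-1 instances
    # (each entry of "fractions" is a [label, fraction] pair)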
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
param = {
"name": "hetero_scale_0",
"method": "standard_scale"
}
hetero_scale_0 = FeatureScale(**param)
hetero_scale_1 = FeatureScale(name='hetero_scale_1')
param = {
"penalty": "L2",
"optimizer": "nesterov_momentum_sgd",
"tol": 1e-4,
"alpha": 0.01,
"max_iter": 5,
"early_stop": "diff",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"validation_freqs": None,
"early_stopping_rounds": None
}
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(sample_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_feature_binning_0, data=Data(data=sample_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=sample_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=sample_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(hetero_scale_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_scale_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(hetero_scale_0.output.model))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_lr_0, data=Data(train_data=hetero_scale_0.output.data,
validate_data=hetero_scale_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_lr_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 8,767 | 39.781395 | 109 |
py
|
FATE
|
FATE-master/examples/experiment_template/pipeline/hetero_lr/pipeline_train_union_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import Union
from pipeline.component import FeatureScale
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data_0 = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_train_data_1 = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data_0 = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data_1 = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data_0 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_train_data_1 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data_0 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data_1 = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
reader_2 = Reader(name="reader_2")
reader_3 = Reader(name="reader_3")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data_0)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data_1)
reader_2.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data_0)
reader_3.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data_1)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data_0)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_train_data_1)
reader_2.get_party_instance(role='host', party_id=host).component_param(table=host_test_data_0)
reader_3.get_party_instance(role='host', party_id=host).component_param(table=host_test_data_1)
param = {
"name": "union_0",
"keep_duplicate": True
}
union_0 = Union(**param)
param = {
"name": "union_1",
"keep_duplicate": True
}
union_1 = Union(**param)
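    # each Union concatenates a pair of reader outputs row-wise; keep_duplicate=True
    # keeps entries whose ids collide instead of dropping them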
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
data_transform_1 = DataTransform(name="data_transform_1") # start component numbering at 1
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
intersection_1 = Intersection(name="intersection_1")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["manually", "iv_filter", "statistic_filter"],
"manually_param": {
"filter_out_indexes": [1, 2],
"filter_out_names": ["x2", "x3"]
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.01]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, True],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
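    # standard-scale the selected features; hetero_scale_1 reuses the model fitted by hetero_scale_0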
param = {
"name": "hetero_scale_0",
"method": "standard_scale"
}
hetero_scale_0 = FeatureScale(**param)
hetero_scale_1 = FeatureScale(name='hetero_scale_1')
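    # HeteroLR training parameters; per-iteration validation and early stopping are disabled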
param = {
"penalty": "L2",
"validation_freqs": None,
"early_stopping_rounds": None,
"max_iter": 5
}
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(reader_2)
pipeline.add_component(reader_3)
pipeline.add_component(union_0, data=Data(data=[reader_0.output.data, reader_1.output.data]))
pipeline.add_component(union_1, data=Data(data=[reader_2.output.data, reader_3.output.data]))
pipeline.add_component(data_transform_0, data=Data(data=union_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=union_1.output.data), model=Model(data_transform_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0,
data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1,
data=Data(data=data_transform_1.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(hetero_scale_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_scale_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(hetero_scale_0.output.model))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_lr_0, data=Data(train_data=hetero_scale_0.output.data,
validate_data=hetero_scale_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_lr_0").get_summary())
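
    # a minimal sketch of offline prediction with the fitted pipeline (hedged: assumes the
    # deploy_component / predict API of the pipeline package used by these examples):
    #   pipeline.deploy_component([data_transform_0, intersection_0, hetero_feature_selection_0,
    #                              hetero_scale_0, hetero_lr_0])
    #   predict_pipeline = PipeLine()
    #   predict_pipeline.add_component(reader_2)
    #   predict_pipeline.add_component(
    #       pipeline, data=Data(predict_input={pipeline.data_transform_0.input.data: reader_2.output.data}))
    #   predict_pipeline.predict()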
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 9,262 | 42.693396 | 109 |
py
|
FATE
|
FATE-master/examples/experiment_template/pipeline/hetero_lr/pipeline_train_test_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component import FeatureScale
from pipeline.component import DataTransform
from pipeline.component import HeteroLR
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_eval_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_eval_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
reader_2 = Reader(name="reader_2")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_eval_data)
reader_2.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_eval_data)
reader_2.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataTransform components
    data_transform_0 = DataTransform(name="data_transform_0")  # component numbering starts at 0
    data_transform_1 = DataTransform(name="data_transform_1")
    data_transform_2 = DataTransform(name="data_transform_2")
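    # guest-side DataTransform config: dense labeled data with mean imputation for missing values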
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(**param)
    # configure DataTransform party instances of guest for data_transform_1 and data_transform_2
data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
data_transform_2.get_party_instance(role='guest', party_id=guest).component_param(**param)
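    # host-side DataTransform config: parse tag:value input (";"-delimited) without labels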
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
data_transform_0.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_1.get_party_instance(role='host', party_id=host).component_param(**param)
data_transform_2.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
intersection_2 = Intersection(name="intersection_2", intersect_method="raw")
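    # optimal binning over all feature columns: quantile-initialized buckets refined by IV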
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
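    # feature selection chaining a unique-value filter, an IV-based filter and a statistic filter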
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
hetero_feature_selection_2 = HeteroFeatureSelection(name='hetero_feature_selection_2')
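    # standard-scale features; hetero_scale_1/2 reuse the model fitted by hetero_scale_0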
param = {
"name": "hetero_scale_0",
"method": "standard_scale"
}
hetero_scale_0 = FeatureScale(**param)
hetero_scale_1 = FeatureScale(name='hetero_scale_1')
hetero_scale_2 = FeatureScale(name='hetero_scale_2')
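    # HeteroLR with Nesterov momentum SGD on full batches (batch_size=-1)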
param = {
"penalty": "L2",
"optimizer": "nesterov_momentum_sgd",
"tol": 1e-4,
"alpha": 0.01,
"max_iter": 5,
"early_stop": "diff",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
},
"validation_freqs": None,
"early_stopping_rounds": None
}
hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
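    # hetero_lr_1 runs prediction on the held-out test set using the model trained by hetero_lr_0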
hetero_lr_1 = HeteroLR(name='hetero_lr_1')
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(reader_2)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(data_transform_1,
data=Data(data=reader_1.output.data), model=Model(data_transform_0.output.model))
pipeline.add_component(data_transform_2,
data=Data(data=reader_2.output.data), model=Model(data_transform_1.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
pipeline.add_component(intersection_2, data=Data(data=data_transform_2.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
pipeline.add_component(hetero_feature_selection_2, data=Data(data=intersection_2.output.data),
model=Model(hetero_feature_selection_1.output.model))
pipeline.add_component(hetero_scale_0, data=Data(data=hetero_feature_selection_0.output.data))
pipeline.add_component(hetero_scale_1, data=Data(data=hetero_feature_selection_1.output.data),
model=Model(hetero_scale_0.output.model))
pipeline.add_component(hetero_scale_2, data=Data(data=hetero_feature_selection_2.output.data),
model=Model(hetero_scale_1.output.model))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_lr_0, data=Data(train_data=hetero_scale_0.output.data,
validate_data=hetero_scale_1.output.data))
pipeline.add_component(hetero_lr_1, data=Data(test_data=hetero_scale_2.output.data),
model=Model(hetero_lr_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data, hetero_lr_1.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_lr_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 10,215 | 44.004405 | 109 |
py
|