repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
FATE | FATE-master/python/fate_client/pipeline/component/hetero_lr.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.param.logistic_regression_param import HeteroLogisticParam
from pipeline.component.component_base import FateComponent
from pipeline.interface import Input
from pipeline.interface import Output
from pipeline.utils.logger import LOGGER
class HeteroLR(FateComponent, HeteroLogisticParam):
def __init__(self, **kwargs):
FateComponent.__init__(self, **kwargs)
# print (self.name)
LOGGER.debug(f"{self.name} component created")
new_kwargs = self.erase_component_base_param(**kwargs)
HeteroLogisticParam.__init__(self, **new_kwargs)
self.input = Input(self.name, data_type="multi")
self.output = Output(self.name)
self._module_name = "HeteroLR"
| 1,343 | 35.324324 | 75 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/__init__.py |
from pipeline.component.nn.interface import save_to_fate, save_to_fate_llm, DatasetParam, TrainerParam
__all__ = ["save_to_fate", "save_to_fate_llm", "DatasetParam", "TrainerParam"]
| 167 | 41 | 102 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/interface.py |
from pipeline.param.base_param import BaseParam
import sys
def not_working_save_to_fate(*args, **kwargs):
raise ValueError(
'save to fate not working, please check if your ipython is installed, '
'and if ipython.get_ipython() is working')
def not_working_save_to_fate_llm(*args, **kwargs):
raise ValueError(
'save to fate_llm not working, please check if your ipython is installed, '
'and if ipython.get_ipython() is working')
try:
import IPython as ipy
from IPython.core.magic import register_cell_magic
except ImportError:
ipy = None
register_cell_magic = None
# register the %%save_to_fate magic only when running inside IPython
if register_cell_magic is not None:
if ipy.get_ipython():
@register_cell_magic
def save_to_fate(line, cell):
# search for federatedml path
base_path = None
for p in sys.path:
if p.endswith('/fate/python'):
base_path = p
break
if base_path is None:
raise ValueError(
'cannot find fate/python in system path, please check your configuration')
base_path = base_path + '/federatedml/'
model_pth = 'nn/model_zoo/'
dataset_pth = 'nn/dataset/'
trainer_pth = 'nn/homo/trainer/'
aggregator_pth = 'framework/homo/aggregator/'
loss_path = 'nn/loss/'
mode_map = {
'model': model_pth,
'trainer': trainer_pth,
'aggregator': aggregator_pth,
'dataset': dataset_pth,
'loss': loss_path
}
args = line.split()
            assert len(args) == 2, \
                "expected 2 args, got {}\nexpected format: %%save_to_fate SAVE_MODE FILE_NAME\n" \
                "SAVE_MODE in ['model', 'dataset', 'trainer', 'aggregator', 'loss'], FILE_NAME like xxx.py".format(args)
modes_avail = ['model', 'dataset', 'trainer', 'aggregator', 'loss']
save_mode = args[0]
file_name = args[1]
assert save_mode in modes_avail, 'avail modes are {}, got {}'.format(
modes_avail, save_mode)
assert file_name.endswith('.py'), 'save file should be a .py'
with open(base_path + mode_map[save_mode] + file_name, 'w') as f:
f.write(cell)
ipy.get_ipython().run_cell(cell)
else:
save_to_fate = not_working_save_to_fate
else:
save_to_fate = not_working_save_to_fate
# register the %%save_to_fate_llm magic only when running inside IPython
if register_cell_magic is not None:
if ipy.get_ipython():
@register_cell_magic
def save_to_fate_llm(line, cell):
# search for federatedml path
base_path = None
for p in sys.path:
if p.endswith('/fate/python'):
base_path = p
break
if base_path is None:
raise ValueError(
'cannot find fate/python in system path, please check your configuration')
base_path = base_path + '/fate_llm/'
model_pth = 'model_zoo/'
dataset_pth = 'dataset/'
mode_map = {
'model': model_pth,
'dataset': dataset_pth,
}
args = line.split()
            assert len(args) == 2, \
                "expected 2 args, got {}\nexpected format: %%save_to_fate_llm SAVE_MODE FILE_NAME\n" \
                "SAVE_MODE in ['model', 'dataset'], FILE_NAME like xxx.py".format(args)
            modes_avail = ['model', 'dataset']
save_mode = args[0]
file_name = args[1]
assert save_mode in modes_avail, 'avail modes are {}, got {}'.format(
modes_avail, save_mode)
assert file_name.endswith('.py'), 'save file should be a .py'
with open(base_path + mode_map[save_mode] + file_name, 'w') as f:
f.write(cell)
ipy.get_ipython().run_cell(cell)
else:
save_to_fate_llm = not_working_save_to_fate_llm
else:
save_to_fate_llm = not_working_save_to_fate_llm
class TrainerParam(BaseParam):
def __init__(self, trainer_name=None, **kwargs):
super(TrainerParam, self).__init__()
self.trainer_name = trainer_name
self.param = kwargs
def check(self):
if self.trainer_name is None:
raise ValueError(
'You did not specify the trainer name, please set the trainer name')
self.check_string(self.trainer_name, 'trainer_name')
def to_dict(self):
ret = {'trainer_name': self.trainer_name, 'param': self.param}
return ret
class DatasetParam(BaseParam):
def __init__(self, dataset_name=None, **kwargs):
super(DatasetParam, self).__init__()
self.dataset_name = dataset_name
self.param = kwargs
def check(self):
self.check_string(self.dataset_name, 'dataset_name')
def to_dict(self):
ret = {'dataset_name': self.dataset_name, 'param': self.param}
return ret
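# Editor's sketch: example use of the param classes above; the trainer and
# dataset names below are placeholders, not guaranteed built-in names.
if __name__ == '__main__':
    trainer_param = TrainerParam(trainer_name='fedavg_trainer', epochs=10, batch_size=256)
    trainer_param.check()
    print(trainer_param.to_dict())  # {'trainer_name': 'fedavg_trainer', 'param': {'epochs': 10, 'batch_size': 256}}
    dataset_param = DatasetParam(dataset_name='table')
    dataset_param.check()
    print(dataset_param.to_dict())  # {'dataset_name': 'table', 'param': {}}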
| 5,089 | 32.486842 | 215 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/models/sequantial.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.nn.backend.torch.base import Sequential as Seq
from pipeline.component.nn.backend.torch.cust import CustModel
from pipeline.component.nn.backend.torch.interactive import InteractiveLayer
class Sequential(object):
def __init__(self):
self.__config_type = None
self._model = None
def is_empty(self):
return self._model is None
def get_model(self):
return self._model
def add(self, layer):
_IS_TF_KERAS = False
try:
import tensorflow as tf
_IS_TF_KERAS = isinstance(layer, tf.Module)
except ImportError:
pass
if _IS_TF_KERAS:
            # note: the keras backend is deprecated; hetero & homo nn no longer
            # support it, but the pipeline keras interface is kept
layer_type = "keras"
else:
layer_type = "torch"
        is_layer = hasattr(layer, "__module__") and \
            getattr(layer, "__module__") == "pipeline.component.nn.backend.torch.nn"
is_seq = isinstance(layer, Seq)
is_cust_model = isinstance(layer, CustModel)
is_interactive_layer = isinstance(layer, InteractiveLayer)
if not (is_layer or is_cust_model or is_interactive_layer or is_seq):
            raise ValueError(
                "layer type {} is not supported yet; the added layer must be a FateTorchLayer "
                "or a fate_torch Sequential. Remember to call fate_torch_hook() before using "
                "the pipeline".format(type(layer)))
self._add_layer(layer, layer_type)
def _add_layer(self, layer, layer_type, replace=True):
if layer_type == 'torch':
if self._model is None or replace:
self._model = Seq()
self.__config_type = layer_type
elif layer_type == 'keras':
            # note: the keras backend is deprecated; hetero & homo nn no longer
            # support it, but the pipeline keras interface is kept
from pipeline.component.nn.models.keras_interface import SequentialModel
self.__config_type = layer_type
self._model = SequentialModel()
self._model.add(layer)
def get_layer_type(self):
return self.__config_type
def get_loss_config(self, loss):
return self._model.get_loss_config(loss)
def get_optimizer_config(self, optimizer):
return self._model.get_optimizer_config(optimizer)
def get_network_config(self):
if not self.__config_type:
raise ValueError("Empty layer find, can't get config")
return self._model.get_network_config()
def __repr__(self):
return self._model.__repr__()
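# Editor's sketch: adding a fate_torch layer to the wrapper. fate_nn is the
# wrapper module defined in backend/torch/nn.py of this package.
if __name__ == '__main__':
    from pipeline.component.nn.backend.torch import nn as fate_nn
    seq = Sequential()
    seq.add(fate_nn.Linear(10, 1))
    print(seq.get_layer_type())      # 'torch'
    print(seq.get_network_config())  # per-layer param_dict via to_dict()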
| 3,465 | 34.731959 | 112 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/models/keras_interface.py |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
_TF_KERAS_VALID = False
try:
from tensorflow.keras.models import Sequential
_TF_KERAS_VALID = True
except ImportError:
pass
def build_model(model_type="sequential"):
if model_type != "sequential":
raise ValueError("Only support sequential model now")
return SequentialModel()
class SequentialModel(object):
def __init__(self):
if _TF_KERAS_VALID:
self._model = Sequential()
else:
self._model = None
def add(self, layer):
if not _TF_KERAS_VALID:
            raise ImportError(
                "please install tensorflow first: "
                "cannot import Sequential from tensorflow.keras.models")
self._model.add(layer)
@staticmethod
def get_loss_config(loss):
if isinstance(loss, str):
return loss
if loss.__module__ == "tensorflow.python.keras.losses":
return loss.__name__
        raise ValueError(
            "keras sequential model loss should be a string or a loss function from tf.keras")
@staticmethod
def get_optimizer_config(optimizer):
if isinstance(optimizer, str):
return optimizer
opt_config = optimizer.get_config()
if "name" in opt_config:
opt_config["optimizer"] = opt_config["name"]
del opt_config["name"]
return opt_config
def get_network_config(self):
if not _TF_KERAS_VALID:
            raise ImportError(
                "please install tensorflow first: "
                "cannot import Sequential from tensorflow.keras.models")
return json.loads(self._model.to_json())
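# Editor's sketch: the deprecated keras path; this runs only when tensorflow
# is installed, and the Dense layer is purely illustrative.
if __name__ == '__main__':
    if _TF_KERAS_VALID:
        from tensorflow.keras.layers import Dense
        m = build_model('sequential')
        m.add(Dense(units=1, input_shape=(10,), activation='sigmoid'))
        print(m.get_network_config())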
| 2,295 | 28.435897 | 91 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/models/__init__.py | | 0 | 0 | 0 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/__init__.py | | 0 | 0 | 0 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/import_hook.py |
try:
from pipeline.component.nn.backend.torch import nn as nn_
from pipeline.component.nn.backend.torch import init as init_
from pipeline.component.nn.backend.torch import optim as optim_
from pipeline.component.nn.backend.torch.cust import CustModel, CustLoss
from pipeline.component.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def monkey_patch(torch_nn, fate_torch_module):
for name in fate_torch_module.__dict__.keys():
        if '__' in name:  # skip dunder attributes
continue
if name in torch_nn.__dict__.keys():
torch_nn.__dict__[name] = fate_torch_module.__dict__[name]
def fate_torch_hook(torch_module_var):
"""
This is a monkey patch function that modify torch modules to use fate_torch layers and Components
:param torch_module_var:
:return:
"""
if torch_module_var.__name__ == 'torch':
monkey_patch(torch_module_var.nn, nn_)
monkey_patch(torch_module_var.optim, optim_)
monkey_patch(torch_module_var.nn.init, init_)
setattr(torch_module_var.nn, 'CustModel', CustModel)
setattr(torch_module_var.nn, 'InteractiveLayer', InteractiveLayer)
setattr(torch_module_var.nn, 'CustLoss', CustLoss)
elif torch_module_var.__name__ == 'torch.nn':
monkey_patch(torch_module_var, nn_)
setattr(torch_module_var, 'CustModel', CustModel)
        setattr(torch_module_var, 'InteractiveLayer', InteractiveLayer)
setattr(torch_module_var.nn, 'CustLoss', CustLoss)
elif torch_module_var.__name__ == 'torch.optim':
monkey_patch(torch_module_var, optim_)
elif torch_module_var.__name__ == 'torch.nn.init':
monkey_patch(torch_module_var, init_)
else:
raise ValueError(
'this module: {} does not support fate torch hook'.format(torch_module_var))
return torch_module_var
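# Editor's sketch: the canonical hook pattern. After patching, layers and
# optimizers built through torch carry fate_torch's to_dict() bookkeeping.
if __name__ == '__main__':
    import torch as t
    t = fate_torch_hook(t)
    fc = t.nn.Linear(10, 1)      # now also a FateTorchLayer
    print(fc.to_dict())
    opt = t.optim.Adam(lr=0.01)  # parameters are optional before submission
    print(opt.to_dict())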
| 1,920 | 36.666667 | 101 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/base.py |
import json
import torch as t
from torch.nn import Sequential as tSequential
from pipeline.component.nn.backend.torch.operation import OpBase
class FateTorchLayer(object):
def __init__(self):
t.nn.Module.__init__(self)
self.param_dict = dict()
self.initializer = {'weight': None, 'bias': None}
self.optimizer = None
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['layer'] = type(self).__name__
ret_dict['initializer'] = {}
if self.initializer['weight']:
ret_dict['initializer']['weight'] = self.initializer['weight']
if self.initializer['bias']:
ret_dict['initializer']['bias'] = self.initializer['bias']
return ret_dict
def add_optimizer(self, opt):
self.optimizer = opt
class FateTorchLoss(object):
def __init__(self):
self.param_dict = {}
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['loss_fn'] = type(self).__name__
return ret_dict
class FateTorchOptimizer(object):
def __init__(self):
self.param_dict = dict()
self.torch_class = None
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['optimizer'] = type(self).__name__
ret_dict['config_type'] = 'pytorch'
return ret_dict
def check_params(self, params):
        if isinstance(params, (FateTorchLayer, Sequential)):
            params.add_optimizer(self)
            params = params.parameters()
        l_param = list(params)
if len(l_param) == 0:
# fake parameters, for the case that there are only cust model
return [t.nn.Parameter(t.Tensor([0]))]
return l_param
def register_optimizer(self, input_):
if input_ is None:
return
        if isinstance(input_, (FateTorchLayer, Sequential)):
            input_.add_optimizer(self)
def to_torch_instance(self, parameters):
return self.torch_class(parameters, **self.param_dict)
class Sequential(tSequential):
def to_dict(self):
"""
get the structure of current sequential
"""
rs = {}
        for idx, k in enumerate(self._modules):
            ordered_name = str(idx) + '-' + k
            rs[ordered_name] = self._modules[k].to_dict()
return rs
def to_json(self):
return json.dumps(self.to_dict(), indent=4)
def add_optimizer(self, opt):
setattr(self, 'optimizer', opt)
def add(self, layer):
if isinstance(layer, Sequential):
self._modules = layer._modules
# copy optimizer
if hasattr(layer, 'optimizer'):
setattr(self, 'optimizer', layer.optimizer)
elif isinstance(layer, FateTorchLayer):
self.add_module(str(len(self)), layer)
            # inherit the layer's optimizer if this Sequential has none
if not hasattr(self, 'optimizer') and hasattr(layer, 'optimizer'):
setattr(self, 'optimizer', layer.optimizer)
else:
raise ValueError(
'unknown input layer type {}, this type is not supported'.format(
type(layer)))
@staticmethod
def get_loss_config(loss: FateTorchLoss):
return loss.to_dict()
def get_optimizer_config(self, optimizer=None):
if hasattr(self, 'optimizer'):
return self.optimizer.to_dict()
else:
return optimizer.to_dict()
def get_network_config(self):
return self.to_dict()
def get_torch_instance(fate_torch_nn_class: FateTorchLayer, param):
parent_torch_class = fate_torch_nn_class.__bases__
if issubclass(fate_torch_nn_class, OpBase):
return fate_torch_nn_class(**param)
for cls in parent_torch_class:
if issubclass(cls, t.nn.Module):
return cls(**param)
return None
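# Editor's sketch: config round-trip for the classes above. fate_nn is the
# wrapper module from nn.py in this package.
if __name__ == '__main__':
    from pipeline.component.nn.backend.torch import nn as fate_nn
    seq = Sequential(fate_nn.Linear(10, 4), fate_nn.ReLU())
    print(seq.to_json())  # ordered keys ('0-0', '1-1', ...) with layer param_dicts
    # rebuild a plain torch layer from a fate_torch class plus its params
    plain = get_torch_instance(fate_nn.Linear,
                               {'in_features': 10, 'out_features': 4, 'bias': True})
    print(type(plain))    # torch.nn.modules.linear.Linear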
| 4,209 | 26.880795 | 81 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/optim.py |
from torch import optim
from pipeline.component.nn.backend.torch.base import FateTorchOptimizer
class ASGD(optim.ASGD, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.01,
lambd=0.0001,
alpha=0.75,
t0=1000000.0,
weight_decay=0,
foreach=None,
maximize=False,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['lambd'] = lambd
self.param_dict['alpha'] = alpha
self.param_dict['t0'] = t0
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.param_dict['maximize'] = maximize
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.ASGD.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adadelta(optim.Adadelta, FateTorchOptimizer):
def __init__(self, params=None, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0, foreach=None, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['rho'] = rho
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adadelta.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adagrad(optim.Adagrad, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.01,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
foreach=None,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['lr_decay'] = lr_decay
self.param_dict['weight_decay'] = weight_decay
self.param_dict['initial_accumulator_value'] = initial_accumulator_value
self.param_dict['eps'] = eps
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adagrad.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adam(optim.Adam, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['amsgrad'] = amsgrad
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class AdamW(optim.AdamW, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['amsgrad'] = amsgrad
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.AdamW.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adamax(optim.Adamax, FateTorchOptimizer):
def __init__(self, params=None, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adamax.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class LBFGS(optim.LBFGS, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=1,
max_iter=20,
max_eval=None,
tolerance_grad=1e-07,
tolerance_change=1e-09,
history_size=100,
line_search_fn=None,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['max_iter'] = max_iter
self.param_dict['max_eval'] = max_eval
self.param_dict['tolerance_grad'] = tolerance_grad
self.param_dict['tolerance_change'] = tolerance_change
self.param_dict['history_size'] = history_size
self.param_dict['line_search_fn'] = line_search_fn
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.LBFGS.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class NAdam(optim.NAdam, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.002,
betas=(
0.9,
0.999),
eps=1e-08,
weight_decay=0,
momentum_decay=0.004,
foreach=None,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['momentum_decay'] = momentum_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.NAdam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class RAdam(optim.RAdam, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.RAdam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class RMSprop(optim.RMSprop, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.01,
alpha=0.99,
eps=1e-08,
weight_decay=0,
momentum=0,
centered=False,
foreach=None,
maximize=False,
differentiable=False,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['alpha'] = alpha
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['momentum'] = momentum
self.param_dict['centered'] = centered
self.param_dict['foreach'] = foreach
self.param_dict['maximize'] = maximize
self.param_dict['differentiable'] = differentiable
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.RMSprop.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Rprop(optim.Rprop, FateTorchOptimizer):
def __init__(self, params=None, lr=0.01, etas=(0.5, 1.2), step_sizes=(1e-06, 50), foreach=None, maximize=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['etas'] = etas
self.param_dict['step_sizes'] = step_sizes
self.param_dict['foreach'] = foreach
self.param_dict['maximize'] = maximize
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Rprop.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class SGD(optim.SGD, FateTorchOptimizer):
def __init__(self, params=None, lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['momentum'] = momentum
self.param_dict['dampening'] = dampening
self.param_dict['weight_decay'] = weight_decay
self.param_dict['nesterov'] = nesterov
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.SGD.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class SparseAdam(optim.SparseAdam, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, maximize=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['maximize'] = maximize
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.SparseAdam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
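# Editor's sketch: the optimizers above can be declared without parameters
# (config only) and materialized later into real torch optimizers.
if __name__ == '__main__':
    import torch as t
    opt = Adam(lr=0.001)
    print(opt.to_dict())    # includes 'optimizer': 'Adam', 'config_type': 'pytorch'
    model = t.nn.Linear(4, 1)
    torch_opt = opt.to_torch_instance(model.parameters())
    print(type(torch_opt))  # <class 'torch.optim.adam.Adam'>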
| 12,959 | 30.228916 | 118 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/cust.py |
from torch import nn
import importlib
from pipeline.component.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
import difflib
MODEL_PATH = None
LOSS_PATH = None
def str_simi(str_a, str_b):
return difflib.SequenceMatcher(None, str_a, str_b).quick_ratio()
def get_class(module_name, class_name, param, base_path):
if module_name.endswith('.py'):
module_name = module_name.replace('.py', '')
nn_modules = importlib.import_module('{}.{}'.format(base_path, module_name))
try:
name_simi_list = []
for k, v in nn_modules.__dict__.items():
if isinstance(v, type):
if issubclass(v, nn.Module) and v is not nn.Module:
if v.__name__ == class_name:
return v(**param)
else:
name_simi_list += ([(str_simi(class_name, v.__name__), v)])
sort_by_simi = sorted(name_simi_list, key=lambda x: -x[0])
if len(sort_by_simi) > 0:
            raise ValueError(
                'did not find any class in {}.py that is a subclass of nn.Module '
                'and named {}. Did you mean {}?'.format(
                    module_name, class_name, sort_by_simi[0][1].__name__))
        else:
            raise ValueError(
                'did not find any class in {}.py that is a subclass of nn.Module '
                'and named {}'.format(module_name, class_name))
except ValueError as e:
raise e
class CustModel(FateTorchLayer, nn.Module):
def __init__(self, module_name, class_name, **kwargs):
super(CustModel, self).__init__()
        assert isinstance(module_name, str), 'module_name must be a str specifying the module in the model_zoo'
        assert isinstance(class_name, str), 'class_name must be a str specifying the class in the module'
self.param_dict = {'module_name': module_name, 'class_name': class_name, 'param': kwargs}
self._model = None
def init_model(self):
if self._model is None:
self._model = self.get_pytorch_model()
def forward(self, x):
if self._model is None:
            raise ValueError('model is not initialized, call init_model() first')
return self._model(x)
def get_pytorch_model(self, module_path=None):
if module_path is None:
return get_class(
self.param_dict['module_name'],
self.param_dict['class_name'],
self.param_dict['param'],
MODEL_PATH)
else:
return get_class(
self.param_dict['module_name'],
self.param_dict['class_name'],
self.param_dict['param'],
module_path)
def __repr__(self):
return 'CustModel({})'.format(str(self.param_dict))
class CustLoss(FateTorchLoss, nn.Module):
def __init__(self, loss_module_name, class_name, **kwargs):
super(CustLoss, self).__init__()
        assert isinstance(loss_module_name, str), 'loss_module_name must be a str specifying the module that holds the loss class'
        assert isinstance(class_name, str), 'class_name must be a str specifying the class in the module'
self.param_dict = {'loss_module_name': loss_module_name, 'class_name': class_name, 'param': kwargs}
self._loss_fn = None
def init_loss_fn(self):
if self._loss_fn is None:
self._loss_fn = self.get_pytorch_model()
def forward(self, pred, label):
if self._loss_fn is None:
            raise ValueError('loss fn is not initialized, call init_loss_fn() first')
return self._loss_fn(pred, label)
def get_pytorch_model(self, module_path=None):
module_name: str = self.param_dict['loss_module_name']
class_name: str = self.param_dict['class_name']
module_param: dict = self.param_dict['param']
if module_path is None:
return get_class(module_name=module_name, class_name=class_name, param=module_param, base_path=LOSS_PATH)
else:
return get_class(module_name=module_name, class_name=class_name, param=module_param, base_path=module_path)
def __repr__(self):
return 'CustLoss({})'.format(str(self.param_dict))
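# Editor's sketch: declaring a model by name; 'my_net'/'MyNet' are
# placeholders for a module expected under the model_zoo directory.
if __name__ == '__main__':
    model = CustModel(module_name='my_net', class_name='MyNet', hidden_dim=32)
    print(model)  # CustModel({'module_name': 'my_net', 'class_name': 'MyNet', 'param': {'hidden_dim': 32}})
    # at run time the framework sets MODEL_PATH first, then:
    #   model.init_model()
    #   out = model(x)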
| 4,188 | 36.738739 | 119 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/init.py |
import copy
import torch as t
from torch.nn import init as torch_init
import functools
from pipeline.component.nn.backend.torch.base import FateTorchLayer
from pipeline.component.nn.backend.torch.base import Sequential
str_init_func_map = {
"uniform": torch_init.uniform_,
"normal": torch_init.normal_,
"constant": torch_init.constant_,
"xavier_uniform": torch_init.xavier_uniform_,
"xavier_normal": torch_init.xavier_normal_,
"kaiming_uniform": torch_init.kaiming_uniform_,
"kaiming_normal": torch_init.kaiming_normal_,
"eye": torch_init.eye_,
"dirac": torch_init.dirac_,
"orthogonal": torch_init.orthogonal_,
"sparse": torch_init.sparse_,
"zeros": torch_init.zeros_,
"ones": torch_init.ones_
}
#
# def extract_param(func):
#
# args = inspect.getargspec(func)
# keys = args[0][1:]
# if len(keys) == 0:
# return {}
# defaults = args[-1]
# args_map = {}
# if defaults is not None:
# for idx, i in enumerate(keys[-len(defaults):]):
# args_map[i] = defaults[idx]
#
# for i in keys:
# if i not in args_map:
# args_map[i] = Required()
#
# return args_map
def init_weight(m, initializer):
if hasattr(m, 'weight'):
initializer(m.weight)
# LSTM RNN
if hasattr(m, 'weight_hh_l0'):
initializer(m.weight_hh_l0)
# LSTM RNN
if hasattr(m, 'weight_ih_l0'):
initializer(m.weight_ih_l0)
def init_bias(m, initializer):
    if hasattr(m, 'bias') and not isinstance(m.bias, bool) and m.bias is not None:  # LSTM/RNN .bias may be a bool
initializer(m.bias)
# LSTM RNN
if hasattr(m, 'bias_hh_l0') and m.bias_hh_l0 is not None:
initializer(m.bias_hh_l0)
# LSTM RNN
if hasattr(m, 'bias_ih_l0') and m.bias_ih_l0 is not None:
initializer(m.bias_ih_l0)
def get_init_func_type(init='weight'):
if init == 'weight':
return init_weight
elif init == 'bias':
return init_bias
else:
return None
def recursive_init(m, init_func, obj):
if len(list(m.children())) > 0:
if m == obj:
return
recursive_init(m, init_func, m)
else:
try:
init_func(m)
except Exception as e:
            print('failed to initialize layer {}: {}'.format(m, e))
def make_apply_func(torch_initializer, param_dict, init_func, layer):
initializer = functools.partial(torch_initializer, **param_dict)
init_func = functools.partial(init_func, initializer=initializer)
recursive_init_func = functools.partial(
recursive_init, obj=layer, init_func=init_func)
return recursive_init_func, param_dict
def get_init_dict(init_func, param_dict, init_type):
rev_dict = {v: k for k, v in str_init_func_map.items()}
rs = {
'init_type': init_type,
'init_func': rev_dict[init_func],
'param': param_dict}
return rs
def record_initializer(layers, init_dict):
if isinstance(layers, FateTorchLayer):
if init_dict['init_type'] == 'weight':
layers.initializer['weight'] = init_dict
elif init_dict['init_type'] == 'bias':
layers.initializer['bias'] = init_dict
def run_init(torch_initializer, input_var, init, layer):
# recursive init
if isinstance(layer, Sequential):
for sub_layer in layer:
run_init(torch_initializer, input_var, init, sub_layer)
# init layer
elif isinstance(layer, FateTorchLayer) or isinstance(layer, t.nn.Module):
recursive_init_func, param_dict = make_apply_func(
torch_initializer, copy.deepcopy(input_var), get_init_func_type(init), layer)
layer.apply(recursive_init_func)
record_initializer(
layer,
get_init_dict(
torch_initializer,
param_dict,
init))
else:
try:
return torch_initializer(layer, **input_var)
except Exception as e:
print(e)
print('skip initialization')
"""
Init Func
"""
def local_extract(local_dict):
param = {}
for k, v in local_dict.items():
if k != 'layer' and k != 'init':
param[k] = v
return copy.deepcopy(param)
def uniform_(layer, a=0, b=1, init='weight'):
run_init(
str_init_func_map['uniform'],
local_extract(
locals()),
init,
layer)
def normal_(layer, mean=0, std=1, init='weight'):
run_init(str_init_func_map['normal'], local_extract(locals()), init, layer)
def constant_(layer, val, init='weight'):
run_init(
str_init_func_map['constant'],
local_extract(
locals()),
init,
layer)
def ones_(layer, init='weight'):
run_init(str_init_func_map['ones'], local_extract(locals()), init, layer)
def zeros_(layer, init='weight'):
run_init(str_init_func_map['zeros'], local_extract(locals()), init, layer)
def eye_(layer, init='weight'):
run_init(str_init_func_map['eye'], local_extract(locals()), init, layer)
def dirac_(layer, groups=1, init='weight'):
run_init(str_init_func_map['dirac'], local_extract(locals()), init, layer)
def xavier_uniform_(layer, gain=1.0, init='weight'):
run_init(str_init_func_map['xavier_uniform'],
local_extract(locals()), init, layer)
def xavier_normal_(layer, gain=1.0, init='weight'):
run_init(str_init_func_map['xavier_normal'],
local_extract(locals()), init, layer)
def kaiming_uniform_(
layer,
a=0,
mode='fan_in',
nonlinearity='leaky_relu',
init='weight'):
run_init(str_init_func_map['kaiming_uniform'],
local_extract(locals()), init, layer)
def kaiming_normal_(
layer,
a=0,
mode='fan_in',
nonlinearity='leaky_relu',
init='weight'):
run_init(str_init_func_map['kaiming_normal'],
local_extract(locals()), init, layer)
def orthogonal_(layer, gain=1, init='weight'):
run_init(
str_init_func_map['orthogonal'],
local_extract(
locals()),
init,
layer)
def sparse_(layer, sparsity, std=0.01, init='weight'):
run_init(str_init_func_map['sparse'], local_extract(locals()), init, layer)
str_fate_torch_init_func_map = {
"uniform": uniform_,
"normal": normal_,
"constant": constant_,
"xavier_uniform": xavier_uniform_,
"xavier_normal": xavier_normal_,
"kaiming_uniform": kaiming_uniform_,
"kaiming_normal": kaiming_normal_,
"eye": eye_,
"dirac": dirac_,
"orthogonal": orthogonal_,
"sparse": sparse_,
"zeros": zeros_,
"ones": ones_
}
if __name__ == '__main__':
pass
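    # Editor's sketch: initialize a fate_torch layer and inspect the recorded
    # initializer config; fate_nn is the wrapper module from nn.py.
    from pipeline.component.nn.backend.torch import nn as fate_nn
    layer = fate_nn.Linear(10, 1)
    xavier_normal_(layer, gain=1.0, init='weight')
    zeros_(layer, init='bias')
    print(layer.initializer)  # {'weight': {...}, 'bias': {...}} with init_func names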
| 6,775 | 25.677165 | 89 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/nn.py |
from pipeline.component.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
from pipeline.component.nn.backend.torch.base import Sequential
from torch import nn
class Bilinear(nn.modules.linear.Bilinear, FateTorchLayer):
def __init__(
self,
in1_features,
in2_features,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in1_features'] = in1_features
self.param_dict['in2_features'] = in2_features
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.Bilinear.__init__(self, **self.param_dict)
class Identity(nn.modules.linear.Identity, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.linear.Identity.__init__(self, **self.param_dict)
class LazyLinear(nn.modules.linear.LazyLinear, FateTorchLayer):
def __init__(
self,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.LazyLinear.__init__(self, **self.param_dict)
class Linear(nn.modules.linear.Linear, FateTorchLayer):
def __init__(
self,
in_features,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_features'] = in_features
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.Linear.__init__(self, **self.param_dict)
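# Editor's sketch: every wrapper in this module is simultaneously a working
# torch layer and a serializable config carrier, e.g. (illustrative only):
#
#   fc = Linear(in_features=10, out_features=4)
#   fc(torch.randn(2, 10))  # behaves exactly like torch.nn.Linear
#   fc.to_dict()            # {'layer': 'Linear', 'in_features': 10, ...}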
class NonDynamicallyQuantizableLinear(
nn.modules.linear.NonDynamicallyQuantizableLinear,
FateTorchLayer):
def __init__(
self,
in_features,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_features'] = in_features
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.NonDynamicallyQuantizableLinear.__init__(
self, **self.param_dict)
class GRU(nn.modules.rnn.GRU, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.rnn.GRU.__init__(self, **self.param_dict)
class GRUCell(nn.modules.rnn.GRUCell, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.GRUCell.__init__(self, **self.param_dict)
class LSTM(nn.modules.rnn.LSTM, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.rnn.LSTM.__init__(self, **self.param_dict)
class LSTMCell(nn.modules.rnn.LSTMCell, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.LSTMCell.__init__(self, **self.param_dict)
class RNN(nn.modules.rnn.RNN, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.rnn.RNN.__init__(self, **self.param_dict)
class RNNBase(nn.modules.rnn.RNNBase, FateTorchLayer):
def __init__(
self,
mode,
input_size,
hidden_size,
num_layers=1,
bias=True,
batch_first=False,
dropout=0.0,
bidirectional=False,
proj_size=0,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['num_layers'] = num_layers
self.param_dict['bias'] = bias
self.param_dict['batch_first'] = batch_first
self.param_dict['dropout'] = dropout
self.param_dict['bidirectional'] = bidirectional
self.param_dict['proj_size'] = proj_size
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['mode'] = mode
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.RNNBase.__init__(self, **self.param_dict)
class RNNCell(nn.modules.rnn.RNNCell, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias=True,
nonlinearity='tanh',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['nonlinearity'] = nonlinearity
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.RNNCell.__init__(self, **self.param_dict)
class RNNCellBase(nn.modules.rnn.RNNCellBase, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias,
num_chunks,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict['bias'] = bias
self.param_dict['num_chunks'] = num_chunks
self.param_dict.update(kwargs)
nn.modules.rnn.RNNCellBase.__init__(self, **self.param_dict)
class Embedding(nn.modules.sparse.Embedding, FateTorchLayer):
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding_idx'] = padding_idx
self.param_dict['max_norm'] = max_norm
self.param_dict['norm_type'] = norm_type
self.param_dict['scale_grad_by_freq'] = scale_grad_by_freq
self.param_dict['sparse'] = sparse
self.param_dict['_weight'] = _weight
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_embeddings'] = num_embeddings
self.param_dict['embedding_dim'] = embedding_dim
self.param_dict.update(kwargs)
nn.modules.sparse.Embedding.__init__(self, **self.param_dict)
class EmbeddingBag(nn.modules.sparse.EmbeddingBag, FateTorchLayer):
def __init__(
self,
num_embeddings,
embedding_dim,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
mode='mean',
sparse=False,
_weight=None,
include_last_offset=False,
padding_idx=None,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['max_norm'] = max_norm
self.param_dict['norm_type'] = norm_type
self.param_dict['scale_grad_by_freq'] = scale_grad_by_freq
self.param_dict['mode'] = mode
self.param_dict['sparse'] = sparse
self.param_dict['_weight'] = _weight
self.param_dict['include_last_offset'] = include_last_offset
self.param_dict['padding_idx'] = padding_idx
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_embeddings'] = num_embeddings
self.param_dict['embedding_dim'] = embedding_dim
self.param_dict.update(kwargs)
nn.modules.sparse.EmbeddingBag.__init__(self, **self.param_dict)
class AlphaDropout(nn.modules.dropout.AlphaDropout, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.AlphaDropout.__init__(self, **self.param_dict)
class Dropout(nn.modules.dropout.Dropout, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout.__init__(self, **self.param_dict)
class Dropout1d(nn.modules.dropout.Dropout1d, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout1d.__init__(self, **self.param_dict)
class Dropout2d(nn.modules.dropout.Dropout2d, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout2d.__init__(self, **self.param_dict)
class Dropout3d(nn.modules.dropout.Dropout3d, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout3d.__init__(self, **self.param_dict)
class FeatureAlphaDropout(
nn.modules.dropout.FeatureAlphaDropout,
FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.FeatureAlphaDropout.__init__(
self, **self.param_dict)
class _DropoutNd(nn.modules.dropout._DropoutNd, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout._DropoutNd.__init__(self, **self.param_dict)
class CELU(nn.modules.activation.CELU, FateTorchLayer):
def __init__(self, alpha=1.0, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['alpha'] = alpha
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.CELU.__init__(self, **self.param_dict)
class ELU(nn.modules.activation.ELU, FateTorchLayer):
def __init__(self, alpha=1.0, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['alpha'] = alpha
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ELU.__init__(self, **self.param_dict)
class GELU(nn.modules.activation.GELU, FateTorchLayer):
def __init__(self, approximate='none', **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['approximate'] = approximate
self.param_dict.update(kwargs)
nn.modules.activation.GELU.__init__(self, **self.param_dict)
class GLU(nn.modules.activation.GLU, FateTorchLayer):
def __init__(self, dim=-1, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.GLU.__init__(self, **self.param_dict)
class Hardshrink(nn.modules.activation.Hardshrink, FateTorchLayer):
def __init__(self, lambd=0.5, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lambd'] = lambd
self.param_dict.update(kwargs)
nn.modules.activation.Hardshrink.__init__(self, **self.param_dict)
class Hardsigmoid(nn.modules.activation.Hardsigmoid, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.Hardsigmoid.__init__(self, **self.param_dict)
class Hardswish(nn.modules.activation.Hardswish, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.Hardswish.__init__(self, **self.param_dict)
class Hardtanh(nn.modules.activation.Hardtanh, FateTorchLayer):
def __init__(
self,
min_val=-1.0,
max_val=1.0,
inplace=False,
min_value=None,
max_value=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['min_val'] = min_val
self.param_dict['max_val'] = max_val
self.param_dict['inplace'] = inplace
self.param_dict['min_value'] = min_value
self.param_dict['max_value'] = max_value
self.param_dict.update(kwargs)
nn.modules.activation.Hardtanh.__init__(self, **self.param_dict)
class LeakyReLU(nn.modules.activation.LeakyReLU, FateTorchLayer):
def __init__(self, negative_slope=0.01, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['negative_slope'] = negative_slope
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.LeakyReLU.__init__(self, **self.param_dict)
class LogSigmoid(nn.modules.activation.LogSigmoid, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.LogSigmoid.__init__(self, **self.param_dict)
class LogSoftmax(nn.modules.activation.LogSoftmax, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.LogSoftmax.__init__(self, **self.param_dict)
class Mish(nn.modules.activation.Mish, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.Mish.__init__(self, **self.param_dict)
class MultiheadAttention(
nn.modules.activation.MultiheadAttention,
FateTorchLayer):
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dropout'] = dropout
self.param_dict['bias'] = bias
self.param_dict['add_bias_kv'] = add_bias_kv
self.param_dict['add_zero_attn'] = add_zero_attn
self.param_dict['kdim'] = kdim
self.param_dict['vdim'] = vdim
self.param_dict['batch_first'] = batch_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['embed_dim'] = embed_dim
self.param_dict['num_heads'] = num_heads
self.param_dict.update(kwargs)
nn.modules.activation.MultiheadAttention.__init__(
self, **self.param_dict)
class PReLU(nn.modules.activation.PReLU, FateTorchLayer):
def __init__(
self,
num_parameters=1,
init=0.25,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['num_parameters'] = num_parameters
self.param_dict['init'] = init
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.activation.PReLU.__init__(self, **self.param_dict)
class RReLU(nn.modules.activation.RReLU, FateTorchLayer):
def __init__(
self,
lower=0.125,
upper=0.3333333333333333,
inplace=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lower'] = lower
self.param_dict['upper'] = upper
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.RReLU.__init__(self, **self.param_dict)
class ReLU(nn.modules.activation.ReLU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ReLU.__init__(self, **self.param_dict)
class ReLU6(nn.modules.activation.ReLU6, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ReLU6.__init__(self, **self.param_dict)
class SELU(nn.modules.activation.SELU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.SELU.__init__(self, **self.param_dict)
class SiLU(nn.modules.activation.SiLU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.SiLU.__init__(self, **self.param_dict)
class Sigmoid(nn.modules.activation.Sigmoid, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Sigmoid.__init__(self, **self.param_dict)
class Softmax(nn.modules.activation.Softmax, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.Softmax.__init__(self, **self.param_dict)
class Softmax2d(nn.modules.activation.Softmax2d, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Softmax2d.__init__(self, **self.param_dict)
class Softmin(nn.modules.activation.Softmin, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.Softmin.__init__(self, **self.param_dict)
class Softplus(nn.modules.activation.Softplus, FateTorchLayer):
def __init__(self, beta=1, threshold=20, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['beta'] = beta
self.param_dict['threshold'] = threshold
self.param_dict.update(kwargs)
nn.modules.activation.Softplus.__init__(self, **self.param_dict)
class Softshrink(nn.modules.activation.Softshrink, FateTorchLayer):
def __init__(self, lambd=0.5, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lambd'] = lambd
self.param_dict.update(kwargs)
nn.modules.activation.Softshrink.__init__(self, **self.param_dict)
class Softsign(nn.modules.activation.Softsign, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Softsign.__init__(self, **self.param_dict)
class Tanh(nn.modules.activation.Tanh, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Tanh.__init__(self, **self.param_dict)
class Tanhshrink(nn.modules.activation.Tanhshrink, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Tanhshrink.__init__(self, **self.param_dict)
class Threshold(nn.modules.activation.Threshold, FateTorchLayer):
def __init__(self, threshold, value, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict['threshold'] = threshold
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.activation.Threshold.__init__(self, **self.param_dict)
class Conv1d(nn.modules.conv.Conv1d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.Conv1d.__init__(self, **self.param_dict)
class Conv2d(nn.modules.conv.Conv2d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.Conv2d.__init__(self, **self.param_dict)
class Conv3d(nn.modules.conv.Conv3d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.Conv3d.__init__(self, **self.param_dict)
class ConvTranspose1d(nn.modules.conv.ConvTranspose1d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.ConvTranspose1d.__init__(self, **self.param_dict)
class ConvTranspose2d(nn.modules.conv.ConvTranspose2d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.ConvTranspose2d.__init__(self, **self.param_dict)
class ConvTranspose3d(nn.modules.conv.ConvTranspose3d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.ConvTranspose3d.__init__(self, **self.param_dict)
class LazyConv1d(nn.modules.conv.LazyConv1d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConv1d.__init__(self, **self.param_dict)
class LazyConv2d(nn.modules.conv.LazyConv2d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConv2d.__init__(self, **self.param_dict)
class LazyConv3d(nn.modules.conv.LazyConv3d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConv3d.__init__(self, **self.param_dict)
class LazyConvTranspose1d(nn.modules.conv.LazyConvTranspose1d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConvTranspose1d.__init__(self, **self.param_dict)
class LazyConvTranspose2d(nn.modules.conv.LazyConvTranspose2d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConvTranspose2d.__init__(self, **self.param_dict)
class LazyConvTranspose3d(nn.modules.conv.LazyConvTranspose3d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConvTranspose3d.__init__(self, **self.param_dict)
class _ConvNd(nn.modules.conv._ConvNd, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['transposed'] = transposed
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict.update(kwargs)
nn.modules.conv._ConvNd.__init__(self, **self.param_dict)
class _ConvTransposeMixin(nn.modules.conv._ConvTransposeMixin, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.conv._ConvTransposeMixin.__init__(self, **self.param_dict)
class _ConvTransposeNd(nn.modules.conv._ConvTransposeNd, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['transposed'] = transposed
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict.update(kwargs)
nn.modules.conv._ConvTransposeNd.__init__(self, **self.param_dict)
class _LazyConvXdMixin(nn.modules.conv._LazyConvXdMixin, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.conv._LazyConvXdMixin.__init__(self, **self.param_dict)
class Transformer(nn.modules.transformer.Transformer, FateTorchLayer):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
custom_encoder=None,
custom_decoder=None,
layer_norm_eps=1e-05,
batch_first=False,
norm_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['d_model'] = d_model
self.param_dict['nhead'] = nhead
self.param_dict['num_encoder_layers'] = num_encoder_layers
self.param_dict['num_decoder_layers'] = num_decoder_layers
self.param_dict['dim_feedforward'] = dim_feedforward
self.param_dict['dropout'] = dropout
self.param_dict['custom_encoder'] = custom_encoder
self.param_dict['custom_decoder'] = custom_decoder
self.param_dict['layer_norm_eps'] = layer_norm_eps
self.param_dict['batch_first'] = batch_first
self.param_dict['norm_first'] = norm_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.transformer.Transformer.__init__(self, **self.param_dict)
class TransformerDecoder(
nn.modules.transformer.TransformerDecoder,
FateTorchLayer):
def __init__(self, decoder_layer, num_layers, norm=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['norm'] = norm
self.param_dict['decoder_layer'] = decoder_layer
self.param_dict['num_layers'] = num_layers
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerDecoder.__init__(
self, **self.param_dict)
class TransformerDecoderLayer(
nn.modules.transformer.TransformerDecoderLayer,
FateTorchLayer):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
layer_norm_eps=1e-05,
batch_first=False,
norm_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim_feedforward'] = dim_feedforward
self.param_dict['dropout'] = dropout
self.param_dict['layer_norm_eps'] = layer_norm_eps
self.param_dict['batch_first'] = batch_first
self.param_dict['norm_first'] = norm_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['d_model'] = d_model
self.param_dict['nhead'] = nhead
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerDecoderLayer.__init__(
self, **self.param_dict)
class TransformerEncoder(
nn.modules.transformer.TransformerEncoder,
FateTorchLayer):
def __init__(
self,
encoder_layer,
num_layers,
norm=None,
enable_nested_tensor=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['norm'] = norm
self.param_dict['enable_nested_tensor'] = enable_nested_tensor
self.param_dict['encoder_layer'] = encoder_layer
self.param_dict['num_layers'] = num_layers
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerEncoder.__init__(
self, **self.param_dict)
class TransformerEncoderLayer(
nn.modules.transformer.TransformerEncoderLayer,
FateTorchLayer):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
layer_norm_eps=1e-05,
batch_first=False,
norm_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim_feedforward'] = dim_feedforward
self.param_dict['dropout'] = dropout
self.param_dict['layer_norm_eps'] = layer_norm_eps
self.param_dict['batch_first'] = batch_first
self.param_dict['norm_first'] = norm_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['d_model'] = d_model
self.param_dict['nhead'] = nhead
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerEncoderLayer.__init__(
self, **self.param_dict)
class AdaptiveAvgPool1d(nn.modules.pooling.AdaptiveAvgPool1d, FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveAvgPool1d.__init__(self, **self.param_dict)
class AdaptiveAvgPool2d(nn.modules.pooling.AdaptiveAvgPool2d, FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveAvgPool2d.__init__(self, **self.param_dict)
class AdaptiveAvgPool3d(nn.modules.pooling.AdaptiveAvgPool3d, FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveAvgPool3d.__init__(self, **self.param_dict)
class AdaptiveMaxPool1d(nn.modules.pooling.AdaptiveMaxPool1d, FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveMaxPool1d.__init__(self, **self.param_dict)
class AdaptiveMaxPool2d(nn.modules.pooling.AdaptiveMaxPool2d, FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveMaxPool2d.__init__(self, **self.param_dict)
class AdaptiveMaxPool3d(nn.modules.pooling.AdaptiveMaxPool3d, FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveMaxPool3d.__init__(self, **self.param_dict)
class AvgPool1d(nn.modules.pooling.AvgPool1d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['count_include_pad'] = count_include_pad
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.AvgPool1d.__init__(self, **self.param_dict)
class AvgPool2d(nn.modules.pooling.AvgPool2d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['count_include_pad'] = count_include_pad
self.param_dict['divisor_override'] = divisor_override
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.AvgPool2d.__init__(self, **self.param_dict)
class AvgPool3d(nn.modules.pooling.AvgPool3d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['count_include_pad'] = count_include_pad
self.param_dict['divisor_override'] = divisor_override
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.AvgPool3d.__init__(self, **self.param_dict)
class FractionalMaxPool2d(
nn.modules.pooling.FractionalMaxPool2d,
FateTorchLayer):
def __init__(
self,
kernel_size,
output_size=None,
output_ratio=None,
return_indices=False,
_random_samples=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict['output_ratio'] = output_ratio
self.param_dict['return_indices'] = return_indices
self.param_dict['_random_samples'] = _random_samples
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.FractionalMaxPool2d.__init__(
self, **self.param_dict)
class FractionalMaxPool3d(
nn.modules.pooling.FractionalMaxPool3d,
FateTorchLayer):
def __init__(
self,
kernel_size,
output_size=None,
output_ratio=None,
return_indices=False,
_random_samples=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict['output_ratio'] = output_ratio
self.param_dict['return_indices'] = return_indices
self.param_dict['_random_samples'] = _random_samples
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.FractionalMaxPool3d.__init__(
self, **self.param_dict)
class LPPool1d(nn.modules.pooling.LPPool1d, FateTorchLayer):
def __init__(
self,
norm_type,
kernel_size,
stride=None,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['norm_type'] = norm_type
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.LPPool1d.__init__(self, **self.param_dict)
class LPPool2d(nn.modules.pooling.LPPool2d, FateTorchLayer):
def __init__(
self,
norm_type,
kernel_size,
stride=None,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['norm_type'] = norm_type
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.LPPool2d.__init__(self, **self.param_dict)
class MaxPool1d(nn.modules.pooling.MaxPool1d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxPool1d.__init__(self, **self.param_dict)
class MaxPool2d(nn.modules.pooling.MaxPool2d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxPool2d.__init__(self, **self.param_dict)
class MaxPool3d(nn.modules.pooling.MaxPool3d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxPool3d.__init__(self, **self.param_dict)
class MaxUnpool1d(nn.modules.pooling.MaxUnpool1d, FateTorchLayer):
def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxUnpool1d.__init__(self, **self.param_dict)
class MaxUnpool2d(nn.modules.pooling.MaxUnpool2d, FateTorchLayer):
def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxUnpool2d.__init__(self, **self.param_dict)
class MaxUnpool3d(nn.modules.pooling.MaxUnpool3d, FateTorchLayer):
def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxUnpool3d.__init__(self, **self.param_dict)
class _AdaptiveAvgPoolNd(
nn.modules.pooling._AdaptiveAvgPoolNd,
FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling._AdaptiveAvgPoolNd.__init__(self, **self.param_dict)
class _AdaptiveMaxPoolNd(
nn.modules.pooling._AdaptiveMaxPoolNd,
FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling._AdaptiveMaxPoolNd.__init__(self, **self.param_dict)
class _AvgPoolNd(nn.modules.pooling._AvgPoolNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.pooling._AvgPoolNd.__init__(self, **self.param_dict)
class _LPPoolNd(nn.modules.pooling._LPPoolNd, FateTorchLayer):
def __init__(
self,
norm_type,
kernel_size,
stride=None,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['norm_type'] = norm_type
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling._LPPoolNd.__init__(self, **self.param_dict)
class _MaxPoolNd(nn.modules.pooling._MaxPoolNd, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling._MaxPoolNd.__init__(self, **self.param_dict)
class _MaxUnpoolNd(nn.modules.pooling._MaxUnpoolNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.pooling._MaxUnpoolNd.__init__(self, **self.param_dict)
class BatchNorm1d(nn.modules.batchnorm.BatchNorm1d, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.BatchNorm1d.__init__(self, **self.param_dict)
class BatchNorm2d(nn.modules.batchnorm.BatchNorm2d, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.BatchNorm2d.__init__(self, **self.param_dict)
class BatchNorm3d(nn.modules.batchnorm.BatchNorm3d, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.BatchNorm3d.__init__(self, **self.param_dict)
class LazyBatchNorm1d(nn.modules.batchnorm.LazyBatchNorm1d, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm.LazyBatchNorm1d.__init__(self, **self.param_dict)
class LazyBatchNorm2d(nn.modules.batchnorm.LazyBatchNorm2d, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm.LazyBatchNorm2d.__init__(self, **self.param_dict)
class LazyBatchNorm3d(nn.modules.batchnorm.LazyBatchNorm3d, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm.LazyBatchNorm3d.__init__(self, **self.param_dict)
class SyncBatchNorm(nn.modules.batchnorm.SyncBatchNorm, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
process_group=None,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['process_group'] = process_group
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.SyncBatchNorm.__init__(self, **self.param_dict)
class _BatchNorm(nn.modules.batchnorm._BatchNorm, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm._BatchNorm.__init__(self, **self.param_dict)
class _LazyNormBase(nn.modules.batchnorm._LazyNormBase, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm._LazyNormBase.__init__(self, **self.param_dict)
class _NormBase(nn.modules.batchnorm._NormBase, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm._NormBase.__init__(self, **self.param_dict)
class ConstantPad1d(nn.modules.padding.ConstantPad1d, FateTorchLayer):
def __init__(self, padding, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding.ConstantPad1d.__init__(self, **self.param_dict)
class ConstantPad2d(nn.modules.padding.ConstantPad2d, FateTorchLayer):
def __init__(self, padding, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding.ConstantPad2d.__init__(self, **self.param_dict)
class ConstantPad3d(nn.modules.padding.ConstantPad3d, FateTorchLayer):
def __init__(self, padding, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding.ConstantPad3d.__init__(self, **self.param_dict)
class ReflectionPad1d(nn.modules.padding.ReflectionPad1d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReflectionPad1d.__init__(self, **self.param_dict)
class ReflectionPad2d(nn.modules.padding.ReflectionPad2d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReflectionPad2d.__init__(self, **self.param_dict)
class ReflectionPad3d(nn.modules.padding.ReflectionPad3d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReflectionPad3d.__init__(self, **self.param_dict)
class ReplicationPad1d(nn.modules.padding.ReplicationPad1d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReplicationPad1d.__init__(self, **self.param_dict)
class ReplicationPad2d(nn.modules.padding.ReplicationPad2d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReplicationPad2d.__init__(self, **self.param_dict)
class ReplicationPad3d(nn.modules.padding.ReplicationPad3d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReplicationPad3d.__init__(self, **self.param_dict)
class ZeroPad2d(nn.modules.padding.ZeroPad2d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ZeroPad2d.__init__(self, **self.param_dict)
class _ConstantPadNd(nn.modules.padding._ConstantPadNd, FateTorchLayer):
def __init__(self, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding._ConstantPadNd.__init__(self, **self.param_dict)
class _ReflectionPadNd(nn.modules.padding._ReflectionPadNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.padding._ReflectionPadNd.__init__(self, **self.param_dict)
class _ReplicationPadNd(nn.modules.padding._ReplicationPadNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.padding._ReplicationPadNd.__init__(self, **self.param_dict)
class BCELoss(nn.modules.loss.BCELoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.BCELoss.__init__(self, **self.param_dict)
class BCEWithLogitsLoss(nn.modules.loss.BCEWithLogitsLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
pos_weight=None,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['pos_weight'] = pos_weight
self.param_dict.update(kwargs)
nn.modules.loss.BCEWithLogitsLoss.__init__(self, **self.param_dict)
class CTCLoss(nn.modules.loss.CTCLoss, FateTorchLoss):
def __init__(
self,
blank=0,
reduction='mean',
zero_infinity=False,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['blank'] = blank
self.param_dict['reduction'] = reduction
self.param_dict['zero_infinity'] = zero_infinity
self.param_dict.update(kwargs)
nn.modules.loss.CTCLoss.__init__(self, **self.param_dict)
class CosineEmbeddingLoss(nn.modules.loss.CosineEmbeddingLoss, FateTorchLoss):
def __init__(
self,
margin=0.0,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.CosineEmbeddingLoss.__init__(self, **self.param_dict)
class CrossEntropyLoss(nn.modules.loss.CrossEntropyLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction='mean',
label_smoothing=0.0,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['ignore_index'] = ignore_index
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['label_smoothing'] = label_smoothing
self.param_dict.update(kwargs)
nn.modules.loss.CrossEntropyLoss.__init__(self, **self.param_dict)
class GaussianNLLLoss(nn.modules.loss.GaussianNLLLoss, FateTorchLoss):
def __init__(self, **kwargs):
FateTorchLoss.__init__(self)
self.param_dict.update(kwargs)
nn.modules.loss.GaussianNLLLoss.__init__(self, **self.param_dict)
class HingeEmbeddingLoss(nn.modules.loss.HingeEmbeddingLoss, FateTorchLoss):
def __init__(
self,
margin=1.0,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.HingeEmbeddingLoss.__init__(self, **self.param_dict)
class HuberLoss(nn.modules.loss.HuberLoss, FateTorchLoss):
def __init__(self, reduction='mean', delta=1.0, **kwargs):
FateTorchLoss.__init__(self)
self.param_dict['reduction'] = reduction
self.param_dict['delta'] = delta
self.param_dict.update(kwargs)
nn.modules.loss.HuberLoss.__init__(self, **self.param_dict)
class KLDivLoss(nn.modules.loss.KLDivLoss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
log_target=False,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['log_target'] = log_target
self.param_dict.update(kwargs)
nn.modules.loss.KLDivLoss.__init__(self, **self.param_dict)
class L1Loss(nn.modules.loss.L1Loss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.L1Loss.__init__(self, **self.param_dict)
class MSELoss(nn.modules.loss.MSELoss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MSELoss.__init__(self, **self.param_dict)
class MarginRankingLoss(nn.modules.loss.MarginRankingLoss, FateTorchLoss):
def __init__(
self,
margin=0.0,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MarginRankingLoss.__init__(self, **self.param_dict)
class MultiLabelMarginLoss(
nn.modules.loss.MultiLabelMarginLoss,
FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MultiLabelMarginLoss.__init__(self, **self.param_dict)
class MultiLabelSoftMarginLoss(
nn.modules.loss.MultiLabelSoftMarginLoss,
FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MultiLabelSoftMarginLoss.__init__(
self, **self.param_dict)
class MultiMarginLoss(nn.modules.loss.MultiMarginLoss, FateTorchLoss):
def __init__(
self,
p=1,
margin=1.0,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['p'] = p
self.param_dict['margin'] = margin
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MultiMarginLoss.__init__(self, **self.param_dict)
class NLLLoss(nn.modules.loss.NLLLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['ignore_index'] = ignore_index
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.NLLLoss.__init__(self, **self.param_dict)
class NLLLoss2d(nn.modules.loss.NLLLoss2d, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['ignore_index'] = ignore_index
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.NLLLoss2d.__init__(self, **self.param_dict)
class PoissonNLLLoss(nn.modules.loss.PoissonNLLLoss, FateTorchLoss):
def __init__(
self,
log_input=True,
full=False,
size_average=None,
eps=1e-08,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['log_input'] = log_input
self.param_dict['full'] = full
self.param_dict['size_average'] = size_average
self.param_dict['eps'] = eps
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.PoissonNLLLoss.__init__(self, **self.param_dict)
class SmoothL1Loss(nn.modules.loss.SmoothL1Loss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
beta=1.0,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['beta'] = beta
self.param_dict.update(kwargs)
nn.modules.loss.SmoothL1Loss.__init__(self, **self.param_dict)
class SoftMarginLoss(nn.modules.loss.SoftMarginLoss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.SoftMarginLoss.__init__(self, **self.param_dict)
class TripletMarginLoss(nn.modules.loss.TripletMarginLoss, FateTorchLoss):
def __init__(
self,
margin=1.0,
p=2.0,
eps=1e-06,
swap=False,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['p'] = p
self.param_dict['eps'] = eps
self.param_dict['swap'] = swap
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.TripletMarginLoss.__init__(self, **self.param_dict)
class TripletMarginWithDistanceLoss(
nn.modules.loss.TripletMarginWithDistanceLoss,
FateTorchLoss):
def __init__(self, **kwargs):
FateTorchLoss.__init__(self)
self.param_dict.update(kwargs)
nn.modules.loss.TripletMarginWithDistanceLoss.__init__(
self, **self.param_dict)
class _Loss(nn.modules.loss._Loss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss._Loss.__init__(self, **self.param_dict)
class _WeightedLoss(nn.modules.loss._WeightedLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss._WeightedLoss.__init__(self, **self.param_dict)
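if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # generated file). Each wrapper records its constructor arguments in
    # param_dict -- the dict the FATE pipeline serializes into the job
    # config -- while still behaving as an ordinary torch module.
    conv = Conv2d(in_channels=3, out_channels=8, kernel_size=3)
    print(conv.param_dict['kernel_size'])   # 3, recorded for serialization
    loss = CrossEntropyLoss()
    print(loss.param_dict['reduction'])     # 'mean'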
| 81,792 | 32.412173 | 82 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/component/nn/backend/torch/interactive.py
|
import torch as t
from torch.nn import ReLU, Linear, LazyLinear, Tanh, Sigmoid, Dropout, Sequential
from pipeline.component.nn.backend.torch.base import FateTorchLayer
class InteractiveLayer(t.nn.Module, FateTorchLayer):
r"""A :class: InteractiveLayer.
An interface for InteractiveLayer. In interactive layer, the forward method is:
out = activation( Linear(guest_input) + Linear(host_0_input) + Linear(host_1_input) ..)
Args:
out_dim: int, the output dimension of InteractiveLayer
host_num: int, specify the number of host party, default is 1, need to modify this parameter
when running multi-party modeling
guest_dim: int or None, the input dimension of guest features, if None, will use LazyLinear layer
that automatically infers the input dimension
host_dim: int, or None:
int: the input dimension of all host features
None: automatically infer the input dimension of all host features
activation: str, support relu, tanh, sigmoid
dropout: float in 0-1, if None, dropout is disabled
guest_bias: bias for guest linear layer
host_bias: bias for host linear layers
need_guest: if false, will ignore the input of guest bottom model
"""
def __init__(
self,
out_dim,
guest_dim=None,
host_num=1,
host_dim=None,
activation='relu',
dropout=None,
guest_bias=True,
host_bias=True,
need_guest=True,
):
t.nn.Module.__init__(self)
FateTorchLayer.__init__(self)
self.activation = None
if activation is not None:
if activation.lower() == 'relu':
self.activation = ReLU()
elif activation.lower() == 'tanh':
self.activation = Tanh()
elif activation.lower() == 'sigmoid':
self.activation = Sigmoid()
else:
raise ValueError(
'activation not support {}, avail: relu, tanh, sigmoid'.format(activation))
self.dropout = None
if dropout is not None:
assert isinstance(dropout, float), 'dropout must be a float'
self.dropout = Dropout(p=dropout)
        assert isinstance(out_dim, int) and out_dim >= 0, 'out_dim must be an int >= 0'
self.param_dict['out_dim'] = out_dim
self.param_dict['activation'] = activation
self.param_dict['dropout'] = dropout
self.param_dict['need_guest'] = need_guest
        assert isinstance(
            host_num, int) and host_num >= 1, 'host_num must be an int >= 1'
self.param_dict['host_num'] = host_num
if guest_dim is not None:
assert isinstance(guest_dim, int)
if host_dim is not None:
assert isinstance(host_dim, int)
self.guest_bias = guest_bias
self.param_dict['guest_dim'] = guest_dim
self.param_dict['host_dim'] = host_dim
self.param_dict['guest_bias'] = guest_bias
self.param_dict['host_bias'] = host_bias
if need_guest:
if guest_dim is None:
self.guest_model = LazyLinear(out_dim, guest_bias)
else:
self.guest_model = Linear(guest_dim, out_dim, guest_bias)
else:
self.guest_model = None
self.out_dim = out_dim
self.host_dim = host_dim
self.host_bias = host_bias
        self.need_guest = need_guest
        self.host_model = t.nn.ModuleList()
for i in range(host_num):
self.host_model.append(self.make_host_model())
        # build the post-sum activation pipeline; skip entries that are None so
        # that act_seq stays callable even when activation/dropout are disabled
        act_modules = [m for m in (self.activation, self.dropout) if m is not None]
        self.act_seq = Sequential(*act_modules)
def lazy_to_linear(self, guest_dim=None, host_dims=None):
if isinstance(
self.guest_model,
t.nn.LazyLinear) and guest_dim is not None:
self.guest_model = t.nn.Linear(
guest_dim, self.out_dim, bias=self.guest_bias)
if isinstance(
self.host_model[0],
t.nn.LazyLinear) and host_dims is not None:
new_model_list = t.nn.ModuleList()
for dim in host_dims:
new_model_list.append(
t.nn.Linear(
dim,
self.out_dim,
bias=self.host_bias))
self.host_model = new_model_list
def make_host_model(self):
if self.host_dim is None:
return LazyLinear(self.out_dim, self.host_bias)
else:
return Linear(self.host_dim, self.out_dim, self.host_bias)
def forward(self, x_guest, x_host):
if self.need_guest:
g_out = self.guest_model(x_guest)
else:
g_out = 0
h_out = None
if isinstance(x_host, list):
for m, data in zip(self.host_model, x_host):
out_ = m(data)
if h_out is None:
h_out = out_
else:
h_out += out_
else:
h_out = self.host_model[0](x_host)
        # apply activation (and dropout, when configured) through act_seq;
        # calling self.activation directly would skip the dropout built above
        return self.act_seq(g_out + h_out)
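if __name__ == '__main__':
    # Hedged usage sketch (illustration only, not part of the original file):
    # one guest and two hosts, with all input dimensions inferred lazily on
    # the first forward pass via LazyLinear.
    layer = InteractiveLayer(out_dim=8, host_num=2)
    guest_in = t.randn(5, 4)
    host_in = [t.randn(5, 3), t.randn(5, 3)]
    print(layer(guest_in, host_in).shape)  # torch.Size([5, 8])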
| 5,522 | 34.178344 | 113 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/component/nn/backend/torch/__init__.py
|
try:
from pipeline.component.nn.backend.torch import nn, init, operation, optim, serialization
except ImportError:
nn, init, operation, optim, serialization = None, None, None, None, None
__all__ = ['nn', 'init', 'operation', 'optim', 'serialization']
| 261 | 36.428571 | 93 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/component/nn/backend/torch/operation.py
|
import torch as t
import copy
from torch.nn import Module
class OpBase(object):
def __init__(self):
self.param_dict = {}
def to_dict(self):
ret = copy.deepcopy(self.param_dict)
ret['op'] = type(self).__name__
return ret
class Astype(Module, OpBase):
def __init__(self, cast_type: str):
OpBase.__init__(self)
Module.__init__(self)
assert cast_type in [
'float',
'int',
'bool',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
'float16']
self.param_dict['cast_type'] = cast_type
self.cast_type = cast_type
self.cast_type_map = {
'float': t.float,
'int': t.int,
'bool': t.bool,
'float32': t.float32,
'float64': t.float64,
'float16': t.float16,
'int8': t.int8,
'int16': t.int16,
'int32': t.int32,
'int64': t.int64,
}
def forward(self, tensor: t.Tensor, **kwargs):
return tensor.type(self.cast_type_map[self.cast_type])
class Flatten(Module, OpBase):
def __init__(self, start_dim=0, end_dim=-1):
OpBase.__init__(self)
Module.__init__(self)
self.param_dict['start_dim'] = start_dim
self.param_dict['end_dim'] = end_dim
def forward(self, tensor):
return tensor.flatten(**self.param_dict)
class Reshape(Module, OpBase):
def __init__(self, shape):
OpBase.__init__(self)
Module.__init__(self)
assert isinstance(shape, tuple) or isinstance(shape, list)
self.shape = shape
self.param_dict['shape'] = list(shape)
def forward(self, tensor: t.Tensor):
return tensor.reshape(shape=self.shape)
class Index(Module, OpBase):
def __init__(self, index):
OpBase.__init__(self)
Module.__init__(self)
assert isinstance(index, int)
self.param_dict['index'] = index
def forward(self, content):
return content[self.param_dict['index']]
class Select(Module, OpBase):
def __init__(self, dim, idx):
OpBase.__init__(self)
Module.__init__(self)
self.param_dict = {'dim': dim, 'index': idx}
def forward(self, tensor):
return tensor.select(self.param_dict['dim'], self.param_dict['index'])
class SelectRange(Module, OpBase):
def __init__(self, dim, start, end):
OpBase.__init__(self)
Module.__init__(self)
self.param_dict = {'dim': dim, 'start': start, 'end': end}
def forward(self, tensor):
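        # note: this selects the slice at index -1 along `dim` first, then
        # takes [start:end] of the *result*; it does not slice a range along
        # `dim` itself, as the class name might suggest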
return tensor.select(
self.param_dict['dim'], -1)[self.param_dict['start']: self.param_dict['end']]
class Sum(Module, OpBase):
def __init__(self, dim):
OpBase.__init__(self)
Module.__init__(self)
assert isinstance(dim, int)
self.param_dict['dim'] = dim
def forward(self, tensor):
return tensor.sum(dim=self.param_dict['dim'])
class Squeeze(Module, OpBase):
def __init__(self, **kwargs):
OpBase.__init__(self)
Module.__init__(self)
def forward(self, tensor: t.Tensor):
return tensor.squeeze()
class Unsqueeze(Sum, OpBase):
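    # inherits Sum purely to reuse its integer `dim` parameter handling;
    # forward() below overrides the reduction with an unsqueeze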
def __init__(self, dim):
super(Unsqueeze, self).__init__(dim)
def forward(self, tensor: t.Tensor):
return tensor.unsqueeze(self.param_dict['dim'])
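if __name__ == '__main__':
    # Hedged usage sketch (illustration only): the ops are plain torch
    # Modules, so they compose with torch.nn.Sequential, and to_dict()
    # emits the config dict that FATE serializes.
    seq = t.nn.Sequential(Flatten(start_dim=1), Astype('float32'))
    x = t.randint(0, 10, (2, 3, 4))
    print(seq(x).shape)                  # torch.Size([2, 12])
    print(Astype('float32').to_dict())   # {'cast_type': 'float32', 'op': 'Astype'}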
| 3,488 | 23.570423 | 89 |
py
|
FATE
|
FATE-master/python/fate_client/pipeline/component/nn/backend/torch/serialization.py
|
import copy
import inspect
from collections import OrderedDict
try:
from torch.nn import Sequential as tSeq
from pipeline.component.nn.backend.torch import optim, init, nn
from pipeline.component.nn.backend.torch import operation
from pipeline.component.nn.backend.torch.base import Sequential, get_torch_instance
from pipeline.component.nn.backend.torch.cust import CustModel, CustLoss
from pipeline.component.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def recover_layer_from_dict(nn_define, nn_dict):
init_param_dict = copy.deepcopy(nn_define)
if 'layer' in nn_define:
class_name = nn_define['layer']
init_param_dict.pop('layer')
elif 'op' in nn_define:
class_name = nn_define['op']
init_param_dict.pop('op')
else:
        raise ValueError(
            'no layer or operation info found in nn define, please check your layer config and make '
            'sure they are correct for pytorch backend')
if 'initializer' in init_param_dict:
init_param_dict.pop('initializer')
# find corresponding class
if class_name == CustModel.__name__:
nn_layer_class = CustModel
elif class_name == InteractiveLayer.__name__:
nn_layer_class = InteractiveLayer
else:
nn_layer_class = nn_dict[class_name]
# create layer or Module
    if nn_layer_class == CustModel:  # convert to a pytorch model
layer: CustModel = CustModel(module_name=init_param_dict['module_name'],
class_name=init_param_dict['class_name'],
**init_param_dict['param'])
layer = layer.get_pytorch_model()
elif nn_layer_class == InteractiveLayer:
layer: InteractiveLayer = InteractiveLayer(**init_param_dict)
else:
layer = get_torch_instance(nn_layer_class, init_param_dict)
# initialize if there are configs
if 'initializer' in nn_define:
if 'weight' in nn_define['initializer']:
init_para = nn_define['initializer']['weight']
init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
init_func(layer, **init_para['param'])
if 'bias' in nn_define['initializer']:
init_para = nn_define['initializer']['bias']
init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
init_func(layer, init='bias', **init_para['param'])
return layer, class_name
def recover_sequential_from_dict(nn_define):
nn_define_dict = nn_define
nn_dict = dict(inspect.getmembers(nn))
op_dict = dict(inspect.getmembers(operation))
nn_dict.update(op_dict)
class_name_list = []
try:
        # keys of a submitted model carry int prefixes ('<order>-<name>')
        # that keep the layers in order
add_dict = OrderedDict()
keys = list(nn_define_dict.keys())
keys = sorted(keys, key=lambda x: int(x.split('-')[0]))
for k in keys:
layer, class_name = recover_layer_from_dict(nn_define_dict[k], nn_dict)
add_dict[k] = layer
class_name_list.append(class_name)
    except Exception:
        # keys without the '<int>-' prefix cannot be sorted numerically;
        # fall back to the dict's insertion order
add_dict = OrderedDict()
for k, v in nn_define_dict.items():
layer, class_name = recover_layer_from_dict(v, nn_dict)
add_dict[k] = layer
class_name_list.append(class_name)
if len(class_name_list) == 1 and class_name_list[0] == CustModel.__name__:
        # if there is only a CustModel, return the model directly
return list(add_dict.values())[0]
else:
return tSeq(add_dict)
def recover_optimizer_from_dict(define_dict):
opt_dict = dict(inspect.getmembers(optim))
from federatedml.util import LOGGER
LOGGER.debug('define dict is {}'.format(define_dict))
if 'optimizer' not in define_dict:
raise ValueError('please specify optimizer type in the json config')
opt_class = opt_dict[define_dict['optimizer']]
param_dict = copy.deepcopy(define_dict)
if 'optimizer' in param_dict:
param_dict.pop('optimizer')
if 'config_type' in param_dict:
param_dict.pop('config_type')
return opt_class(**param_dict)
def recover_loss_fn_from_dict(define_dict):
loss_fn_dict = dict(inspect.getmembers(nn))
if 'loss_fn' not in define_dict:
raise ValueError('please specify loss function in the json config')
param_dict = copy.deepcopy(define_dict)
param_dict.pop('loss_fn')
if define_dict['loss_fn'] == CustLoss.__name__:
return CustLoss(loss_module_name=param_dict['loss_module_name'],
class_name=param_dict['class_name'],
**param_dict['param']).get_pytorch_model()
else:
return loss_fn_dict[define_dict['loss_fn']](**param_dict)
if __name__ == '__main__':
pass
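    # Hedged example (illustration only, appended after the original `pass`):
    # assumes an nn_define dict shaped like those the pipeline submits, i.e.
    # '<order>-<name>' keys mapping to layer configs, where 'Linear' and
    # 'ReLU' resolve to the generated FateTorch wrappers through
    # inspect.getmembers(nn) above.
    example_define = {
        '0-dense': {'layer': 'Linear', 'in_features': 10, 'out_features': 4},
        '1-act': {'layer': 'ReLU'},
    }
    print(recover_sequential_from_dict(example_define))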
| 4,867 | 37.03125 | 100 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from pathlib import Path
import click
from ruamel import yaml
from flow_client.flow_cli.commands import (
checkpoint, component, data, job, key, model, privilege, provider, queue,
resource, server, service, table, tag, task, template, test, tracking,
)
from flow_client.flow_cli.utils.cli_utils import prettify
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(short_help='Fate Flow Client', context_settings=CONTEXT_SETTINGS)
@click.pass_context
def flow_cli(ctx):
'''
Fate Flow Client
'''
ctx.ensure_object(dict)
if ctx.invoked_subcommand == 'init':
return
with open(os.path.join(os.path.dirname(__file__), 'settings.yaml'), 'r') as fin:
config = yaml.safe_load(fin)
if not config.get('api_version'):
raise ValueError('api_version in config is required')
ctx.obj['api_version'] = config['api_version']
is_server_conf_exist = False
if config.get('server_conf_path'):
conf_path = Path(config['server_conf_path'])
is_server_conf_exist = conf_path.is_file()
if is_server_conf_exist:
server_conf = yaml.safe_load(conf_path.read_text('utf-8'))
local_conf_path = conf_path.with_name(f'local.{conf_path.name}')
if local_conf_path.is_file():
server_conf.update(yaml.safe_load(local_conf_path.read_text('utf-8')))
ctx.obj['ip'] = server_conf['fateflow']['host']
ctx.obj['http_port'] = int(server_conf['fateflow']['http_port'])
ctx.obj['server_url'] = f'http://{ctx.obj["ip"]}:{ctx.obj["http_port"]}/{ctx.obj["api_version"]}'
http_app_key = None
http_secret_key = None
if server_conf.get('authentication', {}).get('client', {}).get('switch'):
http_app_key = server_conf['authentication']['client']['http_app_key']
http_secret_key = server_conf['authentication']['client']['http_secret_key']
else:
http_app_key = server_conf.get('fateflow', {}).get('http_app_key')
http_secret_key = server_conf.get('fateflow', {}).get('http_secret_key')
if http_app_key and http_secret_key:
ctx.obj['app_key'] = http_app_key
ctx.obj['secret_key'] = http_secret_key
elif config.get('ip') and config.get('port'):
ctx.obj['ip'] = config['ip']
ctx.obj['http_port'] = int(config['port'])
ctx.obj['server_url'] = f'http://{ctx.obj["ip"]}:{ctx.obj["http_port"]}/{config["api_version"]}'
if config.get('app_key') and config.get('secret_key'):
ctx.obj['app_key'] = config['app_key']
ctx.obj['secret_key'] = config['secret_key']
else:
raise ValueError('Invalid configuration file. Did you run "flow init"?')
ctx.obj['initialized'] = is_server_conf_exist or (config.get('ip') and config.get('port'))
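# Shape of settings.yaml as consumed above (a sketch: only the keys this
# function actually reads are listed; the sample values are assumptions):
#   api_version: v1
#   server_conf_path: /data/projects/fate/conf/service_conf.yaml  # optional
#   ip: 127.0.0.1      # used together with port when server_conf_path is absent
#   port: 9380
#   app_key: null      # optional request-signing credentials
#   secret_key: null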
@flow_cli.command('init', short_help='Flow CLI Init Command')
@click.option('-c', '--server-conf-path', type=click.Path(exists=True),
help='Server configuration file absolute path.')
@click.option('--ip', type=click.STRING, help='Fate flow server ip address.')
@click.option('--port', type=click.INT, help='Fate flow server port.')
@click.option('--app-key', type=click.STRING, help='APP key for sign requests.')
@click.option('--secret-key', type=click.STRING, help='Secret key for sign requests.')
@click.option('--reset', is_flag=True, default=False,
help='If specified, initialization settings would be reset to none. Users should init flow again.')
def initialization(**kwargs):
'''
\b
- DESCRIPTION:
        Flow CLI Init Command. Users can provide either an absolute path to a server conf file,
        or the IP address and HTTP port of a valid FATE Flow server. Note that if both are
        provided, the server conf is loaded in priority, and the IP address and HTTP port
        are ignored.
\b
- USAGE:
flow init -c /data/projects/fate/python/conf/service_conf.yaml
flow init --ip 127.0.0.1 --port 9380
'''
with open(os.path.join(os.path.dirname(__file__), 'settings.yaml'), 'r') as fin:
config = yaml.safe_load(fin)
if kwargs.get('reset'):
config['api_version'] = 'v1'
for i in ('server_conf_path', 'ip', 'port', 'app_key', 'secret_key'):
config[i] = None
with open(os.path.join(os.path.dirname(__file__), 'settings.yaml'), 'w') as fout:
yaml.dump(config, fout, Dumper=yaml.RoundTripDumper)
prettify(
{
'retcode': 0,
'retmsg': 'Fate Flow CLI has been reset successfully. '
                          'Please initialize again before using flow CLI v2.'
}
)
else:
config['api_version'] = 'v1'
if kwargs.get('server_conf_path'):
config['server_conf_path'] = os.path.abspath(kwargs['server_conf_path'])
for i in ('ip', 'port', 'app_key', 'secret_key'):
if kwargs.get(i):
config[i] = kwargs[i]
if config.get('server_conf_path') or (config.get('ip') and config.get('port')):
with open(os.path.join(os.path.dirname(__file__), 'settings.yaml'), 'w') as fout:
yaml.dump(config, fout, Dumper=yaml.RoundTripDumper)
prettify(
{
'retcode': 0,
'retmsg': 'Fate Flow CLI has been initialized successfully.'
}
)
else:
prettify(
{
'retcode': 100,
                'retmsg': 'Fate Flow CLI initialization failed. Please provide a server '
                          'configuration file path, or the server HTTP IP address and port.'
}
)
flow_cli.add_command(server.server)
flow_cli.add_command(service.service)
flow_cli.add_command(provider.provider)
flow_cli.add_command(tracking.tracking)
flow_cli.add_command(component.component)
flow_cli.add_command(data.data)
flow_cli.add_command(job.job)
flow_cli.add_command(model.model)
flow_cli.add_command(resource.resource)
flow_cli.add_command(privilege.privilege)
flow_cli.add_command(queue.queue)
flow_cli.add_command(task.task)
flow_cli.add_command(table.table)
flow_cli.add_command(tag.tag)
flow_cli.add_command(checkpoint.checkpoint)
flow_cli.add_command(test.test)
flow_cli.add_command(template.template)
flow_cli.add_command(key.key)
if __name__ == '__main__':
flow_cli()
| 7,142 | 38.464088 | 116 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/checkpoint.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help='Checkpoint Operations')
@click.pass_context
def checkpoint(ctx, **kwargs):
pass
@checkpoint.command('list', short_help='List checkpoints')
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def list_checkpoints(ctx, **kwargs):
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'checkpoint/list', config_data)
@checkpoint.command('get', short_help='Get a checkpoint by step_index or step_name')
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.option('--step-index', help='Step index', type=click.INT)
@click.option('--step-name', help='Step name', type=click.STRING)
@click.pass_context
def get_checkpoint(ctx, **kwargs):
config_data, dsl_data = preprocess(**kwargs)
if len(config_data.keys() & {'step_index', 'step_name'}) != 1:
click.echo("Error: Missing option '--step-index' or '--step-name'.", err=True)
sys.exit(2)
access_server('post', ctx, 'checkpoint/get', config_data)
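# Illustrative usage (a sketch; the short option spellings supplied by the
# cli_args decorators are assumptions): exactly one of --step-index and
# --step-name must be given, as enforced above:
#   flow checkpoint get -r guest -p 9999 --model-id $MODEL_ID \
#       --model-version $MODEL_VERSION -cpn hetero_lr_0 --step-index 5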
| 1,956 | 33.333333 | 86 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import prettify
from flow_sdk.client import FlowClient
from pipeline.backend.pipeline import PipeLine
from pipeline.component import (
DataTransform, Evaluation, HeteroLR,
HeteroSecureBoost, Intersection, Reader,
)
from pipeline.interface import Data
@click.group(short_help="FATE Flow Test Operations")
@click.pass_context
def test(ctx):
"""
\b
    Provides numbers of test operational commands, including toy test and min test.
For more details, please check out the help text.
"""
pass
@test.command("toy", short_help="Toy Test Command")
@cli_args.GUEST_PARTYID_REQUIRED
@cli_args.HOST_PARTYID_REQUIRED
@cli_args.TIMEOUT
@cli_args.TASK_CORES
@click.pass_context
def toy(ctx, **kwargs):
flow_sdk = FlowClient(ip=ctx.obj["ip"], port=ctx.obj["http_port"], version=ctx.obj["api_version"],
app_key=ctx.obj.get("app_key"), secret_key=ctx.obj.get("secret_key"))
submit_result = flow_sdk.test.toy(**kwargs)
if submit_result["retcode"] == 0:
for t in range(kwargs["timeout"]):
job_id = submit_result["jobId"]
r = flow_sdk.job.query(job_id=job_id, role="guest", party_id=kwargs["guest_party_id"])
if r["retcode"] == 0 and len(r["data"]):
job_status = r["data"][0]["f_status"]
print(f"toy test job {job_id} is {job_status}")
if job_status in {"success", "failed", "canceled"}:
check_log(flow_sdk, kwargs["guest_party_id"], job_id, job_status)
break
time.sleep(1)
else:
print(f"check job status timeout")
check_log(flow_sdk, kwargs["guest_party_id"], job_id, job_status)
else:
prettify(submit_result)
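# Illustrative usage (a sketch; the guest/host party-id option spellings come
# from the cli_args decorators and are assumed here):
#   flow test toy -gid 9999 -hid 10000 --timeout 300
# The command polls the job once per second until success/failed/canceled or
# the timeout elapses, then fetches and checks the toy-test logs.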
def check_log(flow_sdk, party_id, job_id, job_status):
r = flow_sdk.job.log(job_id=job_id, output_path="./logs/toy")
if r["retcode"] == 0:
log_msg = flow_sdk.test.check_toy(party_id, job_status, r["directory"])
try:
for msg in log_msg:
print(msg)
except BaseException:
print(f"auto check log failed, please check {r['directory']}")
else:
print(f"get log failed, please check PROJECT_BASE/logs/{job_id} on the fateflow server machine")
@test.command("min", short_help="Min Test Command")
@click.option("-t", "--data-type", type=click.Choice(["fast", "normal"]), default="fast", show_default=True,
help="fast for breast data, normal for default credit data")
@click.option("--sbt/--no-sbt", is_flag=True, default=True, show_default=True, help="run sbt test or not")
@cli_args.GUEST_PARTYID_REQUIRED
@cli_args.HOST_PARTYID_REQUIRED
@cli_args.ARBITER_PARTYID_REQUIRED
@click.pass_context
def run_min_test(ctx, data_type, sbt, guest_party_id, host_party_id, arbiter_party_id, **kwargs):
guest_party_id = int(guest_party_id)
host_party_id = int(host_party_id)
arbiter_party_id = int(arbiter_party_id)
if data_type == "fast":
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
auc_base = 0.98
elif data_type == "normal":
guest_train_data = {"name": "default_credit_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "default_credit_hetero_host", "namespace": "experiment"}
auc_base = 0.69
else:
click.echo(f"data type {data_type} not supported", err=True)
raise click.Abort()
lr_pipeline = lr_train_pipeline(guest_party_id, host_party_id, arbiter_party_id, guest_train_data, host_train_data)
lr_auc = get_auc(lr_pipeline, "hetero_lr_0")
if lr_auc < auc_base:
click.echo(f"Warning: The LR auc {lr_auc} is lower than expect value {auc_base}")
predict_pipeline(lr_pipeline, guest_party_id, host_party_id, guest_train_data, host_train_data)
if sbt:
sbt_pipeline = sbt_train_pipeline(guest_party_id, host_party_id, guest_train_data, host_train_data)
sbt_auc = get_auc(sbt_pipeline, "hetero_secureboost_0")
if sbt_auc < auc_base:
click.echo(f"Warning: The SBT auc {sbt_auc} is lower than expect value {auc_base}")
predict_pipeline(sbt_pipeline, guest_party_id, host_party_id, guest_train_data, host_train_data)
def lr_train_pipeline(guest, host, arbiter, guest_train_data, host_train_data):
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
lr_param = {
"penalty": "L2",
"tol": 0.0001,
"alpha": 0.01,
"optimizer": "rmsprop",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros",
"fit_intercept": True,
},
"max_iter": 30,
"early_stop": "diff",
"encrypt_param": {
"key_length": 1024,
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False,
},
"validation_freqs": 3,
}
hetero_lr_0 = HeteroLR(name="hetero_lr_0", **lr_param)
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))
pipeline.compile()
pipeline.fit()
return pipeline
def sbt_train_pipeline(guest, host, guest_train_data, host_train_data):
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role="guest", party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role="host", party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
sbt_param = {
"task_type": "classification",
"objective_param": {
"objective": "cross_entropy",
},
"num_trees": 3,
"validation_freqs": 1,
"encrypt_param": {
"method": "paillier",
},
"tree_param": {
"max_depth": 3,
}
}
hetero_secure_boost_0 = HeteroSecureBoost(name="hetero_secureboost_0", **sbt_param)
evaluation_0 = Evaluation(name="evaluation_0", eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))
pipeline.compile()
pipeline.fit()
return pipeline
def get_auc(pipeline, component_name):
cpn_summary = pipeline.get_component(component_name).get_summary()
auc = cpn_summary.get("validation_metrics").get("train").get("auc")[-1]
return auc
def predict_pipeline(train_pipeline, guest, host, guest_train_data, host_train_data):
cpn_list = train_pipeline.get_component_list()[1:]
train_pipeline.deploy_component(cpn_list)
pipeline = PipeLine()
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role="host", party_id=host).component_param(table=host_train_data)
pipeline.add_component(reader_0)
pipeline.add_component(train_pipeline, data=Data(predict_input={
train_pipeline.data_transform_0.input.data: reader_0.output.data}))
pipeline.predict()
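# The prediction pattern above, summarized (a sketch; party ids and data tables
# are placeholders): deploy every trained component except the reader, then
# wire a fresh Reader into the deployed pipeline via Data(predict_input=...)
# and call predict() to score the same tables:
#   train_pl = lr_train_pipeline(9999, 10000, 10000, guest_data, host_data)
#   predict_pipeline(train_pl, 9999, 10000, guest_data, host_data)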
| 9,661 | 38.761317 | 120 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/resource.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Resource Manager")
@click.pass_context
def resource(ctx):
"""
\b
Provides numbers of resource operational commands, including query and return.
For more details, please check out the help text.
"""
pass
@resource.command("query", short_help="Query Resource Command")
@click.pass_context
def query(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query Resource Information.
\b
- USAGE:
flow resource query
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'resource/query', config_data)
@resource.command("return", short_help="Return Job Resource Command")
@cli_args.JOBID
@click.pass_context
def resource_return(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Return Job Resource Command
\b
- USAGE:
flow resource return -j $JobId
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'resource/return', config_data)
| 1,750 | 26.793651 | 82 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Table Operations")
@click.pass_context
def table(ctx):
"""
\b
Provides numbers of table operational commands, including info and delete.
For more details, please check out the help text.
"""
pass
@table.command("info", short_help="Query Table Command")
@cli_args.NAMESPACE_REQUIRED
@cli_args.TABLE_NAME_REQUIRED
@click.pass_context
def info(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query Table Information.
\b
- USAGE:
flow table info -n $NAMESPACE -t $TABLE_NAME
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'table/table_info', config_data)
@table.command("delete", short_help="Delete Table Command")
@cli_args.NAMESPACE
@cli_args.TABLE_NAME
@cli_args.JOBID
@cli_args.ROLE
@cli_args.PARTYID
@cli_args.COMPONENT_NAME
@click.pass_context
def delete(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Delete A Specified Table.
\b
- USAGE:
flow table delete -n $NAMESPACE -t $TABLE_NAME
flow table delete -j $JOB_ID -r guest -p 9999
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'table/delete', config_data)
@table.command("disable", short_help="Disable Table Command")
@cli_args.NAMESPACE
@cli_args.TABLE_NAME
@click.pass_context
def disable(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Disable A Specified Table.
\b
- USAGE:
flow table disable -n $NAMESPACE -t $TABLE_NAME
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'table/disable', config_data)
@table.command("enable", short_help="Disable Table Command")
@cli_args.NAMESPACE
@cli_args.TABLE_NAME
@click.pass_context
def enable(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Enable A Specified Table.
\b
- USAGE:
flow table enable -n $NAMESPACE -t $TABLE_NAME
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'table/enable', config_data)
@table.command("disable-delete", short_help="Delete Disable Table Command")
@click.pass_context
def disable_delete(ctx, **kwargs):
"""
\b
- DESCRIPTION:
        Delete Disabled Tables.
\b
- USAGE:
flow table disable-delete
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'table/disable/delete', config_data)
@table.command("add", short_help="Add Table Command")
@cli_args.CONF_PATH
@click.pass_context
def add(ctx, **kwargs):
"""
- DESCRIPTION:
\b
        Add a storage address to FATE as a data table.
        Used to be 'table_add'.
\b
- USAGE:
flow table add -c fate_flow/examples/bind_hdfs_table.json
"""
config_data, _ = preprocess(**kwargs)
access_server('post', ctx, 'table/add', config_data)
@table.command("bind", short_help="Bind Table Command")
@cli_args.CONF_PATH
@click.option('--drop', is_flag=True, default=False,
help="If specified, data of old version would be replaced by the current version. "
"Otherwise, current upload task would be rejected. (default: False)")
@click.pass_context
def bind(ctx, **kwargs):
"""
- DESCRIPTION:
\b
        Bind a storage address to FATE as a data table.
        Used to be 'table_bind'.
\b
- USAGE:
flow table bind -c fate_flow/examples/bind_hdfs_table.json
"""
config_data, _ = preprocess(**kwargs)
access_server('post', ctx, 'table/bind', config_data)
@table.command("connector-create", short_help="create or update connector")
@cli_args.CONF_PATH
@click.pass_context
def connector_create_or_update(ctx, **kwargs):
"""
- DESCRIPTION:
\b
Create a connector to fate address.
\b
- USAGE:
flow table connector-create -c fateflow/examples/connector/create_or_update.json
"""
config_data, _ = preprocess(**kwargs)
access_server('post', ctx, 'table/connector/create', config_data)
@table.command("connector-query", short_help="query connector info")
@cli_args.CONNECTOR_NAME
@click.pass_context
def connector_query(ctx, **kwargs):
"""
- DESCRIPTION:
\b
query connector info.
\b
- USAGE:
flow table connector-query --connector-name xxx
"""
config_data, _ = preprocess(**kwargs)
access_server('post', ctx, 'table/connector/query', config_data)
@table.command("tracking-source", short_help="Tracking Source Table")
@cli_args.NAMESPACE
@cli_args.TABLE_NAME
@click.pass_context
def tracking_source(ctx, **kwargs):
"""
- DESCRIPTION:
\b
tracking a table's parent table
\b
- USAGE:
        flow table tracking-source -n $NAMESPACE -t $TABLE_NAME
"""
config_data, _ = preprocess(**kwargs)
access_server('post', ctx, 'table/tracking/source', config_data)
@table.command("tracking-job", short_help="Tracking Using Table Job")
@cli_args.NAMESPACE
@cli_args.TABLE_NAME
@click.pass_context
def tracking_job(ctx, **kwargs):
"""
- DESCRIPTION:
\b
tracking jobs of using table
\b
- USAGE:
        flow table tracking-job -n $NAMESPACE -t $TABLE_NAME
"""
config_data, _ = preprocess(**kwargs)
access_server('post', ctx, 'table/tracking/job', config_data)
| 6,001 | 24.218487 | 97 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/task.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Task Operations")
@click.pass_context
def task(ctx):
"""
\b
Provides numbers of task operational commands, including list and query.
For more details, please check out the help text.
"""
pass
@task.command("list", short_help="List Task Command")
@cli_args.LIMIT
@click.pass_context
def list_task(ctx, **kwargs):
"""
\b
- DESCRIPTION:
        List Task Command.
\b
- USAGE:
flow task list
flow task list -l 25
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'job/list/task', config_data)
@task.command("query", short_help="Query Task Command")
@cli_args.JOBID
@cli_args.ROLE
@cli_args.PARTYID
@cli_args.COMPONENT_NAME
@cli_args.STATUS
@click.pass_context
def query(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query Task Command.
\b
- USAGE:
flow task query -j $JOB_ID -p 9999 -r guest
flow task query -cpn hetero_feature_binning_0 -s success
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'job/task/query', config_data)
| 1,886 | 25.957143 | 76 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/queue.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils.cli_utils import access_server
@click.group(short_help="Queue Operations")
@click.pass_context
def queue(ctx):
"""
\b
Provides a queue operational command, which is 'clean'.
For more details, please check out the help text.
"""
pass
@queue.command("clean", short_help="Clean Queue Command")
@click.pass_context
def clean(ctx):
"""
\b
- DESCRIPTION:
Queue Clean Command
\b
- USAGE:
flow queue clean
"""
access_server('post', ctx, "job/clean/queue")
| 1,182 | 25.886364 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/template.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import click
import requests
from contextlib import closing
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import (preprocess, download_from_request, access_server, prettify)
@click.group(short_help="Template Operations")
@click.pass_context
def template(ctx):
"""
\b
fate template file download
"""
pass
@template.command("download", short_help="Template Download Command")
@cli_args.MIN_DATA
@cli_args.OUTPUT_PATH
@click.pass_context
def download(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download template conf/dsl/data files
\b
- USAGE:
flow template download --min-data 1 --output-path ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
tar_file_name = 'template.tar.gz'
extract_dir = config_data['output_path']
with closing(access_server('post', ctx, 'template/download', config_data, False, stream=True)) as response:
if response.status_code == 200:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
res = {'retcode': 0,
'directory': extract_dir,
'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
else:
res = response.json() if isinstance(response, requests.models.Response) else response
prettify(res)
| 2,044 | 31.983871 | 111 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/server.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import (preprocess, access_server)
@click.group(short_help="FATE Flow Server Operations")
@click.pass_context
def server(ctx):
"""
\b
    Provides numbers of server operational commands, including versions and reload.
For more details, please check out the help text.
"""
pass
@server.command("versions", short_help="Show Versions Command")
@click.pass_context
def versions(ctx, **kwargs):
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'server/version/get', config_data)
@server.command("reload", short_help="Reload Server Command")
@click.pass_context
def reload(ctx, **kwargs):
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'server/reload', config_data)
| 1,482 | 31.23913 | 94 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from datetime import datetime
import click
import requests
from flow_client.flow_cli.utils import cli_args
from contextlib import closing
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server, prettify, get_project_base_directory, \
check_abs_path
@click.group(short_help="Model Operations")
@click.pass_context
def model(ctx):
"""
\b
    Provides numbers of model operational commands, including load, store, import, etc.
For more details, please check out the help text.
"""
pass
@model.command("load", short_help="Load Model Command")
@cli_args.JOBID
@click.option("-c", "--conf-path", type=click.Path(exists=True),
help="Configuration file path.")
@click.pass_context
def load(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Load Model Command
\b
- USAGE:
flow model load -c fate_flow/examples/publish_load_model.json
flow model load -j $JOB_ID
"""
if not kwargs.get("conf_path") and not kwargs.get("job_id"):
prettify(
{
"retcode": 100,
"retmsg": "Load model failed. No arguments received, "
"please provide one of arguments from job id and conf path."
}
)
else:
if kwargs.get("conf_path") and kwargs.get("job_id"):
prettify(
{
"retcode": 100,
"retmsg": "Load model failed. Please do not provide job id and "
"conf path at the same time."
}
)
else:
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/load', config_data)
@model.command("bind", short_help="Bind Model Command")
@cli_args.JOBID
@cli_args.CONF_PATH
@click.pass_context
def bind(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Bind Model Command
\b
- USAGE:
flow model bind -c fate_flow/examples/bind_model_service.json
flow model bind -c fate_flow/examples/bind_model_service.json -j $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/bind', config_data)
@model.command("import", short_help="Import Model Command")
@cli_args.CONF_PATH
@click.option('--from-database', is_flag=True, default=False,
help="If specified and there is a valid database environment, fate flow will import model from database "
"which you specified in configuration file.")
@click.pass_context
def import_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Import the model from a file or storage engine.
\b
- USAGE:
flow model import -c fate_flow/examples/import_model.json
flow model import -c fate_flow/examples/restore_model.json --from-database
"""
config_data, dsl_data = preprocess(**kwargs)
if config_data.pop('from_database'):
access_server('post', ctx, 'model/restore', config_data)
return
file_path = config_data.get("file", None)
if not file_path:
prettify({
'retcode': 100,
'retmsg': "Import model failed. Please specify the valid model file path and try again."
})
return
if not os.path.isabs(file_path):
file_path = os.path.join(get_project_base_directory(), file_path)
if not os.path.exists(file_path):
prettify({
'retcode': 100,
'retmsg': 'Import model failed. The file is obtained from the fate flow client machine, '
'but it does not exist, please check the path: {}'.format(file_path),
        })
        return
config_data['force_update'] = int(config_data.get('force_update', False))
files = {'file': open(file_path, 'rb')}
access_server('post', ctx, 'model/import', data=config_data, files=files)
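# Illustrative import conf (a sketch; only keys read above are shown and the
# values are hypothetical):
#   {"file": "model_export/my_model.zip",   # relative paths resolve against
#                                           # the project base directory
#    "force_update": false}
# With --from-database the conf is instead posted to model/restore unchanged.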
@model.command("export", short_help="Export Model Command")
@cli_args.CONF_PATH
@click.option('--to-database', is_flag=True, default=False,
help="If specified and there is a valid database environment, fate flow will export model to database "
"which you specified in configuration file.")
@click.pass_context
def export_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Export the model to a file or storage engine.
\b
- USAGE:
flow model export -c fate_flow/examples/export_model.json
flow model export -c fate_flow/examples/store_model.json --to-database
"""
config_data, dsl_data = preprocess(**kwargs)
if not config_data.pop('to_database'):
with closing(access_server('get', ctx, 'model/export', config_data, False, stream=True)) as response:
if response.status_code == 200:
archive_file_name = re.findall("filename=(.+)", response.headers["Content-Disposition"])[0]
os.makedirs(config_data["output_path"], exist_ok=True)
archive_file_path = os.path.join(config_data["output_path"], archive_file_name)
with open(archive_file_path, 'wb') as fw:
for chunk in response.iter_content(1024):
if chunk:
fw.write(chunk)
response_dict = {'retcode': 0,
'file': archive_file_path,
'retmsg': 'download successfully, please check {}'.format(archive_file_path)}
else:
                response_dict = response.json() if isinstance(response, requests.models.Response) else response
prettify(response_dict)
else:
access_server('post', ctx, 'model/store', config_data)
@model.command("migrate", short_help="Migrate Model Command")
@cli_args.CONF_PATH
@click.pass_context
def migrate(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Migrate Model Command.
\b
- USAGE:
flow model migrate -c fate_flow/examples/migrate_model.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/migrate', config_data)
@model.command("tag-model", short_help="Tag Model Command")
@cli_args.JOBID_REQUIRED
@cli_args.TAG_NAME_REQUIRED
@click.option("--remove", is_flag=True, default=False,
help="If specified, the name of specified model will be "
"removed from the model name list of specified tag")
@click.pass_context
def tag_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Tag Model Command.
        By default, users can execute this command to tag a model, or specify
        the 'remove' flag to remove the tag from the model.
\b
- USAGE:
flow model tag-model -j $JOB_ID -t $TAG_NAME
flow model tag-model -j $JOB_ID -t $TAG_NAME --remove
"""
config_data, dsl_data = preprocess(**kwargs)
if not config_data.pop('remove'):
access_server('post', ctx, 'model/model_tag/create', config_data)
else:
access_server('post', ctx, 'model/model_tag/remove', config_data)
@model.command("tag-list", short_help="List Tags of Model Command")
@cli_args.JOBID_REQUIRED
@click.pass_context
def list_tag(ctx, **kwargs):
"""
\b
- DESCRIPTION:
List Tags of Model Command.
        Users can query the model by a valid job id and get the tag list of the specified model.
\b
- USAGE:
flow model tag-list -j $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/model_tag/retrieve', config_data)
@model.command("get-predict-dsl", short_help="Get predict dsl of model")
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def get_predict_dsl(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Get predict DSL of the model.
\b
- USAGE:
flow model get-predict-dsl --model-id $MODEL_ID --model-version $MODEL_VERSION -o ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
dsl_filename = "predict_dsl_{}.json".format(datetime.now().strftime('%Y%m%d%H%M%S'))
output_path = os.path.join(check_abs_path(kwargs.get("output_path")), dsl_filename)
config_data["filename"] = dsl_filename
with closing(access_server('post', ctx, 'model/get/predict/dsl', config_data, False, stream=True)) as response:
if response.status_code == 200:
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "wb") as fw:
for chunk in response.iter_content(1024):
if chunk:
fw.write(chunk)
res = {'retcode': 0,
'retmsg': "Query predict dsl successfully. "
"File path is: {}".format(output_path)}
else:
try:
res = response.json() if isinstance(response, requests.models.Response) else response
except Exception:
res = {'retcode': 100,
'retmsg': "Query predict dsl failed."
"For more details, please check logs/fate_flow/fate_flow_stat.log"}
prettify(res)
@model.command("get-predict-conf", short_help="Get predict conf template")
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def get_predict_conf(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Get the template of predict config.
\b
- USAGE:
flow model get-predict-conf --model-id $MODEL_ID --model-version $MODEL_VERSION -o ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
conf_filename = "predict_conf_{}.json".format(datetime.now().strftime('%Y%m%d%H%M%S'))
output_path = os.path.join(check_abs_path(kwargs.get("output_path")), conf_filename)
config_data["filename"] = conf_filename
with closing(access_server('post', ctx, 'model/get/predict/conf', config_data, False, stream=True)) as response:
if response.status_code == 200:
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "wb") as fw:
for chunk in response.iter_content(1024):
if chunk:
fw.write(chunk)
res = {'retcode': 0,
'retmsg': "Query predict conf successfully. "
"File path is: {}".format(output_path)}
else:
try:
res = response.json() if isinstance(response, requests.models.Response) else response
except Exception:
res = {'retcode': 100,
'retmsg': "Query predict conf failed."
"For more details, please check logs/fate_flow/fate_flow_stat.log"}
prettify(res)
@model.command("deploy", short_help="Deploy model")
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@click.option("--cpn-list", type=click.STRING,
help="User inputs a string to specify component list")
@click.option("--cpn-path", type=click.Path(exists=True),
help="User specifies a file path which records the component list.")
@click.option("--dsl-path", type=click.Path(exists=True),
help="User specified predict dsl file")
@click.option("--cpn-step-index", type=click.STRING, multiple=True,
help="Specify a checkpoint model to replace the pipeline model. "
"Use : to separate component name and step index (E.g. --cpn-step-index cpn_a:123)")
@click.option("--cpn-step-name", type=click.STRING, multiple=True,
help="Specify a checkpoint model to replace the pipeline model. "
"Use : to separate component name and step name (E.g. --cpn-step-name cpn_b:foobar)")
@click.pass_context
def deploy(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Deploy model.
\b
- USAGE:
flow model deploy --model-id $MODEL_ID --model-version $MODEL_VERSION
"""
request_data = {
'model_id': kwargs['model_id'],
'model_version': kwargs['model_version'],
}
if kwargs.get("cpn_list") or kwargs.get("cpn_path"):
if kwargs.get("cpn_list"):
cpn_str = kwargs["cpn_list"]
elif kwargs.get("cpn_path"):
with open(kwargs["cpn_path"], "r") as fp:
cpn_str = fp.read()
else:
cpn_str = ""
if isinstance(cpn_str, list):
cpn_list = cpn_str
else:
            if cpn_str.find("/") != -1 or cpn_str.find("\\") != -1:
raise Exception("Component list string should not contain '/' or '\\'.")
cpn_str = cpn_str.replace(" ", "").replace("\n", "").strip(",[]")
cpn_list = cpn_str.split(",")
request_data['cpn_list'] = cpn_list
elif kwargs.get("dsl_path"):
with open(kwargs["dsl_path"], "r") as ft:
predict_dsl = ft.read()
request_data['dsl'] = predict_dsl
request_data['components_checkpoint'] = {}
for i in ('cpn_step_index', 'cpn_step_name'):
for j in kwargs[i]:
component, checkpoint = j.rsplit(':', 1)
if i == 'cpn_step_index':
checkpoint = int(checkpoint)
if component in request_data['components_checkpoint']:
raise KeyError(f"Duplicated component name '{component}'.")
request_data['components_checkpoint'][component] = {
i[4:]: checkpoint,
}
config_data, dsl_data = preprocess(**request_data)
access_server('post', ctx, 'model/deploy', config_data)
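# Shape of the deploy request assembled above (a sketch; values are
# placeholders):
#   {"model_id": "...", "model_version": "...",
#    "cpn_list": ["data_transform_0", "hetero_lr_0"],   # or "dsl": "..."
#    "components_checkpoint": {"hetero_lr_0": {"step_index": 123}}}
# i.e. --cpn-step-index cpn:123 yields {"cpn": {"step_index": 123}} and
# --cpn-step-name cpn:foo yields {"cpn": {"step_name": "foo"}}.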
@model.command("get-model-info", short_help="Get model info")
@cli_args.MODEL_ID
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.ROLE
@cli_args.PARTYID
@click.option('--detail', is_flag=True, default=False,
help="If specified, details of model will be shown.")
@click.pass_context
def get_model_info(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Get model information.
\b
- USAGE:
        flow model get-model-info --model-id $MODEL_ID --model-version $MODEL_VERSION
        flow model get-model-info --model-id $MODEL_ID --model-version $MODEL_VERSION --detail
"""
config_data, dsl_data = preprocess(**kwargs)
if not config_data.pop('detail'):
config_data['query_filters'] = ['create_date', 'role', 'party_id', 'roles', 'model_id',
'model_version', 'loaded_times', 'size', 'description', 'parent', 'parent_info']
access_server('post', ctx, 'model/query', config_data)
@model.command("homo-convert", short_help="Convert trained homogenous model")
@cli_args.CONF_PATH
@click.pass_context
def homo_convert_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Convert trained homogenous model to the format of another ML framework. Converted model files
will be saved alongside the original model and can be downloaded via model export command.
The supported conversions are:
HomoLR to `sklearn.linear_model.LogisticRegression`
HomoNN to `tf.keras.Sequential` or `torch.nn.Sequential`, depending on the originally-used backend type.
\b
- USAGE:
flow model homo-convert -c fate_flow/examples/homo_convert_model.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/homo/convert', config_data)
@model.command("homo-deploy", short_help="Deploy trained homogenous model")
@cli_args.CONF_PATH
@click.pass_context
def homo_deploy_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Deploy trained homogenous model to a target online serving system. The model must be
converted beforehand.
Currently the supported target serving system is KFServing. Refer to the example json
for detailed parameters.
\b
- USAGE:
flow model homo-deploy -c fate_flow/examples/homo_deploy_model.json
"""
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('deployment_type') == "kfserving":
kube_config = config_data.get('deployment_parameters', {}).get('config_file')
if kube_config:
if check_abs_path(kube_config):
with open(kube_config, 'r') as fp:
config_data['deployment_parameters']['config_file_content'] = fp.read()
del config_data['deployment_parameters']['config_file']
else:
prettify(
{
"retcode": 100,
"retmsg": "The kube_config file is obtained from the fate flow client machine, "
"but it does not exist. Please check the path: {}".format(kube_config)
}
)
return
access_server('post', ctx, 'model/homo/deploy', config_data)
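# Illustrative homo-deploy conf fragment (a sketch; only the keys inspected
# above are shown, and the kube config path is a placeholder):
#   {"deployment_type": "kfserving",
#    "deployment_parameters": {"config_file": "/home/user/.kube/config"}}
# The client inlines the file's content as 'config_file_content' before
# posting, so the fate flow server needs no filesystem access to it.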
| 17,583 | 35.941176 | 120 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/privilege.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Privilege Operations")
@click.pass_context
def privilege(ctx):
"""
\b
Provides numbers of privilege operational commands, including grant, query and delete.
For more details, please check out the help text.
"""
pass
@privilege.command("grant", short_help="Grant Privilege Command")
@cli_args.CONF_PATH
@click.pass_context
def grant(ctx, **kwargs):
"""
- DESCRIPTION:
\b
grant component | dataset privilege
\b
- USAGE:
flow privilege grant -c fateflow/examples/permission/grant.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'permission/grant', config_data)
@privilege.command("delete", short_help="Delete Privilege Command")
@cli_args.CONF_PATH
@click.pass_context
def delete(ctx, **kwargs):
"""
- DESCRIPTION:
\b
delete component | dataset privilege
\b
- USAGE:
flow privilege delete -c fateflow/examples/permission/delete.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'permission/delete', config_data)
@privilege.command("query", short_help="Query Privilege Command")
@cli_args.PARTYID_REQUIRED
@click.pass_context
def query(ctx, **kwargs):
"""
- DESCRIPTION:
\b
query component | dataset privilege
\b
- USAGE:
flow privilege query -p 10000
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'permission/query', config_data)
| 2,272 | 25.126437 | 90 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/tag.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Tag Operations")
@click.pass_context
def tag(ctx):
"""
\b
Provides numbers of model tags operational commands.
For more details, please check out the help text.
"""
pass
@tag.command("create", short_help="Create Tag Command")
@cli_args.TAG_NAME_REQUIRED
@cli_args.TAG_DESCRIPTION
@click.pass_context
def create_tag(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Create Tag Command.
\b
- USAGE:
        flow tag create -t $TAG_NAME -d $TAG_DESCRIPTION
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/tag/create', config_data)
@tag.command("query", short_help="Retrieve Tag Command")
@cli_args.TAG_NAME_REQUIRED
@click.option("--with-model", is_flag=True, default=False,
help="If specified, the information of models which have the "
"tag custom queried would be displayed.")
@click.pass_context
def query_tag(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Retrieve Tag Command.
\b
- USAGE:
flow tag query -t $TAG_NAME
flow tag query -t $TAG_NAME --with-model
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/tag/retrieve', config_data)
@tag.command("update", short_help="Update Tag Command")
@cli_args.TAG_NAME_REQUIRED
@click.option("--new-tag-name", type=click.STRING, required=False,
help="New Tag Name.")
@click.option("--new-tag-desc", type=click.STRING, required=False,
help="New Tag Description. Note that if there are some whitespaces in description, "
"please make sure the description text is enclosed in double quotation marks.")
@click.pass_context
def update_tag(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Update Tag Command.
\b
- USAGE:
flow tag update -t tag1 --new-tag-name tag2
flow tag update -t tag1 --new-tag-desc "This is the new description."
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/tag/update', config_data)
@tag.command("delete", short_help="Delete Tag Command")
@cli_args.TAG_NAME_REQUIRED
@click.pass_context
def delete_tag(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Delete Tag Command. Notice that the information of model would not be discarded even though the tag is removed.
\b
- USAGE:
flow tag delete -t tag1
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/tag/destroy', config_data)
@tag.command("list", short_help="List Tag Command")
@cli_args.LIMIT
@click.pass_context
def list_tag(ctx, **kwargs):
"""
\b
- DESCRIPTION:
List Tag Command.
\b
- USAGE:
flow tag list
flow tag list -l 3
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/tag/list', config_data)
| 3,699 | 27.90625 | 119 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/data.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
import os
import sys
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server, check_abs_path, prettify
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
import json
@click.group(short_help="Data Operations")
@click.pass_context
def data(ctx):
"""
\b
    Provides numbers of data operational commands, including upload, download, etc.
For more details, please check out the help text.
"""
pass
@data.command("upload", short_help="Upload Table Command")
@cli_args.CONF_PATH
@click.option('--verbose', is_flag=True, default=False,
help="If specified, verbose mode will be turn on. "
"Users can have feedback on upload task in progress. (default: False)")
@click.option('--drop', is_flag=True, default=False,
help="If specified, data of old version would be replaced by the current version. "
"Otherwise, current upload task would be rejected. (default: False)")
@click.pass_context
def upload(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Upload Data Table.
\b
- Usage:
flow data upload -c fateflow/examples/upload/upload_guest.json
flow data upload -c fateflow/examples/upload/upload_host.json --verbose --drop
"""
kwargs['drop'] = 1 if kwargs['drop'] else 0
kwargs['verbose'] = int(kwargs['verbose'])
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('use_local_data', 1):
file_name = check_abs_path(config_data.get('file'))
if os.path.exists(file_name):
with open(file_name, 'rb') as fp:
data = MultipartEncoder(
fields={'file': (os.path.basename(file_name), fp, 'application/octet-stream')}
)
tag = [0]
def read_callback(monitor):
if config_data.get('verbose') == 1:
sys.stdout.write("\r UPLOADING:{0}{1}".format(
"|" * (monitor.bytes_read * 100 // monitor.len), '%.2f%%' % (monitor.bytes_read * 100 // monitor.len)))
sys.stdout.flush()
if monitor.bytes_read / monitor.len == 1:
tag[0] += 1
if tag[0] == 2:
sys.stdout.write('\n')
data = MultipartEncoderMonitor(data, read_callback)
access_server('post', ctx, 'data/upload', json_data=None, data=data,
params=json.dumps(config_data), headers={'Content-Type': data.content_type})
else:
prettify(
{
"retcode": 100,
"retmsg": "The file is obtained from the fate flow client machine, but it does not exist, "
"please check the path: {}".format(file_name)
}
)
else:
access_server('post', ctx, 'data/upload', config_data)
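# Keys of the upload conf consulted above (a sketch; values are hypothetical,
# and the conf also carries table name/namespace fields not read here):
#   {"file": "examples/data/breast_hetero_guest.csv",
#    "use_local_data": 1}   # 1: stream the local file as a multipart upload;
#                           # 0: let the fate flow server read the path itself
# --drop and --verbose are merged into the conf as integers before posting.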
@data.command("download", short_help="Download Table Command")
@cli_args.CONF_PATH
@click.pass_context
def download(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download Data Table.
\b
- Usage:
flow data download -c fateflow/examples/download/download_table.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "data/download", config_data)
@data.command("writer", short_help="write Table Command")
@cli_args.CONF_PATH
@click.pass_context
def writer(ctx, **kwargs):
"""
\b
- DESCRIPTION:
        Write Data Table to external storage.
\b
- Usage:
        flow data writer -c fateflow/examples/writer/external_storage.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "data/writer", config_data)
@data.command("upload-history", short_help="Upload History Command")
@cli_args.LIMIT
@cli_args.JOBID
@click.pass_context
def upload_history(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query Upload Table History.
\b
- USAGE:
flow data upload-history -l 20
flow data upload-history --job-id $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "data/upload/history", config_data)
# @data.command(short_help="")
@click.pass_context
def download_history(ctx):
"""
"""
pass
| 5,063 | 32.098039 | 131 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/component.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from datetime import datetime
import click
from contextlib import closing
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import (prettify, preprocess, download_from_request,
access_server, check_abs_path)
@click.group(short_help="Component Operations")
@click.pass_context
def component(ctx):
"""
\b
    Provides numbers of component operational commands, including metrics, parameters, etc.
For more details, please check out the help text.
"""
pass
@component.command("list", short_help="List Components Command")
@cli_args.JOBID_REQUIRED
@click.pass_context
def list(ctx, **kwargs):
"""
\b
- DESCRIPTION:
List components of a specified job.
\b
- USAGE:
flow component list -j $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/list', config_data)
@component.command("metrics", short_help="Component Metrics Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def metrics(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query the List of Metrics.
\b
- USAGE:
flow component metrics -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/metrics', config_data)
@component.command("metric-all", short_help="Component Metric All Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def metric_all(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query All Metric Data.
\b
- USAGE:
flow component metric-all -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/metric/all', config_data)
@component.command("metric-delete", short_help="Delete Metric Command")
@click.option('-d', '--date', type=click.STRING,
help="An 8-digit valid date, format like 'YYYYMMDD'")
@cli_args.JOBID
@click.pass_context
def metric_delete(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Delete specified metric.
        If both optional arguments are provided, the 'date' argument takes priority
        and the job id is ignored.
\b
- USAGE:
flow component metric-delete -d 20200101
flow component metric-delete -j $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('date'):
config_data['model'] = config_data.pop('date')
access_server('post', ctx, 'tracking/component/metric/delete', config_data)
@component.command("parameters", short_help="Component Parameters Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def parameters(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query the parameters of a specified component.
\b
- USAGE:
flow component parameters -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/parameters', config_data)
@component.command("output-data", short_help="Component Output Data Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.option('-l', '--limit', type=click.INT, default=-1,
              help='limit count, default is -1 (download all output data)')
@click.pass_context
def output_data(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download the Output Data of A Specified Component.
\b
- USAGE:
flow component output-data -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0 --output-path ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(config_data['job_id'],
config_data['component_name'],
config_data['role'],
config_data['party_id'])
extract_dir = os.path.join(config_data['output_path'], tar_file_name.replace('.tar.gz', ''))
with closing(access_server('get', ctx, 'tracking/component/output/data/download',
config_data, False, stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
res = {
'retcode': 0,
'directory': os.path.abspath(extract_dir),
                    'retmsg': 'Downloaded successfully, please check {} directory'.format(
os.path.abspath(extract_dir))}
except BaseException:
res = {'retcode': 100,
'retmsg': 'Download failed, please check if the parameters are correct.'}
else:
try:
res = response.json()
except Exception:
res = {'retcode': 100,
'retmsg': 'Download failed, for more details please check logs/fate_flow/fate_flow_stat.log.'}
prettify(res)
@component.command("output-model", short_help="Component Output Model Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def output_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
        Query the Model of A Specified Component.
\b
- USAGE:
flow component output-model -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/output/model', config_data)
@component.command("output-data-table", short_help="Component Output Data Table Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def output_data_table(ctx, **kwargs):
"""
\b
- DESCRIPTION:
View Table Name and Namespace.
\b
- USAGE:
flow component output-data-table -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/output/data/table', config_data)
@component.command("get-summary", short_help="Download Component Summary Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@cli_args.OUTPUT_PATH
@click.pass_context
def get_summary(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download summary of a specified component and save it as a json file.
\b
- USAGE:
flow component get-summary -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
flow component get-summary -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0 -o ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
if config_data.get("output_path"):
if not os.path.isdir(config_data.get("output_path")):
res = {
"retcode": 100,
"retmsg": "Please input a valid directory path."
}
else:
config_data["filename"] = "summary_{}_{}.json".format(config_data['component_name'],
datetime.now().strftime('%Y%m%d%H%M%S'))
config_data["output_path"] = os.path.join(check_abs_path(
config_data["output_path"]), config_data["filename"])
with closing(access_server("post", ctx, "tracking/component/summary/download",
config_data, False, stream=True)) as response:
if response.status_code == 200:
with open(config_data["output_path"], "wb") as fout:
for chunk in response.iter_content(1024):
if chunk:
fout.write(chunk)
res = {
"retcode": 0,
"retmsg": "The summary of component <{}> has been stored successfully. "
"File path is: {}.".format(config_data["component_name"],
config_data["output_path"])
}
else:
try:
res = response.json()
except Exception:
res = {"retcode": 100,
"retmsg": "Download component summary failed, "
"for more details, please check logs/fate_flow/fate_flow_stat.log."}
prettify(res)
else:
access_server("post", ctx, "tracking/component/summary/download", config_data)
@component.command('hetero-model-merge', short_help='Merge Hetero Model Command')
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.GUEST_PARTYID_REQUIRED
@cli_args.HOST_PARTYIDS_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.option('--model-type', type=click.STRING, required=True)
@click.option('--output-format', type=click.STRING, required=True)
@click.option('--target-name', type=click.STRING)
@click.option('--host-rename/--no-host-rename', is_flag=True, default=None)
@click.option('--include-guest-coef/--no-include-guest-coef', is_flag=True, default=None)
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def hetero_model_merge(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Merge a hetero model.
\b
- USAGE:
flow component hetero-model-merge --model-id guest-9999#host-9998#model --model-version 202208241838502253290 --guest-party-id 9999 --host-party-ids 9998,9997 --component-name hetero_secure_boost_0 --model-type secureboost --output-format pmml --target-name y --no-host-rename --no-include-guest-coef --output-path model.xml
"""
config_data, dsl_data = preprocess(**kwargs)
config_data['host_party_ids'] = config_data['host_party_ids'].split(',')
response = access_server('post', ctx, 'component/hetero/merge', config_data, False)
if not response.json().get('data'):
prettify(response)
return
with open(config_data['output_path'], 'w', encoding='utf-8') as f:
f.write(response.json()['data'])
@component.command('woe-array-extract', short_help='Extract WOE Array Command')
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def woe_array_extract(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Extract WOE array from a guest model.
\b
- USAGE:
flow component woe-array-extract --model-id guest-9999#host-10000#model --model-version 202211142055541649630 --role guest --party-id 9999 --component-name hetero_feature_binning_0 --output-path woe_array.json
"""
config_data, dsl_data = preprocess(**kwargs)
response = access_server('post', ctx, 'component/woe_array/extract', config_data, False)
if not response.json().get('data'):
prettify(response)
return
with open(config_data['output_path'], 'w', encoding='utf-8') as f:
json.dump(response.json()['data'], f)
@component.command('woe-array-merge', short_help='Merge WOE Array Command')
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@cli_args.INPUT_PATH_REQUIRED
@click.pass_context
def woe_array_merge(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Merge WOE array into a host model.
\b
- USAGE:
flow component woe-array-merge --model-id guest-9999#host-10000#model --model-version 202211142055541649630 --role host --party-id 10000 --component-name hetero_feature_binning_0 --input-path woe_array.json
"""
config_data, dsl_data = preprocess(**kwargs)
with open(config_data.pop('input_path'), 'r', encoding='utf-8') as f:
config_data['woe_array'] = json.load(f)
access_server('post', ctx, 'component/woe_array/merge', config_data)
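# Illustrative sketch (hypothetical values): the JSON payload that
# `flow component hetero-model-merge` assembles from its options before
# posting to 'component/hetero/merge'. Note that --host-party-ids is split
# on commas into a list, as done above.
#
#     {
#         "model_id": "guest-9999#host-9998#model",
#         "model_version": "202208241838502253290",
#         "guest_party_id": "9999",
#         "host_party_ids": ["9998", "9997"],
#         "component_name": "hetero_secure_boost_0",
#         "model_type": "secureboost",
#         "output_format": "pmml",
#         "output_path": "model.xml"
#     }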
| 13,399 | 35.021505 | 332 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/service.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import (preprocess, access_server)
@click.group(short_help="FATE Flow External Service Operations")
@click.pass_context
def service(ctx):
"""
\b
    Provides a number of external service operational commands, such as querying the service registry.
For more details, please check out the help text.
"""
pass
@service.command("registry", short_help="Show Registry Command")
@click.pass_context
def registry(ctx, **kwargs):
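    """
    \b
    - DESCRIPTION:
        Query the information of services registered with FATE Flow.
    \b
    - USAGE:
        flow service registry
    """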
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'server/service/get', config_data)
| 1,273 | 31.666667 | 94 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/key.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server
@click.group(short_help="Key Operations")
@click.pass_context
def key(ctx):
"""
\b
    Provides a number of key operational commands, including save, query and delete.
For more details, please check out the help text.
"""
pass
@key.command("save", short_help="Save Public Key Command")
@cli_args.CONF_PATH
@click.pass_context
def save(ctx, **kwargs):
"""
- DESCRIPTION:
\b
save other site public key
\b
- USAGE:
flow key save -c fateflow/examples/key/save_public_key.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'key/public/save', config_data)
@key.command("delete", short_help="Delete Public Key Command")
@cli_args.PARTYID_REQUIRED
@click.pass_context
def delete(ctx, **kwargs):
"""
- DESCRIPTION:
\b
delete other site public key
\b
- USAGE:
flow key delete -p 10000
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'key/public/delete', config_data)
@key.command("query", short_help="Query Public Key Command")
@cli_args.PARTYID_REQUIRED
@click.pass_context
def query(ctx, **kwargs):
"""
- DESCRIPTION:
\b
query site public key
\b
- USAGE:
flow key query -p 10000
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'key/query', config_data)
| 2,153 | 23.758621 | 83 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/job.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from datetime import datetime
import click
import requests
from contextlib import closing
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import (preprocess, download_from_request, access_server,
prettify, check_abs_path)
@click.group(short_help="Job Operations")
@click.pass_context
def job(ctx):
"""
\b
    Provides a number of job operational commands, including submit, stop, query, etc.
For more details, please check out the help text.
"""
pass
@job.command("submit", short_help="Submit Job Command")
@cli_args.CONF_PATH
@cli_args.DSL_PATH
@click.pass_context
def submit(ctx, **kwargs):
"""
- DESCRIPTION:
\b
Submit a pipeline job.
Used to be 'submit_job'.
\b
- USAGE:
flow job submit -c fate_flow/examples/test_hetero_lr_job_conf.json -d fate_flow/examples/test_hetero_lr_job_dsl.json
"""
config_data, dsl_data = preprocess(**kwargs)
post_data = {
'job_dsl': dsl_data,
'job_runtime_conf': config_data
}
access_server('post', ctx, 'job/submit', post_data)
@job.command("list", short_help="List Job Command")
@cli_args.LIMIT
@click.pass_context
def list_job(ctx, **kwargs):
"""
- DESCRIPTION:
        List jobs.
\b
- USAGE:
flow job list
flow job list -l 30
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'job/list/job', config_data)
@job.command("query", short_help="Query Job Command")
@cli_args.JOBID
@cli_args.ROLE
@cli_args.PARTYID
@cli_args.STATUS
@click.pass_context
def query(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query job information by filters.
Used to be 'query_job'.
\b
- USAGE:
flow job query -r guest -p 9999 -s success
        flow job query -j $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
response = access_server('post', ctx, "job/query", config_data, False)
if isinstance(response, requests.models.Response):
response = response.json()
if response['retcode'] == 0:
for i in range(len(response['data'])):
del response['data'][i]['f_runtime_conf']
del response['data'][i]['f_dsl']
prettify(response)
# @job.command("clean", short_help="Clean Job Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE
@cli_args.PARTYID
@cli_args.COMPONENT_NAME
@click.pass_context
def clean(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Clean processor, data table and metric data.
Used to be 'clean_job'.
\b
- USAGE:
flow job clean -j $JOB_ID -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "job/clean", config_data)
@job.command("stop", short_help="Stop Job Command")
@cli_args.JOBID_REQUIRED
@click.pass_context
def stop(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Stop a specified job.
\b
- USAGE:
flow job stop -j $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "job/stop", config_data)
@job.command("rerun", short_help="Rerun Job Command")
@cli_args.JOBID_REQUIRED
@cli_args.FORCE
@cli_args.COMPONENT_NAME
@click.pass_context
def rerun(ctx, **kwargs):
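    """
    \b
    - DESCRIPTION:
        Rerun a specified job, optionally from a specified component.
    \b
    - USAGE:
        flow job rerun -j $JOB_ID
    """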
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, "job/rerun", config_data)
@job.command("config", short_help="Config Job Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def config(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download Configurations of A Specified Job.
\b
- USAGE:
flow job config -j $JOB_ID -r host -p 10000 --output-path ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
response = access_server('post', ctx, 'job/config', config_data, False)
if isinstance(response, requests.models.Response):
response = response.json()
if response['retcode'] == 0:
job_id = response['data']['job_id']
download_directory = os.path.join(os.path.abspath(config_data['output_path']), 'job_{}_config'.format(job_id))
os.makedirs(download_directory, exist_ok=True)
for k, v in response['data'].items():
if k == 'job_id':
continue
with open('{}/{}.json'.format(download_directory, k), 'w') as fw:
json.dump(v, fw, indent=4)
del response['data']['dsl']
del response['data']['runtime_conf']
response['directory'] = download_directory
        response['retmsg'] = 'downloaded successfully, please check {} directory'.format(download_directory)
prettify(response)
@job.command("log", short_help="Log Job Command")
@cli_args.JOBID_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def log(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download Log Files of A Specified Job.
\b
- USAGE:
        flow job log -j $JOB_ID --output-path ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
job_id = config_data['job_id']
tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
extract_dir = os.path.join(config_data['output_path'], 'job_{}_log'.format(job_id))
with closing(access_server('post', ctx, 'job/log/download', config_data, False, stream=True)) as response:
if response.status_code == 200:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
res = {'retcode': 0,
'directory': extract_dir,
                   'retmsg': 'downloaded successfully, please check {} directory'.format(extract_dir)}
else:
res = response.json() if isinstance(response, requests.models.Response) else response
prettify(res)
@job.command("view", short_help="Query Job Data View Command")
@cli_args.JOBID
@cli_args.ROLE
@cli_args.PARTYID
@cli_args.STATUS
@click.pass_context
def view(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query job data view information by filters.
Used to be 'data_view_query'.
\b
- USAGE:
flow job view -r guest -p 9999
        flow job view -j $JOB_ID
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'job/data/view/query', config_data)
@job.command("dsl", short_help="Generate Predict DSL Command")
@click.option("--cpn-list", type=click.STRING,
help="User inputs a string to specify component list")
@click.option("--cpn-path", type=click.Path(exists=True),
help="User specifies a file path which records the component list.")
@click.option("--train-dsl-path", type=click.Path(exists=True), required=True,
help="User specifies the train dsl file path.")
@cli_args.OUTPUT_PATH
@click.pass_context
def dsl_generator(ctx, **kwargs):
"""
\b
- DESCRIPTION:
A predict dsl generator.
Before using predict dsl generator, users should prepare:
1. name list of component which you are going to use in predict progress,
2. the train dsl file path you specified in train progress.
\b
    Notice that users can specify the component name list either in a text file
    or directly in the terminal. We strongly recommend using a prepared file to
    specify the component list, in order to avoid unnecessary mistakes.
\b
- USAGE:
flow job dsl --cpn-path fate_flow/examples/component_list.txt --train-dsl-path fate_flow/examples/test_hetero_lr_job_dsl.json -o fate_flow/examples/
flow job dsl --cpn-list "dataio_0, hetero_feature_binning_0, hetero_feature_selection_0, evaluation_0" --train-dsl-path fate_flow/examples/test_hetero_lr_job_dsl.json -o fate_flow/examples/
flow job dsl --cpn-list [dataio_0,hetero_feature_binning_0,hetero_feature_selection_0,evaluation_0] --train-dsl-path fate_flow/examples/test_hetero_lr_job_dsl.json -o fate_flow/examples/
"""
if kwargs.get("cpn_list"):
cpn_str = kwargs.get("cpn_list")
elif kwargs.get("cpn_path"):
with open(kwargs.get("cpn_path"), "r") as fp:
cpn_str = fp.read()
else:
cpn_str = ""
with open(kwargs.get("train_dsl_path"), "r") as ft:
train_dsl = ft.read()
config_data = {
"cpn_str": cpn_str,
"train_dsl": train_dsl,
"version": "2",
}
if kwargs.get("output_path"):
dsl_filename = "predict_dsl_{}.json".format(datetime.now().strftime('%Y%m%d%H%M%S'))
output_path = os.path.join(check_abs_path(kwargs.get("output_path")), dsl_filename)
config_data["filename"] = dsl_filename
with closing(access_server('post', ctx, 'job/dsl/generate', config_data, False, stream=True)) as response:
if response.status_code == 200:
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "wb") as fw:
for chunk in response.iter_content(1024):
if chunk:
fw.write(chunk)
res = {'retcode': 0,
'retmsg': "New predict dsl file has been generated successfully. "
"File path is: {}".format(output_path)}
else:
try:
res = response.json() if isinstance(response, requests.models.Response) else response
except Exception:
res = {'retcode': 100,
'retmsg': "New predict dsl file generated failed."
"For more details, please check logs/fate_flow/fate_flow_stat.log"}
prettify(res)
else:
access_server('post', ctx, 'job/dsl/generate', config_data)
@job.command("parameter-update", short_help="Update Job Components Parameters Command")
@cli_args.JOBID_REQUIRED
@cli_args.CONF_PATH
@click.pass_context
def update_parameter(ctx, **kwargs):
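    """
    \b
    - DESCRIPTION:
        Update component parameters of a specified job.
    \b
    - USAGE:
        flow job parameter-update -j $JOB_ID -c $NEW_RUNTIME_CONF_PATH
    """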
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'job/parameter/update', config_data)
| 10,950 | 32.695385 | 197 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/tracking.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime
import click
from contextlib import closing
import requests
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import (prettify, preprocess, download_from_request,
access_server, check_abs_path)
@click.group(short_help="Component Operations")
@click.pass_context
def tracking(ctx):
"""
\b
    Provides a number of component tracking commands, including metrics, parameters, etc.
For more details, please check out the help text.
"""
pass
@tracking.command("metrics", short_help="Component Metrics Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def metrics(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query the List of Metrics.
\b
- USAGE:
        flow tracking metrics -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/metrics', config_data)
@tracking.command("metric-all", short_help="Component Metric All Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def metric_all(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query All Metric Data.
\b
- USAGE:
        flow tracking metric-all -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/metric/all', config_data)
@tracking.command("parameters", short_help="Component Parameters Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def parameters(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Query the parameters of a specified component.
\b
- USAGE:
        flow tracking parameters -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/parameters', config_data)
@tracking.command("output-data", short_help="Component Output Data Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.option('-l', '--limit', type=click.INT, default=-1,
              help='limit count, default is -1 (download all output data)')
@click.pass_context
def output_data(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download the Output Data of A Specified Component.
\b
- USAGE:
        flow tracking output-data -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0 --output-path ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(config_data['job_id'],
config_data['component_name'],
config_data['role'],
config_data['party_id'])
extract_dir = os.path.join(config_data['output_path'], tar_file_name.replace('.tar.gz', ''))
with closing(access_server('get', ctx, 'tracking/component/output/data/download',
config_data, False, stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
res = {
'retcode': 0,
'directory': os.path.abspath(extract_dir),
                    'retmsg': 'Downloaded successfully, please check {} directory'.format(
os.path.abspath(extract_dir))}
except BaseException:
res = {'retcode': 100,
'retmsg': 'Download failed, please check if the parameters are correct.'}
else:
try:
res = response.json() if isinstance(response, requests.models.Response) else response
except Exception:
res = {'retcode': 100,
'retmsg': 'Download failed, for more details please check logs/fate_flow/fate_flow_stat.log.'}
prettify(res)
@tracking.command("output-model", short_help="Component Output Model Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def output_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
        Query the Model of A Specified Component.
\b
- USAGE:
        flow tracking output-model -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/output/model', config_data)
@tracking.command("output-data-table", short_help="Component Output Data Table Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@click.pass_context
def output_data_table(ctx, **kwargs):
"""
\b
- DESCRIPTION:
View Table Name and Namespace.
\b
- USAGE:
        flow tracking output-data-table -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'tracking/component/output/data/table', config_data)
@tracking.command("get-summary", short_help="Download Component Summary Command")
@cli_args.JOBID_REQUIRED
@cli_args.ROLE_REQUIRED
@cli_args.PARTYID_REQUIRED
@cli_args.COMPONENT_NAME_REQUIRED
@cli_args.OUTPUT_PATH
@click.pass_context
def get_summary(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Download summary of a specified component and save it as a json file.
\b
- USAGE:
        flow tracking get-summary -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0
        flow tracking get-summary -j $JOB_ID -r host -p 10000 -cpn hetero_feature_binning_0 -o ./examples/
"""
config_data, dsl_data = preprocess(**kwargs)
if config_data.get("output_path"):
if not os.path.isdir(config_data.get("output_path")):
res = {
"retcode": 100,
"retmsg": "Please input a valid directory path."
}
else:
config_data["filename"] = "summary_{}_{}.json".format(config_data['component_name'],
datetime.now().strftime('%Y%m%d%H%M%S'))
config_data["output_path"] = os.path.join(check_abs_path(
config_data["output_path"]), config_data["filename"])
with closing(access_server("post", ctx, "tracking/component/summary/download",
config_data, False, stream=True)) as response:
if response.status_code == 200:
with open(config_data["output_path"], "wb") as fout:
for chunk in response.iter_content(1024):
if chunk:
fout.write(chunk)
res = {
"retcode": 0,
"retmsg": "The summary of component <{}> has been stored successfully. "
"File path is: {}.".format(config_data["component_name"],
config_data["output_path"])
}
else:
try:
res = response.json() if isinstance(response, requests.models.Response) else response
except Exception:
res = {"retcode": 100,
"retmsg": "Download component summary failed, "
"for more details, please check logs/fate_flow/fate_flow_stat.log."}
prettify(res)
else:
access_server("post", ctx, "tracking/component/summary/download", config_data)
| 8,924 | 36.343096 | 118 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/commands/provider.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from flow_client.flow_cli.utils import cli_args
from flow_client.flow_cli.utils.cli_utils import (preprocess, access_server, check_abs_path)
@click.group(short_help="Component Provider Operations")
@click.pass_context
def provider(ctx):
"""
\b
    Provides a number of component provider operational commands, including list, register, etc.
For more details, please check out the help text.
"""
pass
@provider.command("list", short_help="List All Providers Command")
@click.pass_context
@click.option("-n", "--provider-name", type=click.STRING, help="Provider Name")
def list_providers(ctx, **kwargs):
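    """
    \b
    - DESCRIPTION:
        List registered component providers, or a single provider by name.
    \b
    - USAGE:
        flow provider list
    """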
config_data, dsl_data = preprocess(**kwargs)
if kwargs.get("provider_name"):
access_server("post", ctx, f"provider/{kwargs['provider_name']}/get", config_data)
else:
access_server("post", ctx, "provider/get", config_data)
@provider.command("register", short_help="Register New Provider Command")
@cli_args.CONF_PATH
@click.pass_context
def register(ctx, **kwargs):
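    """
    \b
    - DESCRIPTION:
        Register a new component provider from a configuration file.
    \b
    - USAGE:
        flow provider register -c $CONF_PATH
    """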
config_data, dsl_data = preprocess(**kwargs)
for p in {"path"}:
config_data[p] = check_abs_path(config_data.get(p))
access_server("post", ctx, "provider/register", config_data)
@provider.command("list-components", short_help="List All Components Command")
@click.pass_context
def list_components(ctx, **kwargs):
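    """
    \b
    - DESCRIPTION:
        List all components known to FATE Flow.
    \b
    - USAGE:
        flow provider list-components
    """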
config_data, dsl_data = preprocess(**kwargs)
access_server("post", ctx, "component/get", config_data)
| 2,094 | 33.916667 | 94 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/utils/cli_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import configparser
import json
import os
import sys
import tarfile
import traceback
import typing
from base64 import b64encode
from hmac import HMAC
from time import time
from urllib.parse import quote, urlencode
from uuid import uuid1
import click
import requests
class Response(requests.models.Response):
def __init__(self, resp, status):
super().__init__()
self.encoding = 'utf-8'
self._content = json.dumps(resp).encode(self.encoding)
self._content_consumed = True
self.status_code = status
self.headers['Content-Type'] = 'application/json'
def check_config(config: typing.Dict, required_arguments: typing.List):
no_arguments = []
error_arguments = []
for require_argument in required_arguments:
if isinstance(require_argument, tuple):
config_value = config.get(require_argument[0], None)
if isinstance(require_argument[1], (tuple, list)):
if config_value not in require_argument[1]:
error_arguments.append(require_argument)
elif config_value != require_argument[1]:
error_arguments.append(require_argument)
elif require_argument not in config:
no_arguments.append(require_argument)
if no_arguments or error_arguments:
raise Exception('the following arguments are required: {} {}'.format(
','.join(no_arguments), ','.join(['{}={}'.format(a[0], a[1]) for a in error_arguments])))
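# Illustrative sketch: how check_config validates required arguments. Plain
# strings must simply be present; a (key, value) tuple must match the given
# value, and (key, (v1, v2)) must match one of the listed values.
#
#     conf = {'role': 'guest', 'party_id': 9999}
#     check_config(conf, ['party_id', ('role', ('guest', 'host'))])  # passes
#     check_config(conf, ['job_id'])  # raises: job_id is required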
def prettify(response):
if isinstance(response, requests.models.Response):
try:
response = response.json()
except json.decoder.JSONDecodeError:
response = {
'retcode': 100,
'retmsg': response.text,
}
click.echo(json.dumps(response, indent=4, ensure_ascii=False))
click.echo('')
return response
def access_server(method, ctx, postfix, json_data=None, echo=True, **kwargs):
if not ctx.obj.get('initialized', False):
response = {
'retcode': 100,
'retmsg': (
'Fate flow CLI has not been initialized yet or configured incorrectly. '
'Please initialize it before using CLI at the first time. '
'And make sure the address of fate flow server is configured correctly. '
'The configuration file path is: "{}".'.format(
os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
'settings.yaml',
))
)
)
}
if echo:
prettify(response)
return
return Response(response, 500)
sess = requests.Session()
stream = kwargs.pop('stream', sess.stream)
timeout = kwargs.pop('timeout', None)
prepped = requests.Request(
method, '/'.join([
ctx.obj['server_url'],
postfix,
]),
json=json_data, **kwargs
).prepare()
if ctx.obj.get('app_key') and ctx.obj.get('secret_key'):
timestamp = str(round(time() * 1000))
nonce = str(uuid1())
signature = b64encode(HMAC(ctx.obj['secret_key'].encode('ascii'), b'\n'.join([
timestamp.encode('ascii'),
nonce.encode('ascii'),
ctx.obj['app_key'].encode('ascii'),
prepped.path_url.encode('ascii'),
prepped.body if json_data is not None else b'',
urlencode(sorted(kwargs['data'].items()), quote_via=quote, safe='-._~').encode('ascii')
if kwargs.get('data') and isinstance(kwargs['data'], dict) else b'',
]), 'sha1').digest()).decode('ascii')
prepped.headers.update({
'TIMESTAMP': timestamp,
'NONCE': nonce,
'APP_KEY': ctx.obj['app_key'],
'SIGNATURE': signature,
})
try:
response = sess.send(prepped, stream=stream, timeout=timeout)
if echo:
prettify(response)
return
return response
except Exception as e:
exc_type, exc_value, exc_traceback_obj = sys.exc_info()
response = {
'retcode': 100,
'retmsg': str(e),
'traceback': traceback.format_exception(
exc_type,
exc_value,
exc_traceback_obj,
),
}
if 'Connection refused' in str(e):
response['retmsg'] = (
'Connection refused. '
'Please check if the fate flow service is started.'
)
del response['traceback']
elif 'Connection aborted' in str(e):
response['retmsg'] = (
'Connection aborted. '
'Please make sure that the address of fate flow server is configured correctly. '
'The configuration file path is: {}'.format(
os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
'settings.yaml',
))
)
)
del response['traceback']
if echo:
prettify(response)
return
return Response(response, 500)
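# Illustrative sketch (hypothetical standalone helper): the request-signing
# scheme used in access_server above, extracted for clarity. The signature is
# a base64-encoded HMAC-SHA1 over timestamp, nonce, app key, request path,
# JSON body and urlencoded form data, joined by newlines.
#
#     def sign_request(secret_key, app_key, timestamp, nonce, path_url, body=b''):
#         message = b'\n'.join([
#             timestamp.encode('ascii'),
#             nonce.encode('ascii'),
#             app_key.encode('ascii'),
#             path_url.encode('ascii'),
#             body,        # prepared JSON body, or b'' if none
#             b'',         # urlencoded form data, empty for JSON requests
#         ])
#         digest = HMAC(secret_key.encode('ascii'), message, 'sha1').digest()
#         return b64encode(digest).decode('ascii')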
def preprocess(**kwargs):
config_data = {}
if kwargs.get('conf_path'):
conf_path = os.path.abspath(kwargs.get('conf_path'))
with open(conf_path, 'r') as conf_fp:
config_data = json.load(conf_fp)
if config_data.get('output_path'):
config_data['output_path'] = os.path.abspath(config_data['output_path'])
if ('party_id' in kwargs.keys()) or ('role' in kwargs.keys()):
config_data['local'] = config_data.get('local', {})
if kwargs.get('party_id'):
config_data['local']['party_id'] = kwargs.get('party_id')
if kwargs.get('role'):
config_data['local']['role'] = kwargs.get('role')
config_data.update(dict((k, v) for k, v in kwargs.items() if v is not None))
dsl_data = {}
if kwargs.get('dsl_path'):
dsl_path = os.path.abspath(kwargs.get('dsl_path'))
with open(dsl_path, 'r') as dsl_fp:
dsl_data = json.load(dsl_fp)
return config_data, dsl_data
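# Illustrative sketch (hypothetical values): preprocess merges the JSON conf
# file with every non-None CLI option and mirrors role/party_id under a
# 'local' section, e.g.:
#
#     config_data, dsl_data = preprocess(conf_path='conf.json', role='guest',
#                                        party_id='9999', job_id='202201010000')
#     # config_data now holds the file contents plus the CLI options, with
#     # config_data['local'] == {'party_id': '9999', 'role': 'guest'}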
def download_from_request(http_response, tar_file_name, extract_dir):
with open(tar_file_name, 'wb') as fw:
for chunk in http_response.iter_content(1024):
if chunk:
fw.write(chunk)
tar = tarfile.open(tar_file_name, "r:gz")
file_names = tar.getnames()
for file_name in file_names:
tar.extract(file_name, extract_dir)
tar.close()
os.remove(tar_file_name)
def check_abs_path(path):
if os.path.isabs(path):
return path
return os.path.abspath(os.path.join(os.getcwd(), path))
def get_project_base_directory():
config = configparser.ConfigParser()
    # use a context manager so the settings file handle is closed properly
    with open(os.path.join(os.path.dirname(__file__), os.pardir, 'settings.ini')) as settings_file:
        config.read_file(settings_file)
return config["fate_root"]["project_path"]
def string_to_bytes(string):
return string if isinstance(string, bytes) else string.encode(encoding="utf-8")
| 7,929 | 32.179916 | 101 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/utils/detect_utils.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
def check_config(config: typing.Dict, required_arguments: typing.List):
no_arguments = []
error_arguments = []
for require_argument in required_arguments:
if isinstance(require_argument, tuple):
config_value = config.get(require_argument[0], None)
if isinstance(require_argument[1], (tuple, list)):
if config_value not in require_argument[1]:
error_arguments.append(require_argument)
elif config_value != require_argument[1]:
error_arguments.append(require_argument)
elif require_argument not in config:
no_arguments.append(require_argument)
if no_arguments or error_arguments:
raise Exception('the following arguments are required: {} {}'.format(
','.join(no_arguments), ','.join(['{}={}'.format(a[0], a[1]) for a in error_arguments])))
| 1,522 | 42.514286 | 101 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/utils/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_client/flow_cli/utils/cli_args.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
CONF_PATH = click.option("-c", "--conf-path", type=click.Path(exists=True), required=True,
help="Configuration file path.")
DSL_PATH = click.option("-d", "--dsl-path", type=click.Path(exists=True),
help="Domain-specific language(DSL) file path. If the type of job is 'predict', "
"you can leave this feature blank, or you can provide a valid dsl file to "
"replace the one that aotumatically generated by fate.")
LIMIT = click.option("-l", "--limit", type=click.INT, default=10,
help="LIMIT flag constrains the number of records to return. (default: 10)")
JOBID = click.option("-j", "--job-id", type=click.STRING,
help="A valid job id.")
JOBID_REQUIRED = click.option("-j", "--job-id", type=click.STRING, required=True,
help="A valid job id.")
role_choices_list = ["local", "guest", "arbiter", "host"]
ROLE = click.option("-r", "--role", type=click.Choice(role_choices_list), metavar="TEXT",
help="Role name. Users can choose one from {} and {}.".format(",".join(role_choices_list[:-1]),
role_choices_list[-1]))
ROLE_REQUIRED = click.option("-r", "--role", type=click.Choice(role_choices_list), required=True, metavar="TEXT",
help="Role name. Users can choose one from {} and {}.".format(
",".join(role_choices_list[:-1]),
role_choices_list[-1]))
PARTYID = click.option("-p", "--party-id", type=click.STRING,
help="A valid party id.")
PARTYID_REQUIRED = click.option("-p", "--party-id", type=click.STRING, required=True,
help="A valid party id.")
GUEST_PARTYID_REQUIRED = click.option("-gid", "--guest-party-id", type=click.STRING, required=True,
help="A valid party id.")
HOST_PARTYID_REQUIRED = click.option("-hid", "--host-party-id", type=click.STRING, required=True,
help="A valid party id.")
ARBITER_PARTYID_REQUIRED = click.option("-aid", "--arbiter-party-id", type=click.STRING, required=True,
help="A valid party id.")
HOST_PARTYIDS_REQUIRED = click.option("-hids", "--host-party-ids", type=click.STRING, required=True,
help="Multiple party ids, use a comma to separate each one.")
COMPONENT_NAME = click.option("-cpn", "--component-name", type=click.STRING,
help="A valid component name.")
COMPONENT_NAME_REQUIRED = click.option("-cpn", "--component-name", type=click.STRING, required=True,
help="A valid component name.")
status_choices_list = ["success", "failed", "running", "waiting", "timeout", "canceled", "partial", "deleted"]
STATUS = click.option("-s", "--status", type=click.Choice(status_choices_list), metavar="TEXT",
help="Job status. Users can choose one from {} and {}.".format(
", ".join(status_choices_list[:-1]),
status_choices_list[-1]))
OUTPUT_PATH = click.option("-o", "--output-path", type=click.Path(exists=False),
help="User specifies output directory/file path.")
OUTPUT_PATH_REQUIRED = click.option("-o", "--output-path", type=click.Path(exists=False), required=True,
help="User specifies output directory/file path.")
INPUT_PATH = click.option("-i", "--input-path", type=click.Path(exists=True),
help="User specifies input directory/file path.")
INPUT_PATH_REQUIRED = click.option("-i", "--input-path", type=click.Path(exists=True), required=True,
help="User specifies input directory/file path.")
NAMESPACE = click.option("-n", "--namespace", type=click.STRING,
help="Namespace.")
NAMESPACE_REQUIRED = click.option("-n", "--namespace", type=click.STRING, required=True,
help="Namespace.")
TABLE_NAME = click.option("-t", "--table-name", type=click.STRING,
help="Table name.")
TABLE_NAME_REQUIRED = click.option("-t", "--table-name", type=click.STRING, required=True,
help="Table name.")
TAG_NAME_REQUIRED = click.option("-t", "--tag-name", type=click.STRING, required=True,
help="The name of tag.")
TAG_DESCRIPTION = click.option("-d", "--tag-desc", type=click.STRING,
help="The description of tag. Note that if there are some whitespaces in description, "
"please make sure the description text is enclosed in double quotation marks.")
MODEL_ID = click.option("--model-id", type=click.STRING, help="Model id.")
MODEL_ID_REQUIRED = click.option("--model-id", type=click.STRING, required=True, help="Model id.")
MODEL_VERSION = click.option("--model-version", type=click.STRING, help="Model version.")
MODEL_VERSION_REQUIRED = click.option("--model-version", type=click.STRING, required=True, help="Model version.")
SERVICE_NAME = click.option("--service", type=click.STRING, required=True, help="Service Name.")
FORCE = click.option("--force", is_flag=True, default=False, help="Force execute.")
SIMPLE = click.option("-s", '--simple', is_flag=True, default=False, help="Simple output.")
TIMEOUT = click.option("--timeout", type=click.INT, default=300,
help="Timeout limit, default 300 seconds.")
TASK_CORES = click.option("--task-cores", type=click.INT, default=2,
help="Run Job Task Cores, default 2 cores.")
SRC_PARTY_ID = click.option("--src-party-id", type=click.STRING, required=True, help="Source party id.")
SRC_ROLE = click.option("--src-role", type=click.Choice(role_choices_list), required=True, metavar="TEXT",
help="Source role name. Users can choose one from {} and {}.".format(
",".join(role_choices_list[:-1]),
role_choices_list[-1]))
MIN_DATA = click.option("--min-data", type=click.INT, help="min data")
CONNECTOR_NAME = click.option("--connector-name", type=click.STRING, required=True, help="connector name")
| 7,116 | 59.313559 | 118 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/utils.py
|
import os
import json
import socket
import time
import typing
import tarfile
import datetime
from enum import Enum, IntEnum
PROJECT_BASE = os.getenv("FATE_DEPLOY_BASE")
def start_cluster_standalone_job_server():
print("use service.sh to start standalone node server....")
os.system("sh service.sh start --standalone_node")
time.sleep(5)
def get_parser_version_set():
return {"1", "2"}
def get_project_base_directory():
global PROJECT_BASE
if PROJECT_BASE is None:
PROJECT_BASE = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir
)
)
return PROJECT_BASE
def download_from_request(http_response, tar_file_name, extract_dir):
with open(tar_file_name, "wb") as fw:
for chunk in http_response.iter_content(1024):
if chunk:
fw.write(chunk)
tar = tarfile.open(tar_file_name, "r:gz")
file_names = tar.getnames()
for file_name in file_names:
tar.extract(file_name, extract_dir)
tar.close()
os.remove(tar_file_name)
def check_config(config: typing.Dict, required_arguments: typing.List):
no_arguments = []
error_arguments = []
for require_argument in required_arguments:
if isinstance(require_argument, tuple):
config_value = config.get(require_argument[0], None)
if isinstance(require_argument[1], (tuple, list)):
if config_value not in require_argument[1]:
error_arguments.append(require_argument)
elif config_value != require_argument[1]:
error_arguments.append(require_argument)
elif require_argument not in config:
no_arguments.append(require_argument)
if no_arguments or error_arguments:
raise Exception(
"the following arguments are required: {} {}".format(
",".join(no_arguments),
",".join(["{}={}".format(a[0], a[1]) for a in error_arguments]),
)
)
def preprocess(**kwargs):
kwargs.pop('self', None)
kwargs.pop('kwargs', None)
config_data = kwargs.pop('config_data', {})
dsl_data = kwargs.pop('dsl_data', {})
output_path = kwargs.pop('output_path', None)
if output_path is not None:
config_data['output_path'] = os.path.abspath(output_path)
local = config_data.pop('local', {})
party_id = kwargs.pop('party_id', None)
role = kwargs.pop('role', None)
if party_id is not None:
kwargs['party_id'] = local['party_id'] = int(party_id)
if role is not None:
kwargs['role'] = local['role'] = role
if local:
config_data['local'] = local
for k, v in kwargs.items():
if v is not None:
if k in {'job_id', 'model_version'}:
v = str(v)
elif k in {'party_id', 'step_index'}:
v = int(v)
config_data[k] = v
return config_data, dsl_data
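# Illustrative sketch (hypothetical values): unlike the CLI variant, this
# preprocess coerces types, e.g. party_id/step_index to int and
# job_id/model_version to str:
#
#     config_data, _ = preprocess(role='guest', party_id='9999', job_id=202201010000)
#     # config_data == {'local': {'party_id': 9999, 'role': 'guest'},
#     #                 'party_id': 9999, 'role': 'guest',
#     #                 'job_id': '202201010000'}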
def check_output_path(path):
if not os.path.isabs(path):
return os.path.join(os.path.abspath(os.curdir), path)
return path
def string_to_bytes(string):
return string if isinstance(string, bytes) else string.encode(encoding="utf-8")
def get_lan_ip():
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(
fcntl.ioctl(
s.fileno(),
0x8915,
struct.pack("256s", string_to_bytes(ifname[:15])),
)[20:24]
)
ip = socket.gethostbyname(socket.getfqdn())
if ip.startswith("127.") and os.name != "nt":
interfaces = [
"bond1",
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
            except IOError:
pass
return ip or ""
class CustomJSONEncoder(json.JSONEncoder):
def __init__(self, **kwargs):
super(CustomJSONEncoder, self).__init__(**kwargs)
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime("%Y-%m-%d %H:%M:%S")
elif isinstance(obj, datetime.date):
return obj.strftime("%Y-%m-%d")
elif isinstance(obj, datetime.timedelta):
return str(obj)
elif issubclass(type(obj), Enum) or issubclass(type(obj), IntEnum):
return obj.value
else:
return json.JSONEncoder.default(self, obj)
def json_dumps(src, byte=False, indent=None):
if byte:
return string_to_bytes(json.dumps(src, indent=indent, cls=CustomJSONEncoder))
else:
return json.dumps(src, indent=indent, cls=CustomJSONEncoder)
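# Illustrative sketch: CustomJSONEncoder makes datetime and Enum values
# serializable, so for example:
#
#     import datetime
#     json_dumps({'t': datetime.datetime(2022, 1, 1, 12, 0)})
#     # -> '{"t": "2022-01-01 12:00:00"}'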
| 5,047 | 28.694118 | 85 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/base.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import json
import sys
import traceback
from base64 import b64encode
from hmac import HMAC
from time import time
from urllib.parse import quote, urlencode
from uuid import uuid1
import requests
from flow_sdk.client.api.base import BaseFlowAPI
def _is_api_endpoint(obj):
return isinstance(obj, BaseFlowAPI)
class BaseFlowClient:
API_BASE_URL = ''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, api in api_endpoints:
api_cls = type(api)
api = api_cls(self)
setattr(self, name, api)
return self
def __init__(self, ip, port, version, app_key=None, secret_key=None):
self._http = requests.Session()
self.ip = ip
self.port = port
self.version = version
self.app_key = app_key if app_key and secret_key else None
self.secret_key = secret_key if app_key and secret_key else None
def _request(self, method, url, **kwargs):
stream = kwargs.pop('stream', self._http.stream)
prepped = requests.Request(method, self.API_BASE_URL + url, **kwargs).prepare()
if self.app_key and self.secret_key:
timestamp = str(round(time() * 1000))
nonce = str(uuid1())
signature = b64encode(HMAC(self.secret_key.encode('ascii'), b'\n'.join([
timestamp.encode('ascii'),
nonce.encode('ascii'),
self.app_key.encode('ascii'),
prepped.path_url.encode('ascii'),
prepped.body if kwargs.get('json') else b'',
urlencode(sorted(kwargs['data'].items()), quote_via=quote, safe='-._~').encode('ascii')
if kwargs.get('data') and isinstance(kwargs['data'], dict) else b'',
]), 'sha1').digest()).decode('ascii')
prepped.headers.update({
'TIMESTAMP': timestamp,
'NONCE': nonce,
'APP_KEY': self.app_key,
'SIGNATURE': signature,
})
try:
response = self._http.send(prepped, stream=stream)
except Exception as e:
response = {
'retcode': 100,
'retmsg': str(e),
}
if 'connection refused' in response['retmsg'].lower():
response['retmsg'] = 'Connection refused, Please check if the fate flow service is started'
else:
exc_type, exc_value, exc_traceback_obj = sys.exc_info()
response['traceback'] = traceback.format_exception(exc_type, exc_value, exc_traceback_obj)
return response
@staticmethod
def _decode_result(response):
try:
result = json.loads(response.content.decode('utf-8', 'ignore'), strict=False)
except (TypeError, ValueError):
return response
else:
return result
def _handle_result(self, response):
try:
if isinstance(response, requests.models.Response):
return response.json()
elif isinstance(response, dict):
return response
else:
return self._decode_result(response)
except json.decoder.JSONDecodeError:
res = {'retcode': 100,
'retmsg': "Internal server error. Nothing in response. You may check out the configuration in "
"'FATE/conf/service_conf.yaml' and restart fate flow server."}
return res
def get(self, url, **kwargs):
return self._request(method='get', url=url, **kwargs)
def post(self, url, **kwargs):
return self._request(method='post', url=url, **kwargs)
| 4,394 | 34.443548 | 114 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.base import BaseFlowClient
from flow_sdk.client import api
class FlowClient(BaseFlowClient):
job = api.Job()
component = api.Component()
data = api.Data()
queue = api.Queue()
table = api.Table()
task = api.Task()
model = api.Model()
tag = api.Tag()
privilege = api.Privilege()
checkpoint = api.Checkpoint()
remote_version = api.Version()
test = api.Test()
def __init__(self, ip, port, version, app_key=None, secret_key=None):
super().__init__(ip, port, version, app_key, secret_key)
self.API_BASE_URL = 'http://%s:%s/%s/' % (ip, port, version)
| 1,252 | 32.864865 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/base.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class BaseFlowAPI:
def __init__(self, client=None):
self._client = client
def _get(self, url, handle_result=True, **kwargs):
if handle_result:
return self._handle_result(self._client.get(url, **kwargs))
else:
return self._client.get(url, **kwargs)
def _post(self, url, handle_result=True, **kwargs):
if handle_result:
return self._handle_result(self._client.post(url, **kwargs))
else:
return self._client.post(url, **kwargs)
def _handle_result(self, response):
return self._client._handle_result(response)
@property
def session(self):
return self._client.session
@property
def ip(self):
return self._client.ip
@property
def port(self):
return self._client.port
@property
def version(self):
return self._client.version
| 1,520 | 27.698113 | 75 |
py
|
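Every concrete API group that follows is built the same way: subclass BaseFlowAPI and wrap one endpoint per method through _get/_post. A hypothetical minimal subclass (the route 'service/ping' is invented for illustration, not a real FATE Flow endpoint):

from flow_sdk.client.api.base import BaseFlowAPI

class Ping(BaseFlowAPI):
    def ping(self):
        # _post delegates to the shared client and normalizes the response
        return self._post(url='service/ping', json={})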
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/checkpoint.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess, check_config
class Checkpoint(BaseFlowAPI):
def list_checkpoints(self, **kwargs):
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['role', 'party_id', 'model_id', 'model_version', 'component_name'])
return self._post(url='checkpoint/list', json=config_data)
def get_checkpoint(self, **kwargs):
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['role', 'party_id', 'model_id', 'model_version', 'component_name'])
if len(config_data.keys() & {'step_index', 'step_name'}) != 1:
            raise KeyError('exactly one of step_index or step_name is required')
return self._post(url='checkpoint/get', json=config_data)
| 1,510 | 42.171429 | 108 |
py
|
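Calling the checkpoint API requires the five identifying arguments plus exactly one of step_index or step_name, as the check above enforces. A sketch with made-up identifiers and server address:

from flow_sdk.client import FlowClient

client = FlowClient(ip='127.0.0.1', port=9380, version='v1')  # assumed address

common = dict(role='guest', party_id=9999,
              model_id='guest-9999#host-10000#model',         # hypothetical ids
              model_version='202301010000000000000000',
              component_name='hetero_lr_0')

print(client.checkpoint.list_checkpoints(**common))
# pass step_index OR step_name, never both
print(client.checkpoint.get_checkpoint(step_index=5, **common))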
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess
class Test(BaseFlowAPI):
def toy(self, guest_party_id: str, host_party_id: str, guest_user_name: str = "", host_user_name: str = "",
task_cores: int = 2, timeout: int = 60):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
conf = self.toy_conf(**kwargs)
return self._post(url='job/submit', json={
'job_runtime_conf': conf,
'job_dsl': self.toy_dsl(),
})
@classmethod
def toy_conf(cls, guest_party_id: str, host_party_id: str, guest_user_name: str = "", host_user_name: str = "",
task_cores: int = 2, **kwargs):
job_conf = {
"dsl_version": 2,
"job_parameters": {
},
"role": {
"guest": [
guest_party_id
],
"host": [
host_party_id
]
},
"component_parameters": {
"role": {
"guest": {
"0": {
"secure_add_example_0": {
"seed": 123
}
}
},
"host": {
"secure_add_example_0": {
"seed": 321
}
}
},
"common": {
"secure_add_example_0": {
"partition": 4,
"data_num": 1000
}
}
}
}
job_conf["initiator"] = {
"role": "guest",
"party_id": guest_party_id
}
job_conf["role"]["guest"] = [guest_party_id]
job_conf["role"]["host"] = [host_party_id]
job_conf["job_parameters"]["common"] = {
"task_cores": task_cores
}
job_conf["job_parameters"]["role"] = {
"guest": {"0": {"user": guest_user_name}},
"host": {"0": {"user": host_user_name}}
}
return job_conf
@classmethod
def toy_dsl(cls):
dsl = {
"components": {
"secure_add_example_0": {
"module": "SecureAddExample"
}
}
}
return dsl
@classmethod
def check_toy(cls, guest_party_id, job_status, log_dir):
if job_status in {"success", "canceled"}:
info_log = os.path.join(log_dir, "guest", guest_party_id, "INFO.log")
with open(info_log, "r") as fin:
for line in fin:
if line.find("secure_add_guest") != -1:
yield line.strip()
else:
error_log = os.path.join(log_dir, "guest", guest_party_id, "ERROR.log")
with open(error_log, "r") as fin:
for line in fin:
yield line.strip()
| 3,698 | 32.627273 | 115 |
py
|
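toy_conf and toy_dsl build an ordinary job configuration, so both payloads can be inspected offline before anything is submitted. A quick sketch with made-up party ids:

import json
from flow_sdk.client.api.test import Test

conf = Test.toy_conf(guest_party_id='9999', host_party_id='10000', task_cores=2)
print(json.dumps(conf, indent=2))            # full runtime conf for the toy job
print(json.dumps(Test.toy_dsl(), indent=2))  # single SecureAddExample component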
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/table.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess, check_config
class Table(BaseFlowAPI):
def info(self, namespace, table_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['namespace', 'table_name'])
return self._post(url='table/table_info', json=config_data)
def delete(self, namespace=None, table_name=None, job_id=None, role=None, party_id=None, component_name=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='table/delete', json=config_data)
def bind(
self, engine: str, name: str, namespace: str, address: dict,
partitions: int = None, head: bool = None, id_delimiter: str = None,
in_serialized: bool = None, drop: bool = None,
id_column: str = None, feature_column: str = None,
):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='table/bind', json=config_data)
| 1,706 | 40.634146 | 114 |
py
|
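A sketch of querying table metadata through the wrapper above (server address and table names are assumptions):

from flow_sdk.client import FlowClient

client = FlowClient(ip='127.0.0.1', port=9380, version='v1')

# namespace/table_name identify a table previously uploaded or bound
print(client.table.info(namespace='experiment', table_name='breast_hetero_guest'))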
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/task.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.utils import preprocess
from flow_sdk.client.api.base import BaseFlowAPI
class Task(BaseFlowAPI):
def list(self, limit=10):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/list/task', json=config_data)
def query(self, job_id=None, role=None, party_id=None, component_name=None, status=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/task/query', json=config_data)
| 1,144 | 37.166667 | 93 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/queue.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.api.base import BaseFlowAPI
class Queue(BaseFlowAPI):
def clean(self):
return self._post(url='job/clean/queue')
| 763 | 33.727273 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from contextlib import closing
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess, get_project_base_directory
class Model(BaseFlowAPI):
def load(self, config_data=None, job_id=None):
if config_data is None and job_id is None:
return {
"retcode": 100,
"retmsg": "Load model failed. No arguments received, "
"please provide one of arguments from job id and conf path."
}
if config_data is not None and job_id is not None:
return {
"retcode": 100,
"retmsg": "Load model failed. Please do not provide job id and "
"conf path at the same time."
}
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/load', json=config_data)
def bind(self, config_data, job_id=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/bind', json=config_data)
def import_model(self, config_data, from_database=False):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
if kwargs.pop('from_database'):
return self._post(url='model/restore', json=config_data)
file_path = config_data['file']
if not os.path.isabs(file_path):
file_path = os.path.join(get_project_base_directory(), file_path)
        if not os.path.exists(file_path):
            # the exception must be raised, and only when the file is missing
            raise FileNotFoundError(
                'The file is obtained from the fate flow client machine, but it does not exist, '
                'please check the path: {}'.format(file_path)
            )
config_data['force_update'] = int(config_data.get('force_update', False))
files = {'file': open(file_path, 'rb')}
return self._post(url='model/import', data=config_data, files=files)
def export_model(self, config_data, to_database=False):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
if not config_data.pop("to_database"):
with closing(self._get(url='model/export', handle_result=False, json=config_data, stream=True)) as response:
if response.status_code == 200:
archive_file_name = re.findall("filename=(.+)", response.headers["Content-Disposition"])[0]
os.makedirs(config_data["output_path"], exist_ok=True)
archive_file_path = os.path.join(config_data["output_path"], archive_file_name)
with open(archive_file_path, 'wb') as fw:
for chunk in response.iter_content(1024):
if chunk:
fw.write(chunk)
response = {'retcode': 0,
'file': archive_file_path,
'retmsg': 'download successfully, please check {}'.format(archive_file_path)}
else:
response = response.json()
return response
return self._post(url='model/store', json=config_data)
def migrate(self, config_data):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/migrate', json=config_data)
def tag_model(self, job_id, tag_name, remove=False):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
if not config_data.pop('remove'):
return self._post(url='model/model_tag/create', json=config_data)
else:
return self._post(url='model/model_tag/remove', json=config_data)
def tag_list(self, job_id):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/model_tag/retrieve', json=config_data)
def deploy(self, model_id, model_version, cpn_list=None, predict_dsl=None, components_checkpoint=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/deploy', json=config_data)
def get_predict_dsl(self, model_id, model_version):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/get/predict/dsl', json=config_data)
def get_predict_conf(self, model_id, model_version):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/get/predict/conf', json=config_data)
def get_model_info(self, model_id=None, model_version=None, role=None, party_id=None, query_filters=None, **kwargs):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/query', json=config_data)
def homo_convert(self, config_data):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/homo/convert', json=config_data)
def homo_deploy(self, config_data):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('deployment_type') == "kfserving":
kube_config = config_data.get('deployment_parameters', {}).get('config_file')
if kube_config:
if not os.path.isabs(kube_config):
kube_config = os.path.join(get_project_base_directory(), kube_config)
if os.path.exists(kube_config):
with open(kube_config, 'r') as fp:
config_data['deployment_parameters']['config_file_content'] = fp.read()
del config_data['deployment_parameters']['config_file']
else:
raise Exception('The kube_config file is obtained from the fate flow client machine, '
'but it does not exist, please check the path: {}'.format(kube_config))
return self._post(url='model/homo/deploy', json=config_data)
| 6,695 | 43.939597 | 120 |
py
|
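export_model above either streams a tar archive into output_path or, with to_database=True, posts to model/store. A sketch of the file-based branch (all identifiers are hypothetical):

from flow_sdk.client import FlowClient

client = FlowClient(ip='127.0.0.1', port=9380, version='v1')

config = {
    'role': 'guest',
    'party_id': 9999,
    'model_id': 'guest-9999#host-10000#model',      # made-up identifiers
    'model_version': '202301010000000000000000',
    'output_path': '/tmp/fate_models',
}
result = client.model.export_model(config_data=config, to_database=False)
print(result.get('retmsg'))  # points at the downloaded archive on success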
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/privilege.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess, check_config
class Privilege(BaseFlowAPI):
def query(self, src_party_id, src_role):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['src_party_id', 'src_role'])
return self._post(url='permission/query/privilege', json=config_data)
def grant(self, src_party_id, src_role):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['src_party_id', 'src_role'])
return self._post(url='permission/grant/privilege', json=config_data)
def delete(self, src_party_id, src_role):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['src_party_id', 'src_role'])
return self._post(url='permission/delete/privilege', json=config_data)
| 1,629 | 41.894737 | 89 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/tag.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess
class Tag(BaseFlowAPI):
def create(self, tag_name, tag_desc=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/tag/create', json=config_data)
def query(self, tag_name, with_model=False):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/tag/retrieve', json=config_data)
def update(self, tag_name, new_tag_name=None, new_tag_desc=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/tag/update', json=config_data)
def delete(self, tag_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/tag/destroy', json=config_data)
def list(self, limit=10):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='model/tag/list', json=config_data)
| 1,697 | 36.733333 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/data.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from flow_sdk.client.api.base import BaseFlowAPI
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from flow_sdk.utils import preprocess, start_cluster_standalone_job_server, get_project_base_directory, json_dumps
class Data(BaseFlowAPI):
def upload(self, config_data, verbose=0, drop=0):
kwargs = locals()
kwargs['drop'] = int(kwargs['drop'])
kwargs['verbose'] = int(kwargs['verbose'])
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('use_local_data', 1):
file_name = config_data.get('file')
if not os.path.isabs(file_name):
file_name = os.path.join(get_project_base_directory(), file_name)
if os.path.exists(file_name):
with open(file_name, 'rb') as fp:
data = MultipartEncoder(
fields={'file': (os.path.basename(file_name), fp, 'application/octet-stream')}
)
tag = [0]
def read_callback(monitor):
if config_data.get('verbose') == 1:
sys.stdout.write(
"\r UPLOADING:{0}{1}".format("|" * (monitor.bytes_read * 100 // monitor.len),
'%.2f%%' % (monitor.bytes_read * 100 // monitor.len)))
sys.stdout.flush()
if monitor.bytes_read / monitor.len == 1:
tag[0] += 1
if tag[0] == 2:
sys.stdout.write('\n')
data = MultipartEncoderMonitor(data, read_callback)
return self._post(url='data/upload', data=data,
params=json_dumps(config_data), headers={'Content-Type': data.content_type})
else:
raise Exception('The file is obtained from the fate flow client machine, but it does not exist, '
'please check the path: {}'.format(file_name))
else:
return self._post(url='data/upload', json=config_data)
def download(self, config_data):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
response = self._post(url='data/download', json=config_data)
if response['retcode'] == 999:
start_cluster_standalone_job_server()
return self._post(url='data/download', json=config_data)
return response
def upload_history(self, limit=10, job_id=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
response = self._post(url='data/upload/history', json=config_data)
if response['retcode'] == 999:
start_cluster_standalone_job_server()
return self._post(url='data/upload/history', json=config_data)
return response
# TODO
def download_history(self):
pass
| 3,669 | 41.183908 | 115 |
py
|
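The upload above streams the file with requests_toolbelt and reports progress through a monitor callback. The same pattern in isolation (the URL is a placeholder, and data.csv is assumed to exist locally):

import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor

def progress(monitor):
    # bytes_read / len is the fraction of the multipart body sent so far
    print('\rUPLOADING: %.2f%%' % (monitor.bytes_read * 100 / monitor.len), end='')

with open('data.csv', 'rb') as fp:
    encoder = MultipartEncoder(
        fields={'file': ('data.csv', fp, 'application/octet-stream')})
    monitor = MultipartEncoderMonitor(encoder, progress)
    requests.post('http://127.0.0.1:9380/v1/data/upload',   # placeholder URL
                  data=monitor, headers={'Content-Type': monitor.content_type})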
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/component.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from contextlib import closing
from typing import List
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess, check_config, download_from_request
class Component(BaseFlowAPI):
def list(self, job_id):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='tracking/component/list', json=config_data)
def metrics(self, job_id, role, party_id, component_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['job_id', 'component_name', 'role', 'party_id'])
return self._post(url='tracking/component/metrics', json=config_data)
def metric_all(self, job_id, role, party_id, component_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['job_id', 'component_name', 'role', 'party_id'])
return self._post(url='tracking/component/metric/all', json=config_data)
def metric_delete(self, date=None, job_id=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('date'):
config_data['model'] = config_data.pop('date')
return self._post(url='tracking/component/metric/delete', json=config_data)
def parameters(self, job_id, role, party_id, component_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['job_id', 'component_name', 'role', 'party_id'])
return self._post(url='tracking/component/parameters', json=config_data)
def output_data(self, job_id, role, party_id, component_name, output_path, limit=-1):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['job_id', 'component_name', 'role', 'party_id', 'output_path'])
tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(config_data['job_id'],
config_data['component_name'],
config_data['role'],
config_data['party_id'])
extract_dir = os.path.join(config_data['output_path'], tar_file_name.replace('.tar.gz', ''))
with closing(self._get(url='tracking/component/output/data/download',
handle_result=False, json=config_data, stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
response = {'retcode': 0,
'directory': extract_dir,
'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
except BaseException:
response = {'retcode': 100,
'retmsg': 'download failed, please check if the parameters are correct'}
else:
response = response.json()
return response
def output_model(self, job_id, role, party_id, component_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['job_id', 'component_name', 'role', 'party_id'])
return self._post(url='tracking/component/output/model', json=config_data)
def output_data_table(self, job_id, role, party_id, component_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['job_id', 'component_name', 'role', 'party_id'])
return self._post(url='tracking/component/output/data/table', json=config_data)
def get_summary(self, job_id, role, party_id, component_name):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data,
required_arguments=['job_id', 'component_name', 'role', 'party_id'])
res = self._post(url='tracking/component/summary/download', json=config_data)
if not res.get('data'):
res['data'] = {}
return res
def hetero_model_merge(
self,
model_id: str, model_version: str, guest_party_id: str, host_party_ids: List[str],
component_name: str, model_type: str, output_format: str, target_name: str = None,
host_rename: bool = None, include_guest_coef: bool = None,
):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=(
'model_id', 'model_version', 'guest_party_id', 'host_party_ids',
'component_name', 'model_type', 'output_format',
))
res = self._post(url='component/hetero/merge', json=config_data)
return res
def woe_array_extract(
self,
model_id: str, model_version: str, party_id: str, role: str, component_name: str,
):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=(
'model_id', 'model_version', 'party_id', 'role', 'component_name',
))
res = self._post(url='component/woe_array/extract', json=config_data)
return res
def woe_array_merge(
self,
model_id: str, model_version: str, party_id: str, role: str, component_name: str,
woe_array: dict,
):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=(
'model_id', 'model_version', 'party_id', 'role', 'component_name',
'woe_array',
))
res = self._post(url='component/woe_array/merge', json=config_data)
return res
| 6,888 | 44.026144 | 119 |
py
|
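output_data above leans on download_from_request to save the streamed tar.gz and unpack it. The underlying pattern, sketched with the standard library only (URL and file names are placeholders):

import tarfile
import requests

# in the client this URL is tracking/component/output/data/download
with requests.get('http://127.0.0.1:9380/v1/some/download', stream=True) as resp:
    with open('output.tar.gz', 'wb') as fw:
        for chunk in resp.iter_content(1024):
            if chunk:
                fw.write(chunk)

with tarfile.open('output.tar.gz', 'r:gz') as tar:
    tar.extractall('output_dir')  # only extract archives from a trusted server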
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/version.py
|
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.api.base import BaseFlowAPI
class Version(BaseFlowAPI):
def api(self):
return self._post(url='version/get').get('data', {}).get('API')
def fate(self):
return self._post(url='version/get', json={'module': 'FATE'}).get('data', {}).get('FATE')
def fate_flow(self):
return self._post(url='version/get', json={'module': 'FATEFlow'}).get('data', {}).get('FATEFlow')
def fate_board(self):
return self._post(url='version/get', json={'module': 'FATEBoard'}).get('data', {}).get('FATEBoard')
def centos(self):
return self._post(url='version/get', json={'module': 'CENTOS'}).get('data', {}).get('CENTOS')
def ubuntu(self):
return self._post(url='version/get', json={'module': 'UBUNTU'}).get('data', {}).get('UBUNTU')
def python(self):
return self._post(url='version/get', json={'module': 'PYTHON'}).get('data', {}).get('PYTHON')
def jdk(self):
return self._post(url='version/get', json={'module': 'JDK'}).get('data', {}).get('JDK')
def maven(self):
return self._post(url='version/get', json={'module': 'MAVEN'}).get('data', {}).get('MAVEN')
def eggroll(self):
return self._post(url='version/get', json={'module': 'EGGROLL'}).get('data', {}).get('EGGROLL')
def spark(self):
return self._post(url='version/get', json={'module': 'SPARK'}).get('data', {}).get('SPARK')
| 2,036 | 37.433962 | 107 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flow_sdk.client.api.job import Job
from flow_sdk.client.api.component import Component
from flow_sdk.client.api.data import Data
from flow_sdk.client.api.model import Model
from flow_sdk.client.api.privilege import Privilege
from flow_sdk.client.api.queue import Queue
from flow_sdk.client.api.table import Table
from flow_sdk.client.api.task import Task
from flow_sdk.client.api.tag import Tag
from flow_sdk.client.api.checkpoint import Checkpoint
from flow_sdk.client.api.version import Version
from flow_sdk.client.api.test import Test
| 1,160 | 40.464286 | 75 |
py
|
FATE
|
FATE-master/python/fate_client/flow_sdk/client/api/job.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from contextlib import closing
from flow_sdk.client.api.base import BaseFlowAPI
from flow_sdk.utils import preprocess, check_config, download_from_request
class Job(BaseFlowAPI):
def list(self, limit=10):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/list/job', json=config_data)
def view(self, job_id=None, role=None, party_id=None, status=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/data/view/query', json=config_data)
def submit(self, config_data, dsl_data=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/submit', json={
'job_runtime_conf': config_data,
'job_dsl': dsl_data,
})
def stop(self, job_id):
job_id = str(job_id)
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['job_id'])
return self._post(url='job/stop', json=config_data)
def query(self, job_id=None, role=None, party_id=None, component_name=None, status=None):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
return self._post(url='job/query', json=config_data)
def config(self, job_id, role, party_id, output_path):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['job_id', 'role', 'party_id', 'output_path'])
response = self._post(url='job/config', json=config_data)
if response['retcode'] == 0:
job_id = response['data']['job_id']
download_directory = os.path.join(config_data['output_path'], 'job_{}_config'.format(job_id))
os.makedirs(download_directory, exist_ok=True)
for k, v in response['data'].items():
if k == 'job_id':
continue
with open('{}/{}.json'.format(download_directory, k), 'w') as fw:
json.dump(v, fw, indent=4)
del response['data']['dsl']
del response['data']['runtime_conf']
response['directory'] = download_directory
response['retmsg'] = 'download successfully, please check {} directory'.format(download_directory)
return response
def log(self, job_id, output_path):
kwargs = locals()
config_data, dsl_data = preprocess(**kwargs)
check_config(config=config_data, required_arguments=['job_id', 'output_path'])
job_id = config_data['job_id']
tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
extract_dir = os.path.join(config_data['output_path'], 'job_{}_log'.format(job_id))
with closing(self._post(url='job/log/download', handle_result=False, json=config_data, stream=True)) as response:
if response.status_code == 200:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
response = {'retcode': 0,
'directory': extract_dir,
'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
else:
response = response.json()
return response
def generate_dsl(self, train_dsl, cpn):
"""
@param train_dsl: dict or str
@param cpn: list or str
"""
if isinstance(train_dsl, dict):
train_dsl = json.dumps(train_dsl)
config_data = {
"cpn_str": cpn,
"train_dsl": train_dsl,
"version": "2"
}
res = self._post(url="job/dsl/generate", handle_result=True, json=config_data)
if not res.get("data"):
res["data"] = {}
return res
# TODO complete it in next version
# def clean(self, job_id=None, role=None, party_id=None, component_name=None):
# kwargs = locals()
# config_data, dsl_data = preprocess(**kwargs)
# check_config(config=config_data, required_arguments=['job_id'])
# return self._post(url='job/clean', json=config_data)
| 4,908 | 39.908333 | 121 |
py
|
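generate_dsl trims a training DSL down to the listed components, which is how a predict-time DSL is derived from a training one. A sketch with a made-up two-component pipeline:

from flow_sdk.client import FlowClient

client = FlowClient(ip='127.0.0.1', port=9380, version='v1')

train_dsl = {   # minimal, made-up training DSL
    'components': {
        'reader_0': {'module': 'Reader'},
        'hetero_lr_0': {'module': 'HeteroLR'},
    }
}
res = client.job.generate_dsl(train_dsl=train_dsl, cpn=['hetero_lr_0'])
print(res.get('data'))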
FATE
|
FATE-master/python/federatedml/model_base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import typing
import numpy as np
from google.protobuf import json_format
from fate_arch.computing import is_table
from federatedml.callbacks.callback_list import CallbackList
from federatedml.feature.instance import Instance
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.protobuf import deserialize_models
from federatedml.statistic.data_overview import header_alignment, predict_detail_dict_to_str
from federatedml.util import LOGGER, abnormal_detection
from federatedml.util.anonymous_generator_util import Anonymous
from federatedml.util.component_properties import ComponentProperties, RunningFuncs
from federatedml.util.io_check import assert_match_id_consistent
def serialize_models(models):
serialized_models: typing.Dict[str, typing.Tuple[str, bytes, dict]] = {}
for model_name, buffer_object in models.items():
serialized_string = buffer_object.SerializeToString()
pb_name = type(buffer_object).__name__
json_format_dict = json_format.MessageToDict(
buffer_object, including_default_value_fields=True
)
serialized_models[model_name] = (
pb_name,
serialized_string,
json_format_dict,
)
return serialized_models
class ComponentOutput:
def __init__(self, data, models, cache: typing.List[tuple]) -> None:
self._data = data
if not isinstance(self._data, list):
self._data = [data]
self._models = models
if self._models is None:
self._models = {}
self._cache = cache
if not isinstance(self._cache, list):
self._cache = [cache]
@property
def data(self) -> list:
return self._data
@property
def model(self):
return serialize_models(self._models)
@property
def cache(self):
return self._cache
class MetricType:
LOSS = "LOSS"
class Metric:
def __init__(self, key, value: float, timestamp: float = None):
self.key = key
self.value = value
self.timestamp = timestamp
def to_dict(self):
return dict(key=self.key, value=self.value, timestamp=self.timestamp)
class MetricMeta:
def __init__(self, name: str, metric_type: MetricType, extra_metas: dict = None):
self.name = name
self.metric_type = metric_type
self.metas = {}
self.extra_metas = extra_metas
def update_metas(self, metas: dict):
self.metas.update(metas)
def to_dict(self):
return dict(
name=self.name,
metric_type=self.metric_type,
metas=self.metas,
extra_metas=self.extra_metas,
)
class CallbacksVariable(object):
def __init__(self):
self.stop_training = False
self.best_iteration = -1
self.validation_summary = None
class WarpedTrackerClient:
def __init__(self, tracker) -> None:
self._tracker = tracker
def log_metric_data(
self, metric_namespace: str, metric_name: str, metrics: typing.List[Metric]
):
return self._tracker.log_metric_data(
metric_namespace=metric_namespace,
metric_name=metric_name,
metrics=[metric.to_dict() for metric in metrics],
)
def set_metric_meta(
self, metric_namespace: str, metric_name: str, metric_meta: MetricMeta
):
return self._tracker.set_metric_meta(
metric_namespace=metric_namespace,
metric_name=metric_name,
metric_meta=metric_meta.to_dict(),
)
def log_component_summary(self, summary_data: dict):
return self._tracker.log_component_summary(summary_data=summary_data)
class ModelBase(object):
component_name = None
@classmethod
def set_component_name(cls, name):
cls.component_name = name
@classmethod
def get_component_name(cls):
return cls.component_name
def __init__(self):
self.model_output = None
self.mode = None
self.role = None
self.data_output = None
self.cache_output = None
self.model_param = None
self.transfer_variable = None
self.flowid = ""
self.task_version_id = ""
self.need_one_vs_rest = False
self.callback_one_vs_rest = False
self.checkpoint_manager = None
self.cv_fold = 0
self.validation_freqs = None
self.component_properties = ComponentProperties()
self._summary = dict()
self._align_cache = dict()
self._tracker = None
self.step_name = "step_name"
self.callback_list: CallbackList
self.callback_variables = CallbacksVariable()
self.anonymous_generator = None
@property
def tracker(self) -> WarpedTrackerClient:
if self._tracker is None:
raise RuntimeError(f"use tracker before set")
return self._tracker
@tracker.setter
def tracker(self, value):
self._tracker = WarpedTrackerClient(value)
@property
def stop_training(self):
return self.callback_variables.stop_training
@property
def need_cv(self):
return self.component_properties.need_cv
@property
def need_run(self):
return self.component_properties.need_run
@need_run.setter
def need_run(self, value: bool):
self.component_properties.need_run = value
def _init_model(self, model):
pass
def load_model(self, model_dict):
pass
def _parse_need_run(self, model_dict, model_meta_name):
meta_obj = list(model_dict.get("model").values())[0].get(model_meta_name)
need_run = meta_obj.need_run
# self.need_run = need_run
self.component_properties.need_run = need_run
def run(self, cpn_input, retry: bool = True):
self.task_version_id = cpn_input.task_version_id
self.tracker = cpn_input.tracker
self.checkpoint_manager = cpn_input.checkpoint_manager
deserialize_models(cpn_input.models)
# retry
if (
retry
and hasattr(self, '_retry')
and callable(self._retry)
and self.checkpoint_manager is not None
and self.checkpoint_manager.latest_checkpoint is not None
):
self._retry(cpn_input=cpn_input)
# normal
else:
self._run(cpn_input=cpn_input)
return ComponentOutput(self.save_data(), self._export(), self.save_cache())
def _export(self):
# export model
try:
model = self._export_model()
meta = self._export_meta()
export_dict = {"Meta": meta, "Param": model}
except NotImplementedError:
export_dict = self.export_model()
# export nothing, return
if export_dict is None:
return export_dict
try:
meta_name = [k for k in export_dict if k.endswith("Meta")][0]
except BaseException:
raise KeyError("Meta not found in export model")
try:
param_name = [k for k in export_dict if k.endswith("Param")][0]
except BaseException:
raise KeyError("Param not found in export model")
meta = export_dict[meta_name]
# set component name
if hasattr(meta, "component"):
meta.component = self.get_component_name()
else:
import warnings
warnings.warn(f"{meta} should add `component` field")
return export_dict
def _export_meta(self):
raise NotImplementedError("_export_meta not implemented")
def _export_model(self):
raise NotImplementedError("_export_model not implemented")
def _run(self, cpn_input) -> None:
        # parameters
self.model_param.update(cpn_input.parameters)
self.model_param.check()
self.component_properties.parse_component_param(
cpn_input.roles, self.model_param
)
self.role = self.component_properties.role
self.component_properties.parse_dsl_args(cpn_input.datasets, cpn_input.models)
self.component_properties.parse_caches(cpn_input.caches)
self.anonymous_generator = Anonymous(role=self.role, party_id=self.component_properties.local_partyid)
# init component, implemented by subclasses
self._init_model(self.model_param)
self.callback_list = CallbackList(self.role, self.mode, self)
if hasattr(self.model_param, "callback_param"):
callback_param = getattr(self.model_param, "callback_param")
self.callback_list.init_callback_list(callback_param)
running_funcs = self.component_properties.extract_running_rules(
datasets=cpn_input.datasets, models=cpn_input.models, cpn=self
)
LOGGER.debug(f"running_funcs: {running_funcs.todo_func_list}")
saved_result = []
for func, params, save_result, use_previews in running_funcs:
# for func, params in zip(todo_func_list, todo_func_params):
if use_previews:
if params:
real_param = [saved_result, params]
else:
real_param = saved_result
LOGGER.debug("func: {}".format(func))
this_data_output = func(*real_param)
saved_result = []
else:
this_data_output = func(*params)
if save_result:
saved_result.append(this_data_output)
if len(saved_result) == 1:
self.data_output = saved_result[0]
# LOGGER.debug("One data: {}".format(self.data_output.first()[1].features))
LOGGER.debug(
"saved_result is : {}, data_output: {}".format(
saved_result, self.data_output
)
)
# self.check_consistency()
self.save_summary()
def _retry(self, cpn_input) -> None:
self.model_param.update(cpn_input.parameters)
self.model_param.check()
self.component_properties.parse_component_param(
cpn_input.roles, self.model_param
)
self.role = self.component_properties.role
self.component_properties.parse_dsl_args(cpn_input.datasets, cpn_input.models)
self.component_properties.parse_caches(cpn_input.caches)
# init component, implemented by subclasses
self._init_model(self.model_param)
self.callback_list = CallbackList(self.role, self.mode, self)
if hasattr(self.model_param, "callback_param"):
callback_param = getattr(self.model_param, "callback_param")
self.callback_list.init_callback_list(callback_param)
(
train_data,
validate_data,
test_data,
data,
) = self.component_properties.extract_input_data(
datasets=cpn_input.datasets, model=self
)
running_funcs = RunningFuncs()
latest_checkpoint = self.get_latest_checkpoint()
running_funcs.add_func(self.load_model, [latest_checkpoint])
running_funcs = self.component_properties.warm_start_process(
running_funcs, self, train_data, validate_data
)
LOGGER.debug(f"running_funcs: {running_funcs.todo_func_list}")
self._execute_running_funcs(running_funcs)
def _execute_running_funcs(self, running_funcs):
saved_result = []
for func, params, save_result, use_previews in running_funcs:
# for func, params in zip(todo_func_list, todo_func_params):
if use_previews:
if params:
real_param = [saved_result, params]
else:
real_param = saved_result
LOGGER.debug("func: {}".format(func))
detected_func = assert_match_id_consistent(func)
this_data_output = detected_func(*real_param)
saved_result = []
else:
detected_func = assert_match_id_consistent(func)
this_data_output = detected_func(*params)
if save_result:
saved_result.append(this_data_output)
if len(saved_result) == 1:
self.data_output = saved_result[0]
LOGGER.debug(
"saved_result is : {}, data_output: {}".format(
saved_result, self.data_output
)
)
self.save_summary()
def export_serialized_models(self):
return serialize_models(self.export_model())
def get_metrics_param(self):
return EvaluateParam(eval_type="binary", pos_label=1)
def check_consistency(self):
if not is_table(self.data_output):
return
if (
self.component_properties.input_data_count
+ self.component_properties.input_eval_data_count
!= self.data_output.count()
and self.component_properties.input_data_count
!= self.component_properties.input_eval_data_count
):
raise ValueError("Input data count does not match with output data count")
def predict(self, data_inst):
pass
def fit(self, *args):
pass
def transform(self, data_inst):
pass
def cross_validation(self, data_inst):
pass
def stepwise(self, data_inst):
pass
def one_vs_rest_fit(self, train_data=None):
pass
def one_vs_rest_predict(self, train_data):
pass
def init_validation_strategy(self, train_data=None, validate_data=None):
pass
def save_data(self):
return self.data_output
def export_model(self):
return self.model_output
def save_cache(self):
return self.cache_output
def set_flowid(self, flowid):
# self.flowid = '.'.join([self.task_version_id, str(flowid)])
self.flowid = flowid
self.set_transfer_variable()
def set_transfer_variable(self):
if self.transfer_variable is not None:
LOGGER.debug(
"set flowid to transfer_variable, flowid: {}".format(self.flowid)
)
self.transfer_variable.set_flowid(self.flowid)
def set_task_version_id(self, task_version_id):
"""task_version_id: jobid + component_name, reserved variable"""
self.task_version_id = task_version_id
def get_metric_name(self, name_prefix):
if not self.need_cv:
return name_prefix
return "_".join(map(str, [name_prefix, self.flowid]))
def set_tracker(self, tracker):
self._tracker = tracker
def set_checkpoint_manager(self, checkpoint_manager):
checkpoint_manager.load_checkpoints_from_disk()
self.checkpoint_manager = checkpoint_manager
@staticmethod
def set_predict_data_schema(predict_datas, schemas):
if predict_datas is None:
return predict_datas
if isinstance(predict_datas, list):
predict_data = predict_datas[0]
schema = schemas[0]
else:
predict_data = predict_datas
schema = schemas
if predict_data is not None:
predict_data.schema = {
"header": [
"label",
"predict_result",
"predict_score",
"predict_detail",
"type",
],
"sid": schema.get("sid"),
"content_type": "predict_result"
}
if schema.get("match_id_name") is not None:
predict_data.schema["match_id_name"] = schema.get("match_id_name")
return predict_data
@staticmethod
def predict_score_to_output(
data_instances, predict_score, classes=None, threshold=0.5
):
"""
Get predict result output
Parameters
----------
data_instances: table, data used for prediction
predict_score: table, probability scores
classes: list or None, all classes/label names
threshold: float, predict threshold, used for binary label
Returns
-------
Table, predict result
"""
# regression
if classes is None:
predict_result = data_instances.join(
predict_score, lambda d, pred: [d.label,
pred,
pred,
predict_detail_dict_to_str({"label": pred})]
)
# binary
elif isinstance(classes, list) and len(classes) == 2:
class_neg, class_pos = classes[0], classes[1]
pred_label = predict_score.mapValues(
lambda x: class_pos if x > threshold else class_neg
)
predict_result = data_instances.mapValues(lambda x: x.label)
predict_result = predict_result.join(predict_score, lambda x, y: (x, y))
class_neg_name, class_pos_name = str(class_neg), str(class_pos)
predict_result = predict_result.join(
pred_label,
lambda x, y: [
x[0],
y,
x[1],
predict_detail_dict_to_str({class_neg_name: (1 - x[1]), class_pos_name: x[1]})
],
)
        # multi-class: input = array of predicted scores over all classes
elif isinstance(classes, list) and len(classes) > 2:
# pred_label = predict_score.mapValues(lambda x: classes[x.index(max(x))])
classes = [str(val) for val in classes]
predict_result = data_instances.mapValues(lambda x: x.label)
predict_result = predict_result.join(
predict_score,
lambda x, y: [
x,
int(classes[np.argmax(y)]),
float(np.max(y)),
predict_detail_dict_to_str(dict(zip(classes, list(y))))
],
)
else:
raise ValueError(
f"Model's classes type is {type(classes)}, classes must be None or list of length no less than 2."
)
def _transfer(instance, pred_res):
return Instance(features=pred_res, inst_id=instance.inst_id)
predict_result = data_instances.join(predict_result, _transfer)
return predict_result
def callback_meta(self, metric_name, metric_namespace, metric_meta: MetricMeta):
if self.need_cv:
metric_name = ".".join([metric_name, str(self.cv_fold)])
flow_id_list = self.flowid.split(".")
LOGGER.debug(
"Need cv, change callback_meta, flow_id_list: {}".format(flow_id_list)
)
if len(flow_id_list) > 1:
curve_name = ".".join(flow_id_list[1:])
metric_meta.update_metas({"curve_name": curve_name})
else:
metric_meta.update_metas({"curve_name": metric_name})
self.tracker.set_metric_meta(
metric_name=metric_name,
metric_namespace=metric_namespace,
metric_meta=metric_meta,
)
def callback_metric(
self, metric_name, metric_namespace, metric_data: typing.List[Metric]
):
if self.need_cv:
metric_name = ".".join([metric_name, str(self.cv_fold)])
self.tracker.log_metric_data(
metric_name=metric_name,
metric_namespace=metric_namespace,
metrics=metric_data,
)
def callback_warm_start_init_iter(self, iter_num):
metric_meta = MetricMeta(
name="train",
metric_type="init_iter",
extra_metas={
"unit_name": "iters",
},
)
self.callback_meta(
metric_name="init_iter", metric_namespace="train", metric_meta=metric_meta
)
self.callback_metric(
metric_name="init_iter",
metric_namespace="train",
metric_data=[Metric("init_iter", iter_num)],
)
def get_latest_checkpoint(self):
return self.checkpoint_manager.latest_checkpoint.read()
def save_summary(self):
self.tracker.log_component_summary(summary_data=self.summary())
def set_cv_fold(self, cv_fold):
self.cv_fold = cv_fold
def summary(self):
return copy.deepcopy(self._summary)
def set_summary(self, new_summary):
"""
Model summary setter
Parameters
----------
new_summary: dict, summary to replace the original one
Returns
-------
"""
if not isinstance(new_summary, dict):
raise ValueError(
f"summary should be of dict type, received {type(new_summary)} instead."
)
self._summary = copy.deepcopy(new_summary)
def add_summary(self, new_key, new_value):
"""
Add key:value pair to model summary
Parameters
----------
new_key: str
new_value: object
Returns
-------
"""
original_value = self._summary.get(new_key, None)
if original_value is not None:
LOGGER.warning(
f"{new_key} already exists in model summary."
f"Corresponding value {original_value} will be replaced by {new_value}"
)
self._summary[new_key] = new_value
# LOGGER.debug(f"{new_key}: {new_value} added to summary.")
def merge_summary(self, new_content, suffix=None, suffix_sep="_"):
"""
Merge new content into model summary
Parameters
----------
new_content: dict, content to be merged into summary
        suffix: str or None, suffix used to create new key if any key in new_content already exists in model summary
suffix_sep: string, default '_', suffix separator used to create new key
Returns
-------
"""
if not isinstance(new_content, dict):
raise ValueError(
f"To merge new content into model summary, "
f"value must be of dict type, received {type(new_content)} instead."
)
new_summary = self.summary()
keyset = new_summary.keys() | new_content.keys()
for key in keyset:
if key in new_summary and key in new_content:
if suffix is not None:
new_key = f"{key}{suffix_sep}{suffix}"
else:
new_key = key
new_value = new_content.get(key)
new_summary[new_key] = new_value
elif key in new_content:
new_summary[key] = new_content.get(key)
else:
pass
self.set_summary(new_summary)
@staticmethod
def extract_data(data: dict):
LOGGER.debug("In extract_data, data input: {}".format(data))
if len(data) == 0:
return data
if len(data) == 1:
return list(data.values())[0]
return data
@staticmethod
def check_schema_content(schema):
"""
check for repeated header & illegal/non-printable chars except for space
allow non-ascii chars
:param schema: dict
:return:
"""
abnormal_detection.check_legal_schema(schema)
def align_data_header(self, data_instances, pre_header):
"""
align features of given data, raise error if value in given schema not found
:param data_instances: data table
:param pre_header: list, header of model
:return: dtable, aligned data
"""
result_data = self._align_cache.get(id(data_instances))
if result_data is None:
result_data = header_alignment(
data_instances=data_instances, pre_header=pre_header
)
self._align_cache[id(data_instances)] = result_data
return result_data
@staticmethod
def pass_data(data):
if isinstance(data, dict) and len(data) >= 1:
data = list(data.values())[0]
return data
def obtain_data(self, data_list):
if isinstance(data_list, list):
return data_list[0]
return data_list
| 25,116 | 32.26755 | 116 |
py
|
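merge_summary above renames colliding keys with a suffix instead of silently overwriting them. The same rule in isolation, on plain dicts, so it can be checked without the FATE runtime (a sketch, not the class itself):

def merge(summary, new_content, suffix=None, suffix_sep='_'):
    # mirrors ModelBase.merge_summary: a key present on both sides is stored
    # under key + sep + suffix when a suffix is given, otherwise overwritten
    merged = dict(summary)
    for key, value in new_content.items():
        if key in summary and suffix is not None:
            merged[f'{key}{suffix_sep}{suffix}'] = value
        else:
            merged[key] = value
    return merged

print(merge({'auc': 0.91}, {'auc': 0.93, 'ks': 0.55}, suffix='cv'))
# {'auc': 0.91, 'auc_cv': 0.93, 'ks': 0.55}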
FATE
|
FATE-master/python/federatedml/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616 | 37.5625 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/nn/__init__.py
|
from federatedml.nn.dataset.base import Dataset
from federatedml.nn.homo.trainer.trainer_base import TrainerBase
__all__ = ['Dataset', 'TrainerBase']
| 151 | 29.4 | 64 |
py
|
FATE
|
FATE-master/python/federatedml/nn/model_zoo/graphsage.py
|
import torch as t
from torch import nn
from torch.nn import Module
import torch_geometric.nn as pyg
class Sage(nn.Module):
def __init__(self, in_channels, hidden_channels, class_num):
super().__init__()
self.model = nn.ModuleList([
pyg.SAGEConv(in_channels=in_channels, out_channels=hidden_channels, project=True),
pyg.SAGEConv(in_channels=hidden_channels, out_channels=class_num),
            nn.LogSoftmax(dim=-1)]  # explicit dim over class scores; avoids the implicit-dim warning
)
def forward(self, x, edge_index):
for i, conv in enumerate(self.model):
if isinstance(conv, pyg.SAGEConv):
x = conv(x, edge_index)
else:
x = conv(x)
return x
| 702 | 29.565217 | 94 |
py
|
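A quick forward pass through the two-layer GraphSAGE above, on a toy four-node graph with random features (requires torch and torch_geometric; the sizes are arbitrary):

import torch
from federatedml.nn.model_zoo.graphsage import Sage

x = torch.randn(4, 16)                                   # 4 nodes, 16 features
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]])  # two undirected edges

model = Sage(in_channels=16, hidden_channels=8, class_num=3)
log_probs = model(x, edge_index)
print(log_probs.shape)  # torch.Size([4, 3]): per-node log class probabilities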
FATE
|
FATE-master/python/federatedml/nn/model_zoo/homographsage.py
|
import torch as t
from torch import nn
from torch.nn import Module
import torch_geometric.nn as pyg
class Sage(nn.Module):
def __init__(self, in_channels, hidden_channels, class_num):
super().__init__()
self.model = nn.ModuleList([
pyg.SAGEConv(in_channels=in_channels, out_channels=hidden_channels, project=True),
pyg.SAGEConv(in_channels=hidden_channels, out_channels=class_num),
            nn.LogSoftmax(dim=-1)]  # explicit dim over class scores; avoids the implicit-dim warning
)
def forward(self, x, edge_index):
for i, conv in enumerate(self.model):
if isinstance(conv, pyg.SAGEConv):
x = conv(x, edge_index)
else:
x = conv(x)
return x
| 703 | 28.333333 | 94 |
py
|
FATE
|
FATE-master/python/federatedml/nn/model_zoo/vision.py
|
import torch as t
from torchvision.models import get_model
class TorchVisionModels(t.nn.Module):
"""
    This class provides all torchvision classification models;
    instantiate a model, optionally with pretrained weights, by passing the model name and weight name as strings.
    Parameters
    ----------
    vision_model_name: str, name of a model provided by torchvision.models; for all available vision models, see:
    https://pytorch.org/vision/stable/models.html#table-of-all-available-classification-weights
    pretrain_weights: str, name of the pretrained weights; for all available weights, see:
    https://pytorch.org/vision/stable/models.html#table-of-all-available-classification-weights
"""
def __init__(self, vision_model_name: str, pretrain_weights: str = None):
super(TorchVisionModels, self).__init__()
self.model = get_model(vision_model_name, weights=pretrain_weights)
def forward(self, x):
return self.model(x)
def __repr__(self):
return self.model.__repr__()
| 1,062 | 38.37037 | 114 |
py
|
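Instantiating the wrapper above with a concrete backbone; 'resnet18' and the 'DEFAULT' weight alias are ordinary torchvision names, and the pretrained weights download on first use:

import torch
from federatedml.nn.model_zoo.vision import TorchVisionModels

# pass pretrain_weights=None instead to start from random initialization
net = TorchVisionModels('resnet18', pretrain_weights='DEFAULT')
out = net(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000]) for the ImageNet classification head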
FATE
|
FATE-master/python/federatedml/nn/model_zoo/pretrained_bert.py
|
from transformers.models.bert import BertModel
from torch.nn import Module
from federatedml.util import LOGGER
class PretrainedBert(Module):
def __init__(self, pretrained_model_name_or_path: str = 'bert-base-uncased', freeze_weight=False):
"""
A pretrained Bert Model based on transformers
Parameters
----------
pretrained_model_name_or_path: string, specify the version of bert pretrained model,
for all available bert model, see:
https://huggingface.co/bert-base-uncased?text=The+goal+of+life+is+%5BMASK%5D.#model-variations
or it can be a path to downloaded bert model
    freeze_weight: bool, whether to freeze the weights during training; if True, the bert parameters are
                   excluded from parameters() and gradient calculation is skipped
"""
super(PretrainedBert, self).__init__()
self.pretrained_model_str = pretrained_model_name_or_path
self.freeze_weight = freeze_weight
LOGGER.info(
            'if you are using a non-local model, the pretrained weights will be '
            'downloaded, which may take some time')
self.model = BertModel.from_pretrained(
pretrained_model_name_or_path=self.pretrained_model_str)
if self.freeze_weight:
self.model.requires_grad_(False)
def forward(self, x):
return self.model(x)
def parameters(self, recurse: bool = True):
if self.freeze_weight:
            # return an empty parameter tuple so frozen bert weights are skipped;
            # the fate-torch optimizer wrapper substitutes a placeholder parameter
            return ()
else:
return self.model.parameters(recurse=recurse)
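# Minimal usage sketch (tokenization is an assumption; this module only wraps the
# bert backbone):
# from transformers import BertTokenizer
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# bert = PretrainedBert('bert-base-uncased', freeze_weight=True)
# input_ids = tokenizer('hello fate', return_tensors='pt')['input_ids']
# hidden = bert(input_ids).last_hidden_state   # -> (1, seq_len, 768)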
| 1,566 | 38.175 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/nn/dataset/base.py
|
from torch.utils.data import Dataset as Dataset_
from federatedml.nn.backend.utils.common import ML_PATH, LLM_PATH
import importlib
import abc
import numpy as np
class Dataset(Dataset_):
def __init__(self, **kwargs):
super(Dataset, self).__init__()
self._type = 'local' # train/predict
self._check = False
self._generated_ids = None
self.training = True
@property
def dataset_type(self):
if not hasattr(self, '_type'):
raise AttributeError(
'type variable not exists, call __init__ of super class')
return self._type
@dataset_type.setter
def dataset_type(self, val):
self._type = val
def has_dataset_type(self):
return self.dataset_type
def set_type(self, _type):
self.dataset_type = _type
def get_type(self):
return self.dataset_type
def has_sample_ids(self):
        # if get_sample_ids is not implemented, return False
try:
sample_ids = self.get_sample_ids()
except NotImplementedError as e:
return False
except BaseException as e:
raise e
if sample_ids is None:
return False
else:
if not self._check:
                assert isinstance(
                    sample_ids, list), 'get_sample_ids() must return a list containing str or int'
                for id_ in sample_ids:
                    if (not isinstance(id_, str)) and (not isinstance(id_, int)):
                        raise RuntimeError(
                            'get_sample_ids() must return a list containing str or int: got id {} of type {}'.format(
                                id_, type(id_)))
assert len(sample_ids) == len(
self), 'sample id len:{} != dataset length:{}'.format(len(sample_ids), len(self))
self._check = True
return True
def init_sid_and_getfunc(self, prefix: str = None):
if prefix is not None:
assert isinstance(
prefix, str), 'prefix must be a str, but got {}'.format(prefix)
else:
prefix = self._type
generated_ids = []
for i in range(0, self.__len__()):
generated_ids.append(prefix + '_' + str(i))
self._generated_ids = generated_ids
def get_func():
return self._generated_ids
self.get_sample_ids = get_func
"""
Functions for users
"""
def train(self, ):
self.training = True
def eval(self, ):
self.training = False
# Function to implemented
@abc.abstractmethod
def load(self, file_path):
raise NotImplementedError(
'You must implement load function so that Client can pass file-path to this '
'class')
def __getitem__(self, item):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
def get_classes(self):
raise NotImplementedError()
def get_sample_ids(self):
raise NotImplementedError()
class ShuffleWrapDataset(Dataset_):
def __init__(self, dataset: Dataset, shuffle_seed=100):
super(ShuffleWrapDataset, self).__init__()
self.ds = dataset
ids = self.ds.get_sample_ids()
sort_idx = np.argsort(np.array(ids))
assert isinstance(dataset, Dataset)
self.idx = sort_idx
if shuffle_seed is not None:
np.random.seed(shuffle_seed)
self.shuffled_idx = np.copy(self.idx)
np.random.shuffle(self.shuffled_idx)
else:
self.shuffled_idx = np.copy(self.idx)
self.idx_map = {k: v for k, v in zip(self.idx, self.shuffled_idx)}
def train(self, ):
self.ds.train()
def eval(self, ):
self.ds.eval()
def __getitem__(self, item):
return self.ds[self.idx_map[self.idx[item]]]
def __len__(self):
return len(self.ds)
def __repr__(self):
return self.ds.__repr__()
def has_sample_ids(self):
return self.ds.has_sample_ids()
def set_shuffled_idx(self, idx_map: dict):
self.shuffled_idx = np.array(list(idx_map.values()))
self.idx_map = idx_map
def get_sample_ids(self):
ids = self.ds.get_sample_ids()
return np.array(ids)[self.shuffled_idx].tolist()
def get_classes(self):
return self.ds.get_classes()
def get_dataset_class(dataset_module_name: str):
if dataset_module_name.endswith('.py'):
dataset_module_name = dataset_module_name.replace('.py', '')
try:
ds_modules = importlib.import_module(
'{}.dataset.{}'.format(
ML_PATH, dataset_module_name)
)
except BaseException:
ds_modules = importlib.import_module(
'{}.dataset.{}'.format(
LLM_PATH, dataset_module_name)
)
try:
ds = []
for k, v in ds_modules.__dict__.items():
if isinstance(v, type):
if issubclass(v, Dataset) and v is not Dataset:
ds.append(v)
if len(ds) == 0:
            raise ValueError('Did not find any class in {}.py that is a subclass of the Dataset class'.
                             format(dataset_module_name))
else:
return ds[-1] # return the last defined class
except ValueError as e:
raise e
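# Minimal sketch of a custom dataset subclass (the npy format is an assumption; a
# file like this placed under nn/dataset can then be loaded via get_dataset_class):
# class MyDataset(Dataset):
#     def load(self, file_path):
#         self.data = np.load(file_path)
#     def __len__(self):
#         return len(self.data)
#     def __getitem__(self, item):
#         return self.data[item]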
| 5,430 | 28.677596 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/nn/dataset/image.py
|
import torch
from federatedml.nn.dataset.base import Dataset
from torchvision.datasets import ImageFolder
from torchvision import transforms
import numpy as np
class ImageDataset(Dataset):
"""
    A basic image dataset built on pytorch ImageFolder; supports simple image transforms.
Given a folder path, ImageDataset will load images from this folder, images in this
folder need to be organized in a Torch-ImageFolder format, see
https://pytorch.org/vision/main/generated/torchvision.datasets.ImageFolder.html for details.
Image name will be automatically taken as the sample id.
Parameters
----------
center_crop : bool, use center crop transformer
center_crop_shape: tuple or list
generate_id_from_file_name: bool, whether to take image name as sample id
    file_suffix: str, default '.jpg'; if generate_id_from_file_name is True, this suffix is stripped
                 from the file name and the result is used as the sample id
return_label: bool, return label or not, this option is for host dataset, when running hetero-NN
float64: bool, returned image tensors will be transformed to double precision
label_dtype: str, long, float, or double, the dtype of return label
"""
def __init__(self, center_crop=False, center_crop_shape=None,
generate_id_from_file_name=True, file_suffix='.jpg',
return_label=True, float64=False, label_dtype='long'):
super(ImageDataset, self).__init__()
self.image_folder: ImageFolder = None
self.center_crop = center_crop
self.size = center_crop_shape
self.return_label = return_label
self.generate_id_from_file_name = generate_id_from_file_name
self.file_suffix = file_suffix
self.float64 = float64
self.dtype = torch.float32 if not self.float64 else torch.float64
avail_label_type = ['float', 'long', 'double']
self.sample_ids = None
assert label_dtype in avail_label_type, 'available label dtype : {}'.format(
avail_label_type)
if label_dtype == 'double':
self.label_dtype = torch.float64
elif label_dtype == 'long':
self.label_dtype = torch.int64
else:
self.label_dtype = torch.float32
def load(self, folder_path):
# read image from folders
if self.center_crop:
transformer = transforms.Compose(
[transforms.CenterCrop(size=self.size), transforms.ToTensor()])
else:
transformer = transforms.Compose([transforms.ToTensor()])
if folder_path.endswith('/'):
folder_path = folder_path[: -1]
image_folder_path = folder_path
folder = ImageFolder(root=image_folder_path, transform=transformer)
self.image_folder = folder
if self.generate_id_from_file_name:
# use image name as its sample id
file_name = self.image_folder.imgs
ids = []
for name in file_name:
sample_id = name[0].split(
'/')[-1].replace(self.file_suffix, '')
ids.append(sample_id)
self.sample_ids = ids
def __getitem__(self, item):
if self.return_label:
item = self.image_folder[item]
return item[0].type(
self.dtype), torch.tensor(
item[1]).type(
self.label_dtype)
else:
return self.image_folder[item][0].type(self.dtype)
def __len__(self):
return len(self.image_folder)
def __repr__(self):
return self.image_folder.__repr__()
def get_classes(self):
return np.unique(self.image_folder.targets).tolist()
def get_sample_ids(self):
return self.sample_ids
if __name__ == '__main__':
pass
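    # Minimal usage sketch (the folder path is an assumption; it must follow the
    # torchvision ImageFolder layout, i.e. <root>/<class_name>/<image>.jpg):
    # ds = ImageDataset(center_crop=True, center_crop_shape=(224, 224))
    # ds.load('/data/images')
    # img, label = ds[0]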
| 3,837 | 35.552381 | 119 |
py
|
FATE
|
FATE-master/python/federatedml/nn/dataset/table.py
|
import numpy as np
import pandas as pd
from federatedml.statistic.data_overview import with_weight
from federatedml.nn.dataset.base import Dataset
from federatedml.util import LOGGER
class TableDataset(Dataset):
"""
    A table dataset that loads data from a given csv path or transforms a FATE DTable
    Parameters
    ----------
    label_col: str, name of the label column in csv; if None, will automatically take 'y', 'label' or 'target' as label
    feature_dtype: dtype of features, supports int, long, float, double
    label_dtype: dtype of label, supports int, long, float, double
    label_shape: list or tuple, the shape of label
    flatten_label: bool, flatten extracted label column or not, default is False
"""
def __init__(
self,
label_col=None,
feature_dtype='float',
label_dtype='float',
label_shape=None,
flatten_label=False):
super(TableDataset, self).__init__()
self.with_label = True
self.with_sample_weight = False
self.features: np.ndarray = None
self.label: np.ndarray = None
self.sample_weights: np.ndarray = None
self.origin_table: pd.DataFrame = pd.DataFrame()
self.label_col = label_col
self.f_dtype = self.check_dtype(feature_dtype)
self.l_dtype = self.check_dtype(label_dtype)
if label_shape is not None:
assert isinstance(label_shape, tuple) or isinstance(
label_shape, list), 'label shape is {}'.format(label_shape)
self.label_shape = label_shape
self.flatten_label = flatten_label
# ids, match ids is for FATE match id system
self.sample_ids = None
self.match_ids = None
if self.label_col is not None:
assert isinstance(self.label_col, str) or isinstance(
self.label_col, int), 'label columns parameter must be a str or an int'
@staticmethod
def check_dtype(dtype):
if dtype is not None:
avail = ['long', 'int', 'float', 'double']
assert dtype in avail, 'available dtype is {}, but got {}'.format(
avail, dtype)
if dtype == 'long':
return np.int64
if dtype == 'int':
return np.int32
if dtype == 'float':
return np.float32
if dtype == 'double':
return np.float64
return dtype
def __getitem__(self, item):
if self.with_label:
if self.with_sample_weight and self.training:
return self.features[item], (self.label[item], self.sample_weights[item])
else:
return self.features[item], self.label[item]
else:
return self.features[item]
def __len__(self):
return len(self.origin_table)
def load(self, file_path):
if isinstance(file_path, str):
self.origin_table = pd.read_csv(file_path)
elif isinstance(file_path, pd.DataFrame):
self.origin_table = file_path
else:
# if is FATE DTable, collect data and transform to array format
data_inst = file_path
self.with_sample_weight = with_weight(data_inst)
LOGGER.info('collecting FATE DTable, with sample weight is {}'.format(self.with_sample_weight))
header = data_inst.schema["header"]
LOGGER.debug('input dtable header is {}'.format(header))
data = list(data_inst.collect())
data_keys = [key for (key, val) in data]
data_keys_map = dict(zip(sorted(data_keys), range(len(data_keys))))
keys = [None for idx in range(len(data_keys))]
x_ = [None for idx in range(len(data_keys))]
y_ = [None for idx in range(len(data_keys))]
match_ids = {}
sample_weights = [1 for idx in range(len(data_keys))]
for (key, inst) in data:
idx = data_keys_map[key]
keys[idx] = key
x_[idx] = inst.features
y_[idx] = inst.label
match_ids[key] = inst.inst_id
if self.with_sample_weight:
sample_weights[idx] = inst.weight
x_ = np.asarray(x_)
y_ = np.asarray(y_)
df = pd.DataFrame(x_)
df.columns = header
df['id'] = sorted(data_keys)
df['label'] = y_
            # host data has no label, so this column will be all None
if df['label'].isna().all():
df = df.drop(columns=['label'])
self.origin_table = df
self.sample_weights = np.array(sample_weights)
self.match_ids = match_ids
label_col_candidates = ['y', 'label', 'target']
# automatically set id columns
id_col_candidates = ['id', 'sid']
for id_col in id_col_candidates:
if id_col in self.origin_table:
self.sample_ids = self.origin_table[id_col].values.tolist()
self.origin_table = self.origin_table.drop(columns=[id_col])
break
# infer column name
label = self.label_col
if label is None:
for i in label_col_candidates:
if i in self.origin_table:
label = i
break
if label is None:
self.with_label = False
LOGGER.warning(
'label default setting is "auto", but found no "y"/"label"/"target" in input table')
else:
if label not in self.origin_table:
raise ValueError(
'label column {} not found in input table'.format(label))
if self.with_label:
self.label = self.origin_table[label].values
self.features = self.origin_table.drop(columns=[label]).values
if self.l_dtype:
self.label = self.label.astype(self.l_dtype)
if self.label_shape:
self.label = self.label.reshape(self.label_shape)
else:
self.label = self.label.reshape((len(self.features), -1))
if self.flatten_label:
self.label = self.label.flatten()
else:
self.label = None
self.features = self.origin_table.values
if self.f_dtype:
self.features = self.features.astype(self.f_dtype)
def get_classes(self):
if self.label is not None:
return np.unique(self.label).tolist()
else:
raise ValueError(
'no label found, please check if self.label is set')
def get_sample_ids(self):
return self.sample_ids
def get_match_ids(self):
return self.match_ids
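# Minimal usage sketch (the csv path is an assumption; a 'y'/'label'/'target'
# column is auto-detected when label_col is None):
# ds = TableDataset(feature_dtype='float', label_dtype='long', flatten_label=True)
# ds.load('/data/train.csv')
# features, label = ds[0]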
| 6,846 | 35.036842 | 117 |
py
|
FATE
|
FATE-master/python/federatedml/nn/dataset/graph.py
|
import numpy as np
import pandas as pd
from federatedml.statistic.data_overview import with_weight
from federatedml.nn.dataset.base import Dataset
try:
from torch_geometric.data import Data
except BaseException:
pass
import torch
from federatedml.util import LOGGER
class GraphDataset(Dataset):
"""
    A graph dataset that includes a feature table, an edge table and input_nodes tables. The data comes from a given csv path or is transformed from a FATE DTable
    Parameters
    ----------
    id_col: str, name of the id column in csv, default 'id'
    label_col: str, name of label column in csv; if None, will automatically take 'y' or 'label' or 'target' as label
    feature_dtype: dtype of feature, supports int, long, float, double
    label_dtype: dtype of label, supports int, long, float, double
    feats_name: name of the node feature csv, default 'feats.csv'
    feats_dataset_col: name of the dataset column indicating to which dataset the node belongs, default 'dataset'
    feats_dataset_train: flag of the train set
    feats_dataset_vali: flag of the validation set
    feats_dataset_test: flag of the test set
    adj_name: name of the adjacency matrix csv, default 'adj.csv'
    adj_src_col: name of the source node column in the adjacency matrix, default 'node1'
    adj_dst_col: name of the destination node column in the adjacency matrix, default 'node2'
"""
def __init__(
self,
id_col='id',
label_col='y',
feature_dtype='float',
label_dtype='long',
feats_name='feats.csv',
feats_dataset_col='dataset',
feats_dataset_train='train',
feats_dataset_vali='vali',
feats_dataset_test='test',
adj_name='adj.csv',
adj_src_col='node1',
adj_dst_col='node2'):
super(GraphDataset, self).__init__()
self.key2idx: dict = {}
self.f_dtype = self.check_dtype(feature_dtype)
self.l_dtype = self.check_dtype(label_dtype)
self.data: Data = Data()
self.sample_ids = None
self.input_nodes_train = None
self.input_nodes_vali = None
self.input_nodes_test = None
self.id_col = id_col
self.label_col = label_col
self.feats_name = feats_name
self.feats_dataset_col = feats_dataset_col
self.feats_dataset_train = feats_dataset_train
self.feats_dataset_vali = feats_dataset_vali
self.feats_dataset_test = feats_dataset_test
self.adj_name = adj_name
self.adj_src_col = adj_src_col
self.adj_dst_col = adj_dst_col
def __len__(self):
return self.data.num_nodes
@staticmethod
def check_dtype(dtype):
if dtype is not None:
avail = ['long', 'int', 'float', 'double']
assert dtype in avail, 'available dtype is {}, but got {}'.format(
avail, dtype)
if dtype == 'long':
return torch.int64
if dtype == 'int':
return torch.int32
if dtype == 'float':
return torch.float32
if dtype == 'double':
return torch.float64
return dtype
def __process_feats(self, data_path):
LOGGER.info("processing feats")
tmp = pd.read_csv(data_path + "/" + self.feats_name)
self.input_nodes_train = tmp[tmp[self.feats_dataset_col] == self.feats_dataset_train].index.to_list()
self.input_nodes_vali = tmp[tmp[self.feats_dataset_col] == self.feats_dataset_vali].index.to_list()
self.input_nodes_test = tmp[tmp[self.feats_dataset_col] == self.feats_dataset_test].index.to_list()
self.data.x = torch.tensor(tmp.drop([self.id_col, self.feats_dataset_col,
self.label_col], axis=1).to_numpy(), dtype=self.f_dtype)
self.data.y = torch.tensor(tmp[self.label_col], dtype=self.l_dtype)
def __process_adj(self, data_path):
LOGGER.info("processing edges")
tmp = pd.read_csv(data_path + "/" + self.adj_name)
self.data.edge_index = torch.tensor(tmp[[self.adj_src_col, self.adj_dst_col]].T.to_numpy(), dtype=torch.long)
if len(tmp.columns) > 2:
self.data.edge_attr = torch.tensor(
tmp.drop([self.adj_src_col, self.adj_dst_col], axis=1).to_numpy(), dtype=torch.float)
def load(self, data_path):
LOGGER.info("Loading graph data...")
self.__process_feats(data_path)
self.__process_adj(data_path)
# Assign each node its global node index:
self.data.n_id = torch.arange(self.data.num_nodes)
def get_sample_ids(self):
return self.sample_ids
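# Minimal usage sketch (the folder is an assumption; it must contain feats.csv and
# adj.csv in the formats described above):
# ds = GraphDataset()
# ds.load('/data/cora')
# print(ds.data.num_nodes, ds.data.edge_index.shape)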
| 4,680 | 39.353448 | 145 |
py
|
FATE
|
FATE-master/python/federatedml/nn/dataset/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/backend/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/import_hook.py
|
try:
    from federatedml.nn.backend.torch import nn as nn_
    from federatedml.nn.backend.torch import init as init_
    from federatedml.nn.backend.torch import optim as optim_
    from federatedml.nn.backend.torch.cust import CustModel, CustLoss
from federatedml.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def monkey_patch(torch_nn, fate_torch_module):
for name in fate_torch_module.__dict__.keys():
        if '__' in name:  # skip dunder/private attributes
continue
if name in torch_nn.__dict__.keys():
torch_nn.__dict__[name] = fate_torch_module.__dict__[name]
def fate_torch_hook(torch_module_var):
"""
    This is a monkey-patch function that modifies torch modules to use fate-torch layers and components
:param torch_module_var:
:return:
"""
if torch_module_var.__name__ == 'torch':
monkey_patch(torch_module_var.nn, nn_)
monkey_patch(torch_module_var.optim, optim_)
monkey_patch(torch_module_var.nn.init, init_)
setattr(torch_module_var.nn, 'CustModel', CustModel)
setattr(torch_module_var.nn, 'InteractiveLayer', InteractiveLayer)
setattr(torch_module_var.nn, 'CustLoss', CustLoss)
elif torch_module_var.__name__ == 'torch.nn':
monkey_patch(torch_module_var, nn_)
setattr(torch_module_var, 'CustModel', CustModel)
        setattr(torch_module_var, 'InteractiveLayer', InteractiveLayer)
        setattr(torch_module_var, 'CustLoss', CustLoss)
elif torch_module_var.__name__ == 'torch.optim':
monkey_patch(torch_module_var, optim_)
elif torch_module_var.__name__ == 'torch.nn.init':
monkey_patch(torch_module_var, init_)
else:
raise ValueError(
'this module: {} does not support fate torch hook'.format(torch_module_var))
return torch_module_var
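# Typical usage sketch (patches the passed-in torch module in place and returns it):
# import torch as t
# t = fate_torch_hook(t)
# model = t.nn.Sequential(t.nn.Linear(10, 1), t.nn.Sigmoid())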
| 1,925 | 36.764706 | 101 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/base.py
|
import json
import torch as t
from torch.nn import Sequential as tSequential
from federatedml.nn.backend.torch.operation import OpBase
class FateTorchLayer(object):
def __init__(self):
t.nn.Module.__init__(self)
self.param_dict = dict()
self.initializer = {'weight': None, 'bias': None}
self.optimizer = None
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['layer'] = type(self).__name__
ret_dict['initializer'] = {}
if self.initializer['weight']:
ret_dict['initializer']['weight'] = self.initializer['weight']
if self.initializer['bias']:
ret_dict['initializer']['bias'] = self.initializer['bias']
return ret_dict
def add_optimizer(self, opt):
self.optimizer = opt
class FateTorchLoss(object):
def __init__(self):
self.param_dict = {}
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['loss_fn'] = type(self).__name__
return ret_dict
class FateTorchOptimizer(object):
def __init__(self):
self.param_dict = dict()
self.torch_class = None
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['optimizer'] = type(self).__name__
ret_dict['config_type'] = 'pytorch'
return ret_dict
def check_params(self, params):
        if isinstance(params, (FateTorchLayer, Sequential)):
            params.add_optimizer(self)
            params = params.parameters()
l_param = list(params)
if len(l_param) == 0:
            # fake parameter, for the case where the model only contains CustModel layers
return [t.nn.Parameter(t.Tensor([0]))]
return l_param
def register_optimizer(self, input_):
if input_ is None:
return
        if isinstance(input_, (FateTorchLayer, Sequential)):
            input_.add_optimizer(self)
def to_torch_instance(self, parameters):
return self.torch_class(parameters, **self.param_dict)
class Sequential(tSequential):
def to_dict(self):
"""
get the structure of current sequential
"""
rs = {}
idx = 0
for k in self._modules:
ordered_name = str(idx) + '-' + k
rs[ordered_name] = self._modules[k].to_dict()
idx += 1
return rs
def to_json(self):
return json.dumps(self.to_dict(), indent=4)
def add_optimizer(self, opt):
setattr(self, 'optimizer', opt)
def add(self, layer):
if isinstance(layer, Sequential):
self._modules = layer._modules
# copy optimizer
if hasattr(layer, 'optimizer'):
setattr(self, 'optimizer', layer.optimizer)
elif isinstance(layer, FateTorchLayer):
self.add_module(str(len(self)), layer)
# update optimizer if dont have
if not hasattr(self, 'optimizer') and hasattr(layer, 'optimizer'):
setattr(self, 'optimizer', layer.optimizer)
else:
raise ValueError(
'unknown input layer type {}, this type is not supported'.format(
type(layer)))
@staticmethod
def get_loss_config(loss: FateTorchLoss):
return loss.to_dict()
def get_optimizer_config(self, optimizer=None):
if hasattr(self, 'optimizer'):
return self.optimizer.to_dict()
else:
return optimizer.to_dict()
def get_network_config(self):
return self.to_dict()
def get_torch_instance(fate_torch_nn_class: FateTorchLayer, param):
parent_torch_class = fate_torch_nn_class.__bases__
if issubclass(fate_torch_nn_class, OpBase):
return fate_torch_nn_class(**param)
for cls in parent_torch_class:
if issubclass(cls, t.nn.Module):
return cls(**param)
return None
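# Minimal sketch of config extraction (assumes the fate-torch Linear/Sigmoid
# wrappers from federatedml.nn.backend.torch.nn):
# seq = Sequential(Linear(10, 1), Sigmoid())
# seq.get_network_config()   # {'0-0': {'layer': 'Linear', ...}, '1-1': {...}}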
| 4,203 | 26.657895 | 81 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/optim.py
|
from torch import optim
from federatedml.nn.backend.torch.base import FateTorchLayer, Sequential
from federatedml.nn.backend.torch.base import FateTorchOptimizer
class ASGD(optim.ASGD, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.01,
lambd=0.0001,
alpha=0.75,
t0=1000000.0,
weight_decay=0,
foreach=None,
maximize=False,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['lambd'] = lambd
self.param_dict['alpha'] = alpha
self.param_dict['t0'] = t0
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.param_dict['maximize'] = maximize
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.ASGD.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adadelta(optim.Adadelta, FateTorchOptimizer):
def __init__(self, params=None, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0, foreach=None, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['rho'] = rho
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adadelta.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adagrad(optim.Adagrad, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.01,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
eps=1e-10,
foreach=None,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['lr_decay'] = lr_decay
self.param_dict['weight_decay'] = weight_decay
self.param_dict['initial_accumulator_value'] = initial_accumulator_value
self.param_dict['eps'] = eps
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adagrad.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adam(optim.Adam, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['amsgrad'] = amsgrad
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class AdamW(optim.AdamW, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['amsgrad'] = amsgrad
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.AdamW.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Adamax(optim.Adamax, FateTorchOptimizer):
def __init__(self, params=None, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Adamax.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class LBFGS(optim.LBFGS, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=1,
max_iter=20,
max_eval=None,
tolerance_grad=1e-07,
tolerance_change=1e-09,
history_size=100,
line_search_fn=None,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['max_iter'] = max_iter
self.param_dict['max_eval'] = max_eval
self.param_dict['tolerance_grad'] = tolerance_grad
self.param_dict['tolerance_change'] = tolerance_change
self.param_dict['history_size'] = history_size
self.param_dict['line_search_fn'] = line_search_fn
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.LBFGS.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class NAdam(optim.NAdam, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.002,
betas=(
0.9,
0.999),
eps=1e-08,
weight_decay=0,
momentum_decay=0.004,
foreach=None,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['momentum_decay'] = momentum_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.NAdam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class RAdam(optim.RAdam, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['foreach'] = foreach
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.RAdam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class RMSprop(optim.RMSprop, FateTorchOptimizer):
def __init__(
self,
params=None,
lr=0.01,
alpha=0.99,
eps=1e-08,
weight_decay=0,
momentum=0,
centered=False,
foreach=None,
maximize=False,
differentiable=False,
):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['alpha'] = alpha
self.param_dict['eps'] = eps
self.param_dict['weight_decay'] = weight_decay
self.param_dict['momentum'] = momentum
self.param_dict['centered'] = centered
self.param_dict['foreach'] = foreach
self.param_dict['maximize'] = maximize
self.param_dict['differentiable'] = differentiable
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.RMSprop.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class Rprop(optim.Rprop, FateTorchOptimizer):
def __init__(self, params=None, lr=0.01, etas=(0.5, 1.2), step_sizes=(1e-06, 50), foreach=None, maximize=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['etas'] = etas
self.param_dict['step_sizes'] = step_sizes
self.param_dict['foreach'] = foreach
self.param_dict['maximize'] = maximize
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.Rprop.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class SGD(optim.SGD, FateTorchOptimizer):
def __init__(self, params=None, lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['momentum'] = momentum
self.param_dict['dampening'] = dampening
self.param_dict['weight_decay'] = weight_decay
self.param_dict['nesterov'] = nesterov
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.SGD.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
class SparseAdam(optim.SparseAdam, FateTorchOptimizer):
def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, maximize=False, ):
FateTorchOptimizer.__init__(self)
self.param_dict['lr'] = lr
self.param_dict['betas'] = betas
self.param_dict['eps'] = eps
self.param_dict['maximize'] = maximize
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.SparseAdam.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
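# Minimal sketch of how these wrappers double as config objects (with params=None,
# no torch optimizer state is created):
# opt = Adam(lr=0.01)
# opt.to_dict()   # {'lr': 0.01, ..., 'optimizer': 'Adam', 'config_type': 'pytorch'}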
| 13,025 | 30.3125 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/cust_model.py
|
import importlib
from torch import nn
from federatedml.nn.backend.torch.base import FateTorchLayer
from federatedml.nn.backend.utils.common import ML_PATH
PATH = '{}.model_zoo'.format(ML_PATH)
class CustModel(FateTorchLayer, nn.Module):
def __init__(self, module_name, class_name, **kwargs):
super(CustModel, self).__init__()
assert isinstance(
module_name, str), 'name must be a str, specify the module in the model_zoo'
assert isinstance(
class_name, str), 'class name must be a str, specify the class in the module'
self.param_dict = {
'module_name': module_name,
'class_name': class_name,
'param': kwargs}
self._model = None
def init_model(self):
if self._model is None:
self._model = self.get_pytorch_model()
def forward(self, x):
if self._model is None:
raise ValueError('model not init, call init_model() function')
return self._model(x)
def get_pytorch_model(self):
module_name: str = self.param_dict['module_name']
class_name = self.param_dict['class_name']
module_param: dict = self.param_dict['param']
if module_name.endswith('.py'):
module_name = module_name.replace('.py', '')
nn_modules = importlib.import_module('{}.{}'.format(PATH, module_name))
try:
for k, v in nn_modules.__dict__.items():
if isinstance(v, type):
if issubclass(
v, nn.Module) and v is not nn.Module and v.__name__ == class_name:
return v(**module_param)
            raise ValueError(
                'Did not find any class in {}.py that is a subclass of nn.Module and named {}'.format(
                    module_name, class_name))
except ValueError as e:
raise e
def __repr__(self):
return 'CustModel({})'.format(str(self.param_dict))
| 1,984 | 34.446429 | 97 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/cust.py
|
from torch import nn
import importlib
from federatedml.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
from federatedml.nn.backend.utils.common import ML_PATH, LLM_PATH
import difflib
LLM_MODEL_PATH = '{}.model_zoo'.format(LLM_PATH)
MODEL_PATH = '{}.model_zoo'.format(ML_PATH)
LOSS_PATH = '{}.loss'.format(ML_PATH)
def str_simi(str_a, str_b):
return difflib.SequenceMatcher(None, str_a, str_b).quick_ratio()
def get_class(module_name, class_name, param, base_path):
if module_name.endswith('.py'):
module_name = module_name.replace('.py', '')
nn_modules = importlib.import_module(
'{}.{}'.format(base_path, module_name))
try:
name_simi_list = []
for k, v in nn_modules.__dict__.items():
if isinstance(v, type):
if issubclass(v, nn.Module) and v is not nn.Module:
if v.__name__ == class_name:
return v(**param)
else:
name_simi_list += ([(str_simi(class_name, v.__name__), v)])
sort_by_simi = sorted(name_simi_list, key=lambda x: -x[0])
if len(sort_by_simi) > 0:
            raise ValueError(
                'Did not find any class in {}.py that is a subclass of nn.Module and named {}. Did you mean {}?'.format(
                    module_name, class_name, sort_by_simi[0][1].__name__))
        else:
            raise ValueError(
                'Did not find any class in {}.py that is a subclass of nn.Module and named {}'.format(
                    module_name, class_name))
except ValueError as e:
raise e
class CustModel(FateTorchLayer, nn.Module):
def __init__(self, module_name, class_name, **kwargs):
super(CustModel, self).__init__()
assert isinstance(
module_name, str), 'name must be a str, specify the module in the model_zoo'
assert isinstance(
class_name, str), 'class name must be a str, specify the class in the module'
self.param_dict = {
'module_name': module_name,
'class_name': class_name,
'param': kwargs}
self._model = None
def init_model(self):
if self._model is None:
self._model = self.get_pytorch_model()
def forward(self, x):
if self._model is None:
raise ValueError('model not init, call init_model() function')
return self._model(x)
def get_pytorch_model(self, module_path=None):
if module_path is None:
try:
return get_class(
self.param_dict['module_name'],
self.param_dict['class_name'],
self.param_dict['param'],
MODEL_PATH)
except BaseException:
return get_class(
self.param_dict['module_name'],
self.param_dict['class_name'],
self.param_dict['param'],
LLM_MODEL_PATH)
else:
return get_class(
self.param_dict['module_name'],
self.param_dict['class_name'],
self.param_dict['param'],
module_path)
def __repr__(self):
return 'CustModel({})'.format(str(self.param_dict))
class CustLoss(FateTorchLoss, nn.Module):
def __init__(self, loss_module_name, class_name, **kwargs):
super(CustLoss, self).__init__()
assert isinstance(
loss_module_name, str), 'loss module name must be a str, specify the module in the model_zoo'
assert isinstance(
class_name, str), 'class name must be a str, specify the class in the module'
self.param_dict = {
'loss_module_name': loss_module_name,
'class_name': class_name,
'param': kwargs}
self._loss_fn = None
def init_loss_fn(self):
if self._loss_fn is None:
self._loss_fn = self.get_pytorch_model()
def forward(self, pred, label):
if self._loss_fn is None:
raise ValueError('loss not init, call init_loss_fn() function')
return self._loss_fn(pred, label)
def get_pytorch_model(self, module_path=None):
module_name: str = self.param_dict['loss_module_name']
class_name: str = self.param_dict['class_name']
module_param: dict = self.param_dict['param']
if module_path is None:
return get_class(
module_name=module_name,
class_name=class_name,
param=module_param,
base_path=LOSS_PATH)
else:
return get_class(
module_name=module_name,
class_name=class_name,
param=module_param,
base_path=module_path)
def __repr__(self):
return 'CustLoss({})'.format(str(self.param_dict))
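# Minimal usage sketch ('graphsage'/'Sage' refer to the model_zoo module above; the
# kwargs are illustrative):
# m = CustModel(module_name='graphsage', class_name='Sage',
#               in_channels=16, hidden_channels=32, class_num=7)
# m.init_model()   # lazily imports and instantiates the model_zoo class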
| 4,907 | 34.057143 | 118 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/init.py
|
import copy
import torch as t
from torch.nn import init as torch_init
import functools
from federatedml.nn.backend.torch.base import FateTorchLayer
from federatedml.nn.backend.torch.base import Sequential
str_init_func_map = {
"uniform": torch_init.uniform_,
"normal": torch_init.normal_,
"constant": torch_init.constant_,
"xavier_uniform": torch_init.xavier_uniform_,
"xavier_normal": torch_init.xavier_normal_,
"kaiming_uniform": torch_init.kaiming_uniform_,
"kaiming_normal": torch_init.kaiming_normal_,
"eye": torch_init.eye_,
"dirac": torch_init.dirac_,
"orthogonal": torch_init.orthogonal_,
"sparse": torch_init.sparse_,
"zeros": torch_init.zeros_,
"ones": torch_init.ones_
}
#
# def extract_param(func):
#
# args = inspect.getargspec(func)
# keys = args[0][1:]
# if len(keys) == 0:
# return {}
# defaults = args[-1]
# args_map = {}
# if defaults is not None:
# for idx, i in enumerate(keys[-len(defaults):]):
# args_map[i] = defaults[idx]
#
# for i in keys:
# if i not in args_map:
# args_map[i] = Required()
#
# return args_map
def init_weight(m, initializer):
if hasattr(m, 'weight'):
initializer(m.weight)
# LSTM RNN
if hasattr(m, 'weight_hh_l0'):
initializer(m.weight_hh_l0)
# LSTM RNN
if hasattr(m, 'weight_ih_l0'):
initializer(m.weight_ih_l0)
def init_bias(m, initializer):
    # LSTM/RNN modules expose .bias as a bool flag, so guard against that case
    if hasattr(m, 'bias') and not isinstance(m.bias, bool) and m.bias is not None:
        initializer(m.bias)
# LSTM RNN
if hasattr(m, 'bias_hh_l0') and m.bias_hh_l0 is not None:
initializer(m.bias_hh_l0)
# LSTM RNN
if hasattr(m, 'bias_ih_l0') and m.bias_ih_l0 is not None:
initializer(m.bias_ih_l0)
def get_init_func_type(init='weight'):
if init == 'weight':
return init_weight
elif init == 'bias':
return init_bias
else:
return None
def recursive_init(m, init_func, obj):
if len(list(m.children())) > 0:
if m == obj:
return
recursive_init(m, init_func, m)
else:
try:
init_func(m)
except Exception as e:
print('initialize layer {} failed, exception is :{}'.format(m, e))
def make_apply_func(torch_initializer, param_dict, init_func, layer):
initializer = functools.partial(torch_initializer, **param_dict)
init_func = functools.partial(init_func, initializer=initializer)
recursive_init_func = functools.partial(
recursive_init, obj=layer, init_func=init_func)
return recursive_init_func, param_dict
def get_init_dict(init_func, param_dict, init_type):
rev_dict = {v: k for k, v in str_init_func_map.items()}
rs = {
'init_type': init_type,
'init_func': rev_dict[init_func],
'param': param_dict}
return rs
def record_initializer(layers, init_dict):
if isinstance(layers, FateTorchLayer):
if init_dict['init_type'] == 'weight':
layers.initializer['weight'] = init_dict
elif init_dict['init_type'] == 'bias':
layers.initializer['bias'] = init_dict
def run_init(torch_initializer, input_var, init, layer):
# recursive init
if isinstance(layer, Sequential):
for sub_layer in layer:
run_init(torch_initializer, input_var, init, sub_layer)
# init layer
elif isinstance(layer, FateTorchLayer) or isinstance(layer, t.nn.Module):
recursive_init_func, param_dict = make_apply_func(
torch_initializer, copy.deepcopy(input_var), get_init_func_type(init), layer)
layer.apply(recursive_init_func)
record_initializer(
layer,
get_init_dict(
torch_initializer,
param_dict,
init))
else:
try:
return torch_initializer(layer, **input_var)
except Exception as e:
print(e)
print('skip initialization')
"""
Init Func
"""
def local_extract(local_dict):
param = {}
for k, v in local_dict.items():
if k != 'layer' and k != 'init':
param[k] = v
return copy.deepcopy(param)
def uniform_(layer, a=0, b=1, init='weight'):
run_init(
str_init_func_map['uniform'],
local_extract(
locals()),
init,
layer)
def normal_(layer, mean=0, std=1, init='weight'):
run_init(str_init_func_map['normal'], local_extract(locals()), init, layer)
def constant_(layer, val, init='weight'):
run_init(
str_init_func_map['constant'],
local_extract(
locals()),
init,
layer)
def ones_(layer, init='weight'):
run_init(str_init_func_map['ones'], local_extract(locals()), init, layer)
def zeros_(layer, init='weight'):
run_init(str_init_func_map['zeros'], local_extract(locals()), init, layer)
def eye_(layer, init='weight'):
run_init(str_init_func_map['eye'], local_extract(locals()), init, layer)
def dirac_(layer, groups=1, init='weight'):  # torch's dirac_ expects 'groups'
run_init(str_init_func_map['dirac'], local_extract(locals()), init, layer)
def xavier_uniform_(layer, gain=1.0, init='weight'):
run_init(str_init_func_map['xavier_uniform'],
local_extract(locals()), init, layer)
def xavier_normal_(layer, gain=1.0, init='weight'):
run_init(str_init_func_map['xavier_normal'],
local_extract(locals()), init, layer)
def kaiming_uniform_(
layer,
a=0,
mode='fan_in',
nonlinearity='leaky_relu',
init='weight'):
run_init(str_init_func_map['kaiming_uniform'],
local_extract(locals()), init, layer)
def kaiming_normal_(
layer,
a=0,
mode='fan_in',
nonlinearity='leaky_relu',
init='weight'):
run_init(str_init_func_map['kaiming_normal'],
local_extract(locals()), init, layer)
def orthogonal_(layer, gain=1, init='weight'):
run_init(
str_init_func_map['orthogonal'],
local_extract(
locals()),
init,
layer)
def sparse_(layer, sparsity, std=0.01, init='weight'):
run_init(str_init_func_map['sparse'], local_extract(locals()), init, layer)
str_fate_torch_init_func_map = {
"uniform": uniform_,
"normal": normal_,
"constant": constant_,
"xavier_uniform": xavier_uniform_,
"xavier_normal": xavier_normal_,
"kaiming_uniform": kaiming_uniform_,
"kaiming_normal": kaiming_normal_,
"eye": eye_,
"dirac": dirac_,
"orthogonal": orthogonal_,
"sparse": sparse_,
"zeros": zeros_,
"ones": ones_
}
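# Minimal usage sketch (assumes a fate-torch layer such as the Linear wrapper from
# federatedml.nn.backend.torch.nn; the initialization is also recorded on the layer):
# layer = Linear(10, 10)
# xavier_normal_(layer, init='weight')
# zeros_(layer, init='bias')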
if __name__ == '__main__':
pass
| 6,761 | 25.622047 | 89 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/nn.py
|
from torch import nn
from federatedml.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
from federatedml.nn.backend.torch.base import Sequential
class Bilinear(nn.modules.linear.Bilinear, FateTorchLayer):
def __init__(
self,
in1_features,
in2_features,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in1_features'] = in1_features
self.param_dict['in2_features'] = in2_features
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.Bilinear.__init__(self, **self.param_dict)
class Identity(nn.modules.linear.Identity, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.linear.Identity.__init__(self, **self.param_dict)
class LazyLinear(nn.modules.linear.LazyLinear, FateTorchLayer):
def __init__(
self,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.LazyLinear.__init__(self, **self.param_dict)
class Linear(nn.modules.linear.Linear, FateTorchLayer):
def __init__(
self,
in_features,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_features'] = in_features
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.Linear.__init__(self, **self.param_dict)
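# Each wrapper in this module behaves like its torch counterpart but records the
# constructor arguments in param_dict so the layer can be serialized; minimal sketch:
# fc = Linear(in_features=10, out_features=1)
# fc.to_dict()   # {'layer': 'Linear', 'in_features': 10, ..., 'initializer': {}}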
class NonDynamicallyQuantizableLinear(
nn.modules.linear.NonDynamicallyQuantizableLinear,
FateTorchLayer):
def __init__(
self,
in_features,
out_features,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_features'] = in_features
self.param_dict['out_features'] = out_features
self.param_dict.update(kwargs)
nn.modules.linear.NonDynamicallyQuantizableLinear.__init__(
self, **self.param_dict)
class GRU(nn.modules.rnn.GRU, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.rnn.GRU.__init__(self, **self.param_dict)
class GRUCell(nn.modules.rnn.GRUCell, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.GRUCell.__init__(self, **self.param_dict)
class LSTM(nn.modules.rnn.LSTM, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.rnn.LSTM.__init__(self, **self.param_dict)
class LSTMCell(nn.modules.rnn.LSTMCell, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.LSTMCell.__init__(self, **self.param_dict)
class RNN(nn.modules.rnn.RNN, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.rnn.RNN.__init__(self, **self.param_dict)
class RNNBase(nn.modules.rnn.RNNBase, FateTorchLayer):
def __init__(
self,
mode,
input_size,
hidden_size,
num_layers=1,
bias=True,
batch_first=False,
dropout=0.0,
bidirectional=False,
proj_size=0,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['num_layers'] = num_layers
self.param_dict['bias'] = bias
self.param_dict['batch_first'] = batch_first
self.param_dict['dropout'] = dropout
self.param_dict['bidirectional'] = bidirectional
self.param_dict['proj_size'] = proj_size
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['mode'] = mode
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.RNNBase.__init__(self, **self.param_dict)
class RNNCell(nn.modules.rnn.RNNCell, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias=True,
nonlinearity='tanh',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['bias'] = bias
self.param_dict['nonlinearity'] = nonlinearity
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict.update(kwargs)
nn.modules.rnn.RNNCell.__init__(self, **self.param_dict)
class RNNCellBase(nn.modules.rnn.RNNCellBase, FateTorchLayer):
def __init__(
self,
input_size,
hidden_size,
bias,
num_chunks,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['input_size'] = input_size
self.param_dict['hidden_size'] = hidden_size
self.param_dict['bias'] = bias
self.param_dict['num_chunks'] = num_chunks
self.param_dict.update(kwargs)
nn.modules.rnn.RNNCellBase.__init__(self, **self.param_dict)
class Embedding(nn.modules.sparse.Embedding, FateTorchLayer):
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding_idx'] = padding_idx
self.param_dict['max_norm'] = max_norm
self.param_dict['norm_type'] = norm_type
self.param_dict['scale_grad_by_freq'] = scale_grad_by_freq
self.param_dict['sparse'] = sparse
self.param_dict['_weight'] = _weight
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_embeddings'] = num_embeddings
self.param_dict['embedding_dim'] = embedding_dim
self.param_dict.update(kwargs)
nn.modules.sparse.Embedding.__init__(self, **self.param_dict)
class EmbeddingBag(nn.modules.sparse.EmbeddingBag, FateTorchLayer):
def __init__(
self,
num_embeddings,
embedding_dim,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
mode='mean',
sparse=False,
_weight=None,
include_last_offset=False,
padding_idx=None,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['max_norm'] = max_norm
self.param_dict['norm_type'] = norm_type
self.param_dict['scale_grad_by_freq'] = scale_grad_by_freq
self.param_dict['mode'] = mode
self.param_dict['sparse'] = sparse
self.param_dict['_weight'] = _weight
self.param_dict['include_last_offset'] = include_last_offset
self.param_dict['padding_idx'] = padding_idx
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_embeddings'] = num_embeddings
self.param_dict['embedding_dim'] = embedding_dim
self.param_dict.update(kwargs)
nn.modules.sparse.EmbeddingBag.__init__(self, **self.param_dict)
class AlphaDropout(nn.modules.dropout.AlphaDropout, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.AlphaDropout.__init__(self, **self.param_dict)
class Dropout(nn.modules.dropout.Dropout, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout.__init__(self, **self.param_dict)
class Dropout1d(nn.modules.dropout.Dropout1d, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout1d.__init__(self, **self.param_dict)
class Dropout2d(nn.modules.dropout.Dropout2d, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout2d.__init__(self, **self.param_dict)
class Dropout3d(nn.modules.dropout.Dropout3d, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.Dropout3d.__init__(self, **self.param_dict)
class FeatureAlphaDropout(
nn.modules.dropout.FeatureAlphaDropout,
FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout.FeatureAlphaDropout.__init__(
self, **self.param_dict)
class _DropoutNd(nn.modules.dropout._DropoutNd, FateTorchLayer):
def __init__(self, p=0.5, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['p'] = p
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.dropout._DropoutNd.__init__(self, **self.param_dict)
class CELU(nn.modules.activation.CELU, FateTorchLayer):
def __init__(self, alpha=1.0, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['alpha'] = alpha
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.CELU.__init__(self, **self.param_dict)
class ELU(nn.modules.activation.ELU, FateTorchLayer):
def __init__(self, alpha=1.0, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['alpha'] = alpha
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ELU.__init__(self, **self.param_dict)
class GELU(nn.modules.activation.GELU, FateTorchLayer):
def __init__(self, approximate='none', **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['approximate'] = approximate
self.param_dict.update(kwargs)
nn.modules.activation.GELU.__init__(self, **self.param_dict)
class GLU(nn.modules.activation.GLU, FateTorchLayer):
def __init__(self, dim=-1, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.GLU.__init__(self, **self.param_dict)
class Hardshrink(nn.modules.activation.Hardshrink, FateTorchLayer):
def __init__(self, lambd=0.5, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lambd'] = lambd
self.param_dict.update(kwargs)
nn.modules.activation.Hardshrink.__init__(self, **self.param_dict)
class Hardsigmoid(nn.modules.activation.Hardsigmoid, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.Hardsigmoid.__init__(self, **self.param_dict)
class Hardswish(nn.modules.activation.Hardswish, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.Hardswish.__init__(self, **self.param_dict)
class Hardtanh(nn.modules.activation.Hardtanh, FateTorchLayer):
def __init__(
self,
min_val=-1.0,
max_val=1.0,
inplace=False,
min_value=None,
max_value=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['min_val'] = min_val
self.param_dict['max_val'] = max_val
self.param_dict['inplace'] = inplace
self.param_dict['min_value'] = min_value
self.param_dict['max_value'] = max_value
self.param_dict.update(kwargs)
nn.modules.activation.Hardtanh.__init__(self, **self.param_dict)
class LeakyReLU(nn.modules.activation.LeakyReLU, FateTorchLayer):
def __init__(self, negative_slope=0.01, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['negative_slope'] = negative_slope
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.LeakyReLU.__init__(self, **self.param_dict)
class LogSigmoid(nn.modules.activation.LogSigmoid, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.LogSigmoid.__init__(self, **self.param_dict)
class LogSoftmax(nn.modules.activation.LogSoftmax, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.LogSoftmax.__init__(self, **self.param_dict)
class Mish(nn.modules.activation.Mish, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.Mish.__init__(self, **self.param_dict)
class MultiheadAttention(
nn.modules.activation.MultiheadAttention,
FateTorchLayer):
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dropout'] = dropout
self.param_dict['bias'] = bias
self.param_dict['add_bias_kv'] = add_bias_kv
self.param_dict['add_zero_attn'] = add_zero_attn
self.param_dict['kdim'] = kdim
self.param_dict['vdim'] = vdim
self.param_dict['batch_first'] = batch_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['embed_dim'] = embed_dim
self.param_dict['num_heads'] = num_heads
self.param_dict.update(kwargs)
nn.modules.activation.MultiheadAttention.__init__(
self, **self.param_dict)
class PReLU(nn.modules.activation.PReLU, FateTorchLayer):
def __init__(
self,
num_parameters=1,
init=0.25,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['num_parameters'] = num_parameters
self.param_dict['init'] = init
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.activation.PReLU.__init__(self, **self.param_dict)
class RReLU(nn.modules.activation.RReLU, FateTorchLayer):
def __init__(
self,
lower=0.125,
upper=0.3333333333333333,
inplace=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lower'] = lower
self.param_dict['upper'] = upper
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.RReLU.__init__(self, **self.param_dict)
class ReLU(nn.modules.activation.ReLU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ReLU.__init__(self, **self.param_dict)
class ReLU6(nn.modules.activation.ReLU6, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ReLU6.__init__(self, **self.param_dict)
class SELU(nn.modules.activation.SELU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.SELU.__init__(self, **self.param_dict)
class SiLU(nn.modules.activation.SiLU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.SiLU.__init__(self, **self.param_dict)
class Sigmoid(nn.modules.activation.Sigmoid, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Sigmoid.__init__(self, **self.param_dict)
class Softmax(nn.modules.activation.Softmax, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.Softmax.__init__(self, **self.param_dict)
class Softmax2d(nn.modules.activation.Softmax2d, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Softmax2d.__init__(self, **self.param_dict)
class Softmin(nn.modules.activation.Softmin, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.Softmin.__init__(self, **self.param_dict)
class Softplus(nn.modules.activation.Softplus, FateTorchLayer):
def __init__(self, beta=1, threshold=20, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['beta'] = beta
self.param_dict['threshold'] = threshold
self.param_dict.update(kwargs)
nn.modules.activation.Softplus.__init__(self, **self.param_dict)
class Softshrink(nn.modules.activation.Softshrink, FateTorchLayer):
def __init__(self, lambd=0.5, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lambd'] = lambd
self.param_dict.update(kwargs)
nn.modules.activation.Softshrink.__init__(self, **self.param_dict)
class Softsign(nn.modules.activation.Softsign, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Softsign.__init__(self, **self.param_dict)
class Tanh(nn.modules.activation.Tanh, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Tanh.__init__(self, **self.param_dict)
class Tanhshrink(nn.modules.activation.Tanhshrink, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Tanhshrink.__init__(self, **self.param_dict)
class Threshold(nn.modules.activation.Threshold, FateTorchLayer):
def __init__(self, threshold, value, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict['threshold'] = threshold
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.activation.Threshold.__init__(self, **self.param_dict)
class Conv1d(nn.modules.conv.Conv1d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.Conv1d.__init__(self, **self.param_dict)
class Conv2d(nn.modules.conv.Conv2d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.Conv2d.__init__(self, **self.param_dict)
class Conv3d(nn.modules.conv.Conv3d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.Conv3d.__init__(self, **self.param_dict)
class ConvTranspose1d(nn.modules.conv.ConvTranspose1d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.ConvTranspose1d.__init__(self, **self.param_dict)
class ConvTranspose2d(nn.modules.conv.ConvTranspose2d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.ConvTranspose2d.__init__(self, **self.param_dict)
class ConvTranspose3d(nn.modules.conv.ConvTranspose3d, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.ConvTranspose3d.__init__(self, **self.param_dict)
class LazyConv1d(nn.modules.conv.LazyConv1d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConv1d.__init__(self, **self.param_dict)
class LazyConv2d(nn.modules.conv.LazyConv2d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConv2d.__init__(self, **self.param_dict)
class LazyConv3d(nn.modules.conv.LazyConv3d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConv3d.__init__(self, **self.param_dict)
class LazyConvTranspose1d(nn.modules.conv.LazyConvTranspose1d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConvTranspose1d.__init__(self, **self.param_dict)
class LazyConvTranspose2d(nn.modules.conv.LazyConvTranspose2d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConvTranspose2d.__init__(self, **self.param_dict)
class LazyConvTranspose3d(nn.modules.conv.LazyConvTranspose3d, FateTorchLayer):
def __init__(
self,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
bias=True,
dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['dilation'] = dilation
self.param_dict['padding_mode'] = padding_mode
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.conv.LazyConvTranspose3d.__init__(self, **self.param_dict)
class _ConvNd(nn.modules.conv._ConvNd, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['transposed'] = transposed
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict.update(kwargs)
nn.modules.conv._ConvNd.__init__(self, **self.param_dict)
class _ConvTransposeMixin(nn.modules.conv._ConvTransposeMixin, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.conv._ConvTransposeMixin.__init__(self, **self.param_dict)
class _ConvTransposeNd(nn.modules.conv._ConvTransposeNd, FateTorchLayer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['in_channels'] = in_channels
self.param_dict['out_channels'] = out_channels
self.param_dict['kernel_size'] = kernel_size
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['transposed'] = transposed
self.param_dict['output_padding'] = output_padding
self.param_dict['groups'] = groups
self.param_dict['bias'] = bias
self.param_dict['padding_mode'] = padding_mode
self.param_dict.update(kwargs)
nn.modules.conv._ConvTransposeNd.__init__(self, **self.param_dict)
class _LazyConvXdMixin(nn.modules.conv._LazyConvXdMixin, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.conv._LazyConvXdMixin.__init__(self, **self.param_dict)
class Transformer(nn.modules.transformer.Transformer, FateTorchLayer):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
custom_encoder=None,
custom_decoder=None,
layer_norm_eps=1e-05,
batch_first=False,
norm_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['d_model'] = d_model
self.param_dict['nhead'] = nhead
self.param_dict['num_encoder_layers'] = num_encoder_layers
self.param_dict['num_decoder_layers'] = num_decoder_layers
self.param_dict['dim_feedforward'] = dim_feedforward
self.param_dict['dropout'] = dropout
self.param_dict['custom_encoder'] = custom_encoder
self.param_dict['custom_decoder'] = custom_decoder
self.param_dict['layer_norm_eps'] = layer_norm_eps
self.param_dict['batch_first'] = batch_first
self.param_dict['norm_first'] = norm_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.transformer.Transformer.__init__(self, **self.param_dict)
class TransformerDecoder(
nn.modules.transformer.TransformerDecoder,
FateTorchLayer):
def __init__(self, decoder_layer, num_layers, norm=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['norm'] = norm
self.param_dict['decoder_layer'] = decoder_layer
self.param_dict['num_layers'] = num_layers
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerDecoder.__init__(
self, **self.param_dict)
class TransformerDecoderLayer(
nn.modules.transformer.TransformerDecoderLayer,
FateTorchLayer):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
layer_norm_eps=1e-05,
batch_first=False,
norm_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim_feedforward'] = dim_feedforward
self.param_dict['dropout'] = dropout
self.param_dict['layer_norm_eps'] = layer_norm_eps
self.param_dict['batch_first'] = batch_first
self.param_dict['norm_first'] = norm_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['d_model'] = d_model
self.param_dict['nhead'] = nhead
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerDecoderLayer.__init__(
self, **self.param_dict)
class TransformerEncoder(
nn.modules.transformer.TransformerEncoder,
FateTorchLayer):
def __init__(
self,
encoder_layer,
num_layers,
norm=None,
enable_nested_tensor=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['norm'] = norm
self.param_dict['enable_nested_tensor'] = enable_nested_tensor
self.param_dict['encoder_layer'] = encoder_layer
self.param_dict['num_layers'] = num_layers
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerEncoder.__init__(
self, **self.param_dict)
class TransformerEncoderLayer(
nn.modules.transformer.TransformerEncoderLayer,
FateTorchLayer):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
layer_norm_eps=1e-05,
batch_first=False,
norm_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim_feedforward'] = dim_feedforward
self.param_dict['dropout'] = dropout
self.param_dict['layer_norm_eps'] = layer_norm_eps
self.param_dict['batch_first'] = batch_first
self.param_dict['norm_first'] = norm_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['d_model'] = d_model
self.param_dict['nhead'] = nhead
self.param_dict.update(kwargs)
nn.modules.transformer.TransformerEncoderLayer.__init__(
self, **self.param_dict)
class AdaptiveAvgPool1d(nn.modules.pooling.AdaptiveAvgPool1d, FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveAvgPool1d.__init__(self, **self.param_dict)
class AdaptiveAvgPool2d(nn.modules.pooling.AdaptiveAvgPool2d, FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveAvgPool2d.__init__(self, **self.param_dict)
class AdaptiveAvgPool3d(nn.modules.pooling.AdaptiveAvgPool3d, FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveAvgPool3d.__init__(self, **self.param_dict)
class AdaptiveMaxPool1d(nn.modules.pooling.AdaptiveMaxPool1d, FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveMaxPool1d.__init__(self, **self.param_dict)
class AdaptiveMaxPool2d(nn.modules.pooling.AdaptiveMaxPool2d, FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveMaxPool2d.__init__(self, **self.param_dict)
class AdaptiveMaxPool3d(nn.modules.pooling.AdaptiveMaxPool3d, FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling.AdaptiveMaxPool3d.__init__(self, **self.param_dict)
class AvgPool1d(nn.modules.pooling.AvgPool1d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['count_include_pad'] = count_include_pad
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.AvgPool1d.__init__(self, **self.param_dict)
class AvgPool2d(nn.modules.pooling.AvgPool2d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['count_include_pad'] = count_include_pad
self.param_dict['divisor_override'] = divisor_override
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.AvgPool2d.__init__(self, **self.param_dict)
class AvgPool3d(nn.modules.pooling.AvgPool3d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['count_include_pad'] = count_include_pad
self.param_dict['divisor_override'] = divisor_override
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.AvgPool3d.__init__(self, **self.param_dict)
class FractionalMaxPool2d(
nn.modules.pooling.FractionalMaxPool2d,
FateTorchLayer):
def __init__(
self,
kernel_size,
output_size=None,
output_ratio=None,
return_indices=False,
_random_samples=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict['output_ratio'] = output_ratio
self.param_dict['return_indices'] = return_indices
self.param_dict['_random_samples'] = _random_samples
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.FractionalMaxPool2d.__init__(
self, **self.param_dict)
class FractionalMaxPool3d(
nn.modules.pooling.FractionalMaxPool3d,
FateTorchLayer):
def __init__(
self,
kernel_size,
output_size=None,
output_ratio=None,
return_indices=False,
_random_samples=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict['output_ratio'] = output_ratio
self.param_dict['return_indices'] = return_indices
self.param_dict['_random_samples'] = _random_samples
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.FractionalMaxPool3d.__init__(
self, **self.param_dict)
class LPPool1d(nn.modules.pooling.LPPool1d, FateTorchLayer):
def __init__(
self,
norm_type,
kernel_size,
stride=None,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['norm_type'] = norm_type
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.LPPool1d.__init__(self, **self.param_dict)
class LPPool2d(nn.modules.pooling.LPPool2d, FateTorchLayer):
def __init__(
self,
norm_type,
kernel_size,
stride=None,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['norm_type'] = norm_type
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.LPPool2d.__init__(self, **self.param_dict)
class MaxPool1d(nn.modules.pooling.MaxPool1d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxPool1d.__init__(self, **self.param_dict)
class MaxPool2d(nn.modules.pooling.MaxPool2d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxPool2d.__init__(self, **self.param_dict)
class MaxPool3d(nn.modules.pooling.MaxPool3d, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxPool3d.__init__(self, **self.param_dict)
class MaxUnpool1d(nn.modules.pooling.MaxUnpool1d, FateTorchLayer):
def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxUnpool1d.__init__(self, **self.param_dict)
class MaxUnpool2d(nn.modules.pooling.MaxUnpool2d, FateTorchLayer):
def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxUnpool2d.__init__(self, **self.param_dict)
class MaxUnpool3d(nn.modules.pooling.MaxUnpool3d, FateTorchLayer):
def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling.MaxUnpool3d.__init__(self, **self.param_dict)
class _AdaptiveAvgPoolNd(
nn.modules.pooling._AdaptiveAvgPoolNd,
FateTorchLayer):
def __init__(self, output_size, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling._AdaptiveAvgPoolNd.__init__(self, **self.param_dict)
class _AdaptiveMaxPoolNd(
nn.modules.pooling._AdaptiveMaxPoolNd,
FateTorchLayer):
def __init__(self, output_size, return_indices=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['return_indices'] = return_indices
self.param_dict['output_size'] = output_size
self.param_dict.update(kwargs)
nn.modules.pooling._AdaptiveMaxPoolNd.__init__(self, **self.param_dict)
class _AvgPoolNd(nn.modules.pooling._AvgPoolNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.pooling._AvgPoolNd.__init__(self, **self.param_dict)
class _LPPoolNd(nn.modules.pooling._LPPoolNd, FateTorchLayer):
def __init__(
self,
norm_type,
kernel_size,
stride=None,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['norm_type'] = norm_type
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling._LPPoolNd.__init__(self, **self.param_dict)
class _MaxPoolNd(nn.modules.pooling._MaxPoolNd, FateTorchLayer):
def __init__(
self,
kernel_size,
stride=None,
padding=0,
dilation=1,
return_indices=False,
ceil_mode=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['stride'] = stride
self.param_dict['padding'] = padding
self.param_dict['dilation'] = dilation
self.param_dict['return_indices'] = return_indices
self.param_dict['ceil_mode'] = ceil_mode
self.param_dict['kernel_size'] = kernel_size
self.param_dict.update(kwargs)
nn.modules.pooling._MaxPoolNd.__init__(self, **self.param_dict)
class _MaxUnpoolNd(nn.modules.pooling._MaxUnpoolNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.pooling._MaxUnpoolNd.__init__(self, **self.param_dict)
class BatchNorm1d(nn.modules.batchnorm.BatchNorm1d, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.BatchNorm1d.__init__(self, **self.param_dict)
class BatchNorm2d(nn.modules.batchnorm.BatchNorm2d, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.BatchNorm2d.__init__(self, **self.param_dict)
class BatchNorm3d(nn.modules.batchnorm.BatchNorm3d, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.BatchNorm3d.__init__(self, **self.param_dict)
class LazyBatchNorm1d(nn.modules.batchnorm.LazyBatchNorm1d, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm.LazyBatchNorm1d.__init__(self, **self.param_dict)
class LazyBatchNorm2d(nn.modules.batchnorm.LazyBatchNorm2d, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm.LazyBatchNorm2d.__init__(self, **self.param_dict)
class LazyBatchNorm3d(nn.modules.batchnorm.LazyBatchNorm3d, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm.LazyBatchNorm3d.__init__(self, **self.param_dict)
class SyncBatchNorm(nn.modules.batchnorm.SyncBatchNorm, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
process_group=None,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['process_group'] = process_group
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm.SyncBatchNorm.__init__(self, **self.param_dict)
class _BatchNorm(nn.modules.batchnorm._BatchNorm, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm._BatchNorm.__init__(self, **self.param_dict)
class _LazyNormBase(nn.modules.batchnorm._LazyNormBase, FateTorchLayer):
def __init__(
self,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.batchnorm._LazyNormBase.__init__(self, **self.param_dict)
class _NormBase(nn.modules.batchnorm._NormBase, FateTorchLayer):
def __init__(
self,
num_features,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['eps'] = eps
self.param_dict['momentum'] = momentum
self.param_dict['affine'] = affine
self.param_dict['track_running_stats'] = track_running_stats
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['num_features'] = num_features
self.param_dict.update(kwargs)
nn.modules.batchnorm._NormBase.__init__(self, **self.param_dict)
class ConstantPad1d(nn.modules.padding.ConstantPad1d, FateTorchLayer):
def __init__(self, padding, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding.ConstantPad1d.__init__(self, **self.param_dict)
class ConstantPad2d(nn.modules.padding.ConstantPad2d, FateTorchLayer):
def __init__(self, padding, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding.ConstantPad2d.__init__(self, **self.param_dict)
class ConstantPad3d(nn.modules.padding.ConstantPad3d, FateTorchLayer):
def __init__(self, padding, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding.ConstantPad3d.__init__(self, **self.param_dict)
class ReflectionPad1d(nn.modules.padding.ReflectionPad1d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReflectionPad1d.__init__(self, **self.param_dict)
class ReflectionPad2d(nn.modules.padding.ReflectionPad2d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReflectionPad2d.__init__(self, **self.param_dict)
class ReflectionPad3d(nn.modules.padding.ReflectionPad3d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReflectionPad3d.__init__(self, **self.param_dict)
class ReplicationPad1d(nn.modules.padding.ReplicationPad1d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReplicationPad1d.__init__(self, **self.param_dict)
class ReplicationPad2d(nn.modules.padding.ReplicationPad2d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReplicationPad2d.__init__(self, **self.param_dict)
class ReplicationPad3d(nn.modules.padding.ReplicationPad3d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ReplicationPad3d.__init__(self, **self.param_dict)
class ZeroPad2d(nn.modules.padding.ZeroPad2d, FateTorchLayer):
def __init__(self, padding, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['padding'] = padding
self.param_dict.update(kwargs)
nn.modules.padding.ZeroPad2d.__init__(self, **self.param_dict)
class _ConstantPadNd(nn.modules.padding._ConstantPadNd, FateTorchLayer):
def __init__(self, value, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['value'] = value
self.param_dict.update(kwargs)
nn.modules.padding._ConstantPadNd.__init__(self, **self.param_dict)
class _ReflectionPadNd(nn.modules.padding._ReflectionPadNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.padding._ReflectionPadNd.__init__(self, **self.param_dict)
class _ReplicationPadNd(nn.modules.padding._ReplicationPadNd, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.padding._ReplicationPadNd.__init__(self, **self.param_dict)
class BCELoss(nn.modules.loss.BCELoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.BCELoss.__init__(self, **self.param_dict)
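# The loss wrappers below mirror the layer-wrapper pattern: constructor arguments
# are recorded in param_dict (via FateTorchLoss) before the underlying torch loss
# is initialized, so each loss is both callable and serializable.
# Illustrative sketch (hypothetical tensors pred and target):
#   loss_fn = BCELoss(reduction='sum')
#   loss_fn.param_dict['reduction']   # -> 'sum'
#   loss_fn(pred, target)             # identical to nn.BCELoss(reduction='sum')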
class BCEWithLogitsLoss(nn.modules.loss.BCEWithLogitsLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
pos_weight=None,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['pos_weight'] = pos_weight
self.param_dict.update(kwargs)
nn.modules.loss.BCEWithLogitsLoss.__init__(self, **self.param_dict)
class CTCLoss(nn.modules.loss.CTCLoss, FateTorchLoss):
def __init__(
self,
blank=0,
reduction='mean',
zero_infinity=False,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['blank'] = blank
self.param_dict['reduction'] = reduction
self.param_dict['zero_infinity'] = zero_infinity
self.param_dict.update(kwargs)
nn.modules.loss.CTCLoss.__init__(self, **self.param_dict)
class CosineEmbeddingLoss(nn.modules.loss.CosineEmbeddingLoss, FateTorchLoss):
def __init__(
self,
margin=0.0,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.CosineEmbeddingLoss.__init__(self, **self.param_dict)
class CrossEntropyLoss(nn.modules.loss.CrossEntropyLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction='mean',
label_smoothing=0.0,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['ignore_index'] = ignore_index
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['label_smoothing'] = label_smoothing
self.param_dict.update(kwargs)
nn.modules.loss.CrossEntropyLoss.__init__(self, **self.param_dict)
class GaussianNLLLoss(nn.modules.loss.GaussianNLLLoss, FateTorchLoss):
def __init__(self, **kwargs):
FateTorchLoss.__init__(self)
self.param_dict.update(kwargs)
nn.modules.loss.GaussianNLLLoss.__init__(self, **self.param_dict)
class HingeEmbeddingLoss(nn.modules.loss.HingeEmbeddingLoss, FateTorchLoss):
def __init__(
self,
margin=1.0,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.HingeEmbeddingLoss.__init__(self, **self.param_dict)
class HuberLoss(nn.modules.loss.HuberLoss, FateTorchLoss):
def __init__(self, reduction='mean', delta=1.0, **kwargs):
FateTorchLoss.__init__(self)
self.param_dict['reduction'] = reduction
self.param_dict['delta'] = delta
self.param_dict.update(kwargs)
nn.modules.loss.HuberLoss.__init__(self, **self.param_dict)
class KLDivLoss(nn.modules.loss.KLDivLoss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
log_target=False,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['log_target'] = log_target
self.param_dict.update(kwargs)
nn.modules.loss.KLDivLoss.__init__(self, **self.param_dict)
class L1Loss(nn.modules.loss.L1Loss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.L1Loss.__init__(self, **self.param_dict)
class MSELoss(nn.modules.loss.MSELoss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MSELoss.__init__(self, **self.param_dict)
class MarginRankingLoss(nn.modules.loss.MarginRankingLoss, FateTorchLoss):
def __init__(
self,
margin=0.0,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MarginRankingLoss.__init__(self, **self.param_dict)
class MultiLabelMarginLoss(
nn.modules.loss.MultiLabelMarginLoss,
FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MultiLabelMarginLoss.__init__(self, **self.param_dict)
class MultiLabelSoftMarginLoss(
nn.modules.loss.MultiLabelSoftMarginLoss,
FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MultiLabelSoftMarginLoss.__init__(
self, **self.param_dict)
class MultiMarginLoss(nn.modules.loss.MultiMarginLoss, FateTorchLoss):
def __init__(
self,
p=1,
margin=1.0,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['p'] = p
self.param_dict['margin'] = margin
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.MultiMarginLoss.__init__(self, **self.param_dict)
class NLLLoss(nn.modules.loss.NLLLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['ignore_index'] = ignore_index
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.NLLLoss.__init__(self, **self.param_dict)
class NLLLoss2d(nn.modules.loss.NLLLoss2d, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['ignore_index'] = ignore_index
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.NLLLoss2d.__init__(self, **self.param_dict)
class PoissonNLLLoss(nn.modules.loss.PoissonNLLLoss, FateTorchLoss):
def __init__(
self,
log_input=True,
full=False,
size_average=None,
eps=1e-08,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['log_input'] = log_input
self.param_dict['full'] = full
self.param_dict['size_average'] = size_average
self.param_dict['eps'] = eps
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.PoissonNLLLoss.__init__(self, **self.param_dict)
class SmoothL1Loss(nn.modules.loss.SmoothL1Loss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
beta=1.0,
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict['beta'] = beta
self.param_dict.update(kwargs)
nn.modules.loss.SmoothL1Loss.__init__(self, **self.param_dict)
class SoftMarginLoss(nn.modules.loss.SoftMarginLoss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.SoftMarginLoss.__init__(self, **self.param_dict)
class TripletMarginLoss(nn.modules.loss.TripletMarginLoss, FateTorchLoss):
def __init__(
self,
margin=1.0,
p=2.0,
eps=1e-06,
swap=False,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['margin'] = margin
self.param_dict['p'] = p
self.param_dict['eps'] = eps
self.param_dict['swap'] = swap
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss.TripletMarginLoss.__init__(self, **self.param_dict)
class TripletMarginWithDistanceLoss(
nn.modules.loss.TripletMarginWithDistanceLoss,
FateTorchLoss):
def __init__(self, **kwargs):
FateTorchLoss.__init__(self)
self.param_dict.update(kwargs)
nn.modules.loss.TripletMarginWithDistanceLoss.__init__(
self, **self.param_dict)
class _Loss(nn.modules.loss._Loss, FateTorchLoss):
def __init__(
self,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss._Loss.__init__(self, **self.param_dict)
class _WeightedLoss(nn.modules.loss._WeightedLoss, FateTorchLoss):
def __init__(
self,
weight=None,
size_average=None,
reduce=None,
reduction='mean',
**kwargs):
FateTorchLoss.__init__(self)
self.param_dict['weight'] = weight
self.param_dict['size_average'] = size_average
self.param_dict['reduce'] = reduce
self.param_dict['reduction'] = reduction
self.param_dict.update(kwargs)
nn.modules.loss._WeightedLoss.__init__(self, **self.param_dict)
| 81,778 | 32.406454 | 79 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/interactive.py
|
import torch as t
from torch.nn import ReLU, Linear, LazyLinear, Tanh, Sigmoid, Dropout, Sequential
from federatedml.nn.backend.torch.base import FateTorchLayer
class InteractiveLayer(t.nn.Module, FateTorchLayer):
r"""A :class: InteractiveLayer.
An interface for InteractiveLayer. In interactive layer, the forward method is:
out = activation( Linear(guest_input) + Linear(host_0_input) + Linear(host_1_input) ..)
Args:
out_dim: int, the output dimension of InteractiveLayer
host_num: int, specify the number of host party, default is 1, need to modify this parameter
when running multi-party modeling
guest_dim: int or None, the input dimension of guest features, if None, will use LazyLinear layer
that automatically infers the input dimension
host_dim: int, or None:
int: the input dimension of all host features
None: automatically infer the input dimension of all host features
activation: str, support relu, tanh, sigmoid
dropout: float in 0-1, if None, dropout is disabled
guest_bias: bias for guest linear layer
host_bias: bias for host linear layers
need_guest: if false, will ignore the input of guest bottom model
"""
def __init__(
self,
out_dim,
guest_dim=None,
host_num=1,
host_dim=None,
activation='relu',
dropout=None,
guest_bias=True,
host_bias=True,
need_guest=True,
):
t.nn.Module.__init__(self)
FateTorchLayer.__init__(self)
self.activation = None
if activation is not None:
if activation.lower() == 'relu':
self.activation = ReLU()
elif activation.lower() == 'tanh':
self.activation = Tanh()
elif activation.lower() == 'sigmoid':
self.activation = Sigmoid()
else:
                raise ValueError(
                    'activation {} is not supported; available: relu, tanh, sigmoid'.format(activation))
self.dropout = None
if dropout is not None:
assert isinstance(dropout, float), 'dropout must be a float'
self.dropout = Dropout(p=dropout)
        assert isinstance(out_dim, int) and out_dim >= 0, 'out_dim must be an int >= 0'
self.param_dict['out_dim'] = out_dim
self.param_dict['activation'] = activation
self.param_dict['dropout'] = dropout
self.param_dict['need_guest'] = need_guest
        assert isinstance(
            host_num, int) and host_num >= 1, 'host_num must be an int >= 1'
self.param_dict['host_num'] = host_num
if guest_dim is not None:
assert isinstance(guest_dim, int)
if host_dim is not None:
assert isinstance(host_dim, int)
self.guest_bias = guest_bias
self.param_dict['guest_dim'] = guest_dim
self.param_dict['host_dim'] = host_dim
self.param_dict['guest_bias'] = guest_bias
self.param_dict['host_bias'] = host_bias
if need_guest:
if guest_dim is None:
self.guest_model = LazyLinear(out_dim, guest_bias)
else:
self.guest_model = Linear(guest_dim, out_dim, guest_bias)
else:
self.guest_model = None
self.out_dim = out_dim
self.host_dim = host_dim
self.host_bias = host_bias
        self.need_guest = need_guest
        self.host_model = t.nn.ModuleList()
for i in range(host_num):
self.host_model.append(self.make_host_model())
        # chain activation and dropout; an empty Sequential acts as the identity,
        # which keeps forward() valid even when no activation is configured
        act_modules = [m for m in (self.activation, self.dropout) if m is not None]
        self.act_seq = Sequential(*act_modules)
def lazy_to_linear(self, guest_dim=None, host_dims=None):
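        # convert LazyLinear layers to concrete Linear layers once the input
        # dimensions are known (e.g. after feature shapes become available)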
if isinstance(
self.guest_model,
t.nn.LazyLinear) and guest_dim is not None:
self.guest_model = t.nn.Linear(
guest_dim, self.out_dim, bias=self.guest_bias)
if isinstance(
self.host_model[0],
t.nn.LazyLinear) and host_dims is not None:
new_model_list = t.nn.ModuleList()
for dim in host_dims:
new_model_list.append(
t.nn.Linear(
dim,
self.out_dim,
bias=self.host_bias))
self.host_model = new_model_list
def make_host_model(self):
if self.host_dim is None:
return LazyLinear(self.out_dim, self.host_bias)
else:
return Linear(self.host_dim, self.out_dim, self.host_bias)
def forward(self, x_guest, x_host):
if self.need_guest:
g_out = self.guest_model(x_guest)
else:
g_out = 0
h_out = None
if isinstance(x_host, list):
for m, data in zip(self.host_model, x_host):
out_ = m(data)
if h_out is None:
h_out = out_
else:
h_out += out_
else:
h_out = self.host_model[0](x_host)
        # apply activation and dropout (if configured)
        return self.act_seq(g_out + h_out)
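# Minimal usage sketch (illustrative, not part of the original module): one guest
# party with 10 features and two host parties with 5 and 7 features; the input
# dimensions of the lazy linear layers are inferred on the first forward pass.
if __name__ == '__main__':
    layer = InteractiveLayer(out_dim=8, host_num=2, dropout=0.1)
    guest_x = t.randn(4, 10)
    host_x = [t.randn(4, 5), t.randn(4, 7)]
    out = layer(guest_x, host_x)
    print(out.shape)  # torch.Size([4, 8])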
| 5,516 | 33.917722 | 113 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/__init__.py
|
try:
from federatedml.nn.backend.torch import nn, init, operation, optim, serialization
except ImportError:
nn, init, operation, optim, serialization = None, None, None, None, None
__all__ = ['nn', 'init', 'operation', 'optim', 'serialization']
| 254 | 35.428571 | 86 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/operation.py
|
import torch as t
import copy
from torch.nn import Module
class OpBase(object):
def __init__(self):
self.param_dict = {}
def to_dict(self):
ret = copy.deepcopy(self.param_dict)
ret['op'] = type(self).__name__
return ret
class Astype(Module, OpBase):
def __init__(self, cast_type: str):
OpBase.__init__(self)
Module.__init__(self)
assert cast_type in [
'float',
'int',
'bool',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
'float16']
self.param_dict['cast_type'] = cast_type
self.cast_type = cast_type
self.cast_type_map = {
'float': t.float,
'int': t.int,
'bool': t.bool,
'float32': t.float32,
'float64': t.float64,
'float16': t.float16,
'int8': t.int8,
'int16': t.int16,
'int32': t.int32,
'int64': t.int64,
}
def forward(self, tensor: t.Tensor, **kwargs):
return tensor.type(self.cast_type_map[self.cast_type])
class Flatten(Module, OpBase):
def __init__(self, start_dim=0, end_dim=-1):
OpBase.__init__(self)
Module.__init__(self)
self.param_dict['start_dim'] = start_dim
self.param_dict['end_dim'] = end_dim
def forward(self, tensor):
return tensor.flatten(**self.param_dict)
class Reshape(Module, OpBase):
def __init__(self, shape):
OpBase.__init__(self)
Module.__init__(self)
assert isinstance(shape, tuple) or isinstance(shape, list)
self.shape = shape
self.param_dict['shape'] = list(shape)
def forward(self, tensor: t.Tensor):
return tensor.reshape(shape=self.shape)
class Index(Module, OpBase):
def __init__(self, index):
OpBase.__init__(self)
Module.__init__(self)
assert isinstance(index, int)
self.param_dict['index'] = index
def forward(self, content):
return content[self.param_dict['index']]
class Select(Module, OpBase):
def __init__(self, dim, idx):
OpBase.__init__(self)
Module.__init__(self)
self.param_dict = {'dim': dim, 'index': idx}
def forward(self, tensor):
return tensor.select(self.param_dict['dim'], self.param_dict['index'])
class SelectRange(Module, OpBase):
def __init__(self, dim, start, end):
OpBase.__init__(self)
Module.__init__(self)
self.param_dict = {'dim': dim, 'start': start, 'end': end}
def forward(self, tensor):
return tensor.select(
self.param_dict['dim'], -1)[self.param_dict['start']: self.param_dict['end']]
class Sum(Module, OpBase):
def __init__(self, dim):
OpBase.__init__(self)
Module.__init__(self)
assert isinstance(dim, int)
self.param_dict['dim'] = dim
def forward(self, tensor):
return tensor.sum(dim=self.param_dict['dim'])
class Squeeze(Module, OpBase):
def __init__(self, **kwargs):
OpBase.__init__(self)
Module.__init__(self)
def forward(self, tensor: t.Tensor):
return tensor.squeeze()
class Unsqueeze(Sum, OpBase):
    # inherits Sum only to reuse its dim-storing __init__
    def __init__(self, dim):
        super(Unsqueeze, self).__init__(dim)
def forward(self, tensor: t.Tensor):
return tensor.unsqueeze(self.param_dict['dim'])
| 3,475 | 23.652482 | 89 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/serialization.py
|
import copy
import inspect
from collections import OrderedDict
try:
from torch.nn import Sequential as tSeq
from federatedml.nn.backend.torch import optim, init, nn
from federatedml.nn.backend.torch import operation
from federatedml.nn.backend.torch.base import Sequential, get_torch_instance
from federatedml.nn.backend.torch.cust import CustModel, CustLoss
from federatedml.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def recover_layer_from_dict(nn_define, nn_dict):
init_param_dict = copy.deepcopy(nn_define)
if 'layer' in nn_define:
class_name = nn_define['layer']
init_param_dict.pop('layer')
elif 'op' in nn_define:
class_name = nn_define['op']
init_param_dict.pop('op')
else:
        raise ValueError(
            'no layer or operation info found in nn define, please check your layer config and make '
            'sure they are correct for pytorch backend')
if 'initializer' in init_param_dict:
init_param_dict.pop('initializer')
# find corresponding class
if class_name == CustModel.__name__:
nn_layer_class = CustModel
elif class_name == InteractiveLayer.__name__:
nn_layer_class = InteractiveLayer
else:
nn_layer_class = nn_dict[class_name]
# create layer or Module
    if nn_layer_class == CustModel:  # convert to a pytorch model
layer: CustModel = CustModel(module_name=init_param_dict['module_name'],
class_name=init_param_dict['class_name'],
**init_param_dict['param'])
layer = layer.get_pytorch_model()
elif nn_layer_class == InteractiveLayer:
layer: InteractiveLayer = InteractiveLayer(**init_param_dict)
else:
layer = get_torch_instance(nn_layer_class, init_param_dict)
# initialize if there are configs
if 'initializer' in nn_define:
if 'weight' in nn_define['initializer']:
init_para = nn_define['initializer']['weight']
init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
init_func(layer, **init_para['param'])
if 'bias' in nn_define['initializer']:
init_para = nn_define['initializer']['bias']
init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
init_func(layer, init='bias', **init_para['param'])
return layer, class_name
def recover_sequential_from_dict(nn_define):
nn_define_dict = nn_define
nn_dict = dict(inspect.getmembers(nn))
op_dict = dict(inspect.getmembers(operation))
nn_dict.update(op_dict)
class_name_list = []
try:
        # submitted models carry int prefixes that keep the layers in order
add_dict = OrderedDict()
keys = list(nn_define_dict.keys())
keys = sorted(keys, key=lambda x: int(x.split('-')[0]))
for k in keys:
layer, class_name = recover_layer_from_dict(nn_define_dict[k], nn_dict)
add_dict[k] = layer
class_name_list.append(class_name)
except BaseException:
add_dict = OrderedDict()
for k, v in nn_define_dict.items():
layer, class_name = recover_layer_from_dict(v, nn_dict)
add_dict[k] = layer
class_name_list.append(class_name)
if len(class_name_list) == 1 and class_name_list[0] == CustModel.__name__:
        # if there is only a CustModel, return that model directly
return list(add_dict.values())[0]
else:
return tSeq(add_dict)
def recover_optimizer_from_dict(define_dict):
opt_dict = dict(inspect.getmembers(optim))
from federatedml.util import LOGGER
LOGGER.debug('define dict is {}'.format(define_dict))
if 'optimizer' not in define_dict:
raise ValueError('please specify optimizer type in the json config')
opt_class = opt_dict[define_dict['optimizer']]
param_dict = copy.deepcopy(define_dict)
if 'optimizer' in param_dict:
param_dict.pop('optimizer')
if 'config_type' in param_dict:
param_dict.pop('config_type')
return opt_class(**param_dict)
def recover_loss_fn_from_dict(define_dict):
loss_fn_dict = dict(inspect.getmembers(nn))
if 'loss_fn' not in define_dict:
raise ValueError('please specify loss function in the json config')
param_dict = copy.deepcopy(define_dict)
param_dict.pop('loss_fn')
if define_dict['loss_fn'] == CustLoss.__name__:
return CustLoss(loss_module_name=param_dict['loss_module_name'],
class_name=param_dict['class_name'],
**param_dict['param']).get_pytorch_model()
else:
return loss_fn_dict[define_dict['loss_fn']](**param_dict)
if __name__ == '__main__':
pass
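# For reference, a minimal nn_define accepted by recover_sequential_from_dict()
# looks like the sketch below (real defines are produced by Sequential.to_dict()
# after fate_torch_hook; keys carry an int prefix so layers stay ordered):
#
#   {
#       "0-Linear": {"layer": "Linear", "in_features": 10, "out_features": 1, "bias": True},
#       "1-Sigmoid": {"layer": "Sigmoid"}
#   }
#
# Each entry names a class via 'layer' (or 'op' for operation.py classes) plus
# its constructor kwargs; an optional 'initializer' block applies weight/bias
# init functions after the layer is built.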
| 4,832 | 36.757813 | 100 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/torch_modules_extract/extract_pytorch_modules.py
|
import inspect
from torch.nn.modules import linear, activation, rnn, dropout, sparse, pooling, conv, transformer, batchnorm
from torch.nn.modules import padding, pixelshuffle
from torch.nn.modules import loss
class Required(object):
def __init__(self):
pass
def __repr__(self):
return '(Required Parameter)'
def get_all_class_obj(module, key_word=''):
members = inspect.getmembers(module)
rs = []
module_name = None
for name, obj in members:
if inspect.isclass(obj):
if 'modules.' + key_word in obj.__module__:
rs.append(obj)
# print(obj)
module_name = obj.__module__.split('.')[-1]
return rs, module_name
def extract_init_param(class_):
args = inspect.getfullargspec(class_.__init__)
print(class_)
print(args)
keys = args[0][1:]
if len(keys) == 0:
return {}
defaults = args[3]
args_map = {}
print(keys)
print(defaults)
if defaults is not None:
for idx, i in enumerate(keys[-len(defaults):]):
print(args_map)
print(defaults)
args_map[i] = defaults[idx]
for i in keys:
if i not in args_map:
args_map[i] = Required()
return args_map
def code_assembly(param, nn_class, module_name):
if module_name == 'loss':
parent_class = 'FateTorchLoss'
else:
parent_class = 'FateTorchLayer'
para_str = ""
non_default_param = ""
init_str = """"""
for k, v in param.items():
new_para = "\n self.param_dict['{}'] = {}".format(k, k)
init_str += new_para
if isinstance(v, Required):
non_default_param += str(k)
non_default_param += ', '
continue
para_str += str(k)
if isinstance(v, str):
para_str += "='{}'".format(v)
else:
para_str += "={}".format(str(v))
para_str += ', '
para_str = non_default_param + para_str
init_ = """
def __init__(self, {}**kwargs):
{}.__init__(self){}
self.param_dict.update(kwargs)
nn.modules.{}.{}.__init__(self, **self.param_dict)
""".format(para_str, parent_class, init_str, module_name, nn_class)
code = """
class {}({}, {}):
{}
""".format(nn_class, 'nn.modules.{}.{}'.format(module_name, nn_class), parent_class, init_)
return code
if __name__ == '__main__':
rs1 = get_all_class_obj(linear, 'linear')
rs2 = get_all_class_obj(rnn, 'rnn')
rs3 = get_all_class_obj(sparse, 'sparse')
rs4 = get_all_class_obj(dropout, 'dropout')
rs5 = get_all_class_obj(activation, 'activation')
rs6 = get_all_class_obj(conv, 'conv')
rs7 = get_all_class_obj(transformer, 'transformer')
rs8 = get_all_class_obj(pooling, 'pooling')
rs9 = get_all_class_obj(batchnorm, 'batchnorm')
rs10 = get_all_class_obj(padding, 'padding')
    rs11 = get_all_class_obj(pixelshuffle, 'pixelshuffle')
rs12 = get_all_class_obj(loss, 'loss')
module_str = """"""
module_str += "from torch import nn\n\n"
for rs in [rs1, rs2, rs3, rs4, rs5, rs6, rs7, rs8, rs9, rs10, rs11, rs12]:
module_name = rs[1]
for i in rs[0]:
# print(i)
param = extract_init_param(i)
class_str = code_assembly(param, i.__name__, module_name)
module_str += class_str
    with open('../_nn.py', 'w') as f:
        f.write(module_str)
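# For illustration, code_assembly() above emits wrapper classes of roughly this
# shape into _nn.py (sketched from the template, not copied from real output):
#
#   class Linear(nn.modules.linear.Linear, FateTorchLayer):
#       def __init__(self, in_features, out_features, bias=True, **kwargs):
#           FateTorchLayer.__init__(self)
#           self.param_dict['in_features'] = in_features
#           self.param_dict['out_features'] = out_features
#           self.param_dict['bias'] = bias
#           self.param_dict.update(kwargs)
#           nn.modules.linear.Linear.__init__(self, **self.param_dict)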
| 3,479 | 27.064516 | 108 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/torch_modules_extract/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/torch_modules_extract/extract_pytorch_optim.py
|
import inspect
from torch import optim
from federatedml.nn.backend.torch.torch_modules_extract.extract_pytorch_modules import extract_init_param, Required
from torch.optim.optimizer import required
def code_assembly(param, nn_class):
para_str = ""
non_default_param = ""
init_str = """"""
special_param = ''
for k, v in param.items():
if k == 'params':
            v = None
special_param = k + '=' + str(v) + ', '
continue
else:
new_para = "\n self.param_dict['{}'] = {}".format(k, k)
init_str += new_para
if isinstance(v, Required) or v == required:
non_default_param += str(k)
non_default_param += ', '
continue
para_str += str(k)
if isinstance(v, str):
para_str += "='{}'".format(v)
else:
para_str += "={}".format(str(v))
para_str += ', '
para_str = non_default_param + special_param + para_str
init_ = """
def __init__(self, {}):
FateTorchOptimizer.__init__(self){}
self.torch_class = type(self).__bases__[0]
if params is None:
return
params = self.check_params(params)
self.torch_class.__init__(self, params, **self.param_dict)
# optim.{}.__init__(self, **self.param_dict)
def __repr__(self):
try:
return type(self).__bases__[0].__repr__(self)
except:
return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
""".format(para_str, init_str, nn_class, nn_class)
code = """
class {}(optim.{}, FateTorchOptimizer):
{}
""".format(nn_class, nn_class, init_)
return code
if __name__ == '__main__':
memb = inspect.getmembers(optim)
module_str = """"""
module_str += "from torch import optim\nfrom federatedml.nn.backend.torch.base import FateTorchLayer, Sequential\n" \
"from federatedml.nn.backend.torch.base import FateTorchOptimizer\n\n"
for k, v in memb:
if inspect.isclass(v) and k != 'Optimizer':
param = extract_init_param(v)
code = code_assembly(param, k)
module_str += code
    with open('../_optim.py', 'w') as f:
        f.write(module_str)
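# Note: the emitted optimizer wrappers (e.g. Adam(optim.Adam, FateTorchOptimizer))
# keep `params=None` as the first argument and return before calling the torch
# __init__ when no parameters are bound, so a config like Adam(lr=0.01) can be
# serialized first and instantiated against model parameters later.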
| 2,304 | 27.45679 | 121 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/torch/test/test_cust_model.py
|
from federatedml.nn.backend.torch import nn, init
import json
from federatedml.nn.backend.torch import serialization as s
import torch as t
from federatedml.nn.backend.torch.import_hook import fate_torch_hook
from federatedml.nn.backend.torch.cust import CustModel
fate_torch_hook(t)
cust_resnet = CustModel(name='resnet')
transformer = nn.Transformer()
seq = nn.Sequential(
nn.Linear(10, 10),
CustModel(name='lr', param={'input_size': 2}),
CustModel(name='mf', param={'u_num': 100, 'i_num': 100, 'embd_dim': 32}),
CustModel(name='resnet'),
transformer,
)
nn_define_json = json.dumps(seq.to_dict(), indent=3)
nn_define = seq.to_dict()
recover_seq = s.recover_sequential_from_dict(nn_define)
| 713 | 31.454545 | 77 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/utils/deepspeed_util.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
try:
import deepspeed
except ModuleNotFoundError:
from federatedml.util import LOGGER
LOGGER.warning("Try to Import DeepSpeed ERROR, Will Not Support Using DeepSpeed")
def deepspeed_init(model, ds_config):
deepspeed.init_distributed()
model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
model, optimizer, _, _ = deepspeed.initialize(model=model,
model_parameters=model_parameters,
config=ds_config)
return model, optimizer
def is_zero3(ds_config):
return ds_config.get("zero_optimization", {}).get("stage", -1) == 3
def init_deepspeed_env(ds_config):
"""
to enabled deepspeed stage3, these should be call first
"""
if is_zero3(ds_config):
from transformers.deepspeed import HfDeepSpeedConfig
HfDeepSpeedConfig(ds_config)
def gather_model(model):
while hasattr(model, "module"):
model = model.module
for _, p in model.named_parameters():
p.all_gather()
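if __name__ == '__main__':
    # Illustrative only: a minimal ds_config of the shape is_zero3() and
    # init_deepspeed_env() inspect (values are placeholder assumptions,
    # not a tuned DeepSpeed configuration).
    sample_ds_config = {
        "train_micro_batch_size_per_gpu": 1,
        "zero_optimization": {"stage": 3},
    }
    print(is_zero3(sample_ds_config))  # True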
| 1,695 | 30.407407 | 85 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/utils/data.py
|
import numpy as np
from torch.utils.data import Dataset as torchDataset
from federatedml.util import LOGGER
from federatedml.nn.dataset.base import Dataset, get_dataset_class
from federatedml.nn.dataset.image import ImageDataset
from federatedml.nn.dataset.table import TableDataset
from federatedml.nn.dataset.graph import GraphDataset
def try_dataset_class(dataset_class, path, param):
# try default dataset
try:
dataset_inst: Dataset = dataset_class(**param)
dataset_inst.load(path)
return dataset_inst
except Exception as e:
LOGGER.warning('try to load dataset failed, exception :{}'.format(e))
return None
def load_dataset(dataset_name, data_path_or_dtable, param, dataset_cache: dict):
# load dataset class
if isinstance(data_path_or_dtable, str):
cached_id = data_path_or_dtable
else:
cached_id = str(id(data_path_or_dtable))
if cached_id in dataset_cache:
LOGGER.debug('use cached dataset, cached id {}'.format(cached_id))
return dataset_cache[cached_id]
if dataset_name is None or dataset_name == '':
# automatically match default dataset
LOGGER.info('dataset is not specified, use auto inference')
for ds_class in [TableDataset, ImageDataset, GraphDataset]:
dataset_inst = try_dataset_class(
ds_class, data_path_or_dtable, param=param)
if dataset_inst is not None:
break
if dataset_inst is None:
raise ValueError(
'cannot find default dataset that can successfully load data from path {}, '
                'please check the warning message for error details'.format(data_path_or_dtable))
else:
# load specified dataset
dataset_class = get_dataset_class(dataset_name)
dataset_inst = dataset_class(**param)
dataset_inst.load(data_path_or_dtable)
dataset_cache[cached_id] = dataset_inst
return dataset_inst
def get_ret_predict_table(id_table, pred_table, classes, partitions, computing_session):
id_dtable = computing_session.parallelize(
id_table, partition=partitions, include_key=True)
pred_dtable = computing_session.parallelize(
pred_table, partition=partitions, include_key=True)
return id_dtable, pred_dtable
def add_match_id(id_table: list, dataset_inst: TableDataset):
assert isinstance(dataset_inst, TableDataset), 'when using match id your dataset must be a Table Dataset'
for id_inst in id_table:
id_inst[1].inst_id = dataset_inst.match_ids[id_inst[0]]
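# Usage sketch for load_dataset() (path and cache are placeholders): with an
# empty dataset_name it tries TableDataset -> ImageDataset -> GraphDataset in
# turn, and memoizes the loaded instance per path / dtable id:
#
#   cache = {}
#   ds = load_dataset(dataset_name='', data_path_or_dtable='/path/to/train.csv',
#                     param={}, dataset_cache=cache)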
| 2,598 | 35.605634 | 109 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/utils/rng.py
|
import random
from fate_arch.session import computing_session
import numpy as np
from federatedml.secureprotol.paillier_tensor import PaillierTensor
BITS = 10
MIXED_RATE = 0.5
class RandomNumberGenerator(object):
def __init__(self):
self.lower_bound = -2 ** BITS
self.upper_bound = 2 ** BITS
@staticmethod
def get_size_by_shape(shape):
size = 1
for dim in shape:
size *= dim
return size
def generate_random_number_1d(
self,
size,
mixed_rate=MIXED_RATE,
keep=None):
if keep is not None:
ret = [0] * size
for i in range(size):
if keep[i]:
rng = random.SystemRandom().uniform(
self.lower_bound,
self.upper_bound) if np.random.rand() < mixed_rate else np.random.uniform(
self.lower_bound,
self.upper_bound)
ret[i] = rng
return np.array(ret)[keep]
else:
return [
random.SystemRandom().uniform(
self.lower_bound,
self.upper_bound) if np.random.rand() < mixed_rate else np.random.uniform(
self.lower_bound,
self.upper_bound) for _ in range(size)]
def generate_random_number(
self,
shape=None,
mixed_rate=MIXED_RATE,
keep=None):
if keep is not None:
size = self.get_size_by_shape(keep.shape)
return self.generate_random_number_1d(
size, mixed_rate=mixed_rate, keep=keep)
else:
size = self.get_size_by_shape(shape)
return np.reshape(
self.generate_random_number_1d(
size, mixed_rate=mixed_rate), shape)
def fast_generate_random_number(
self,
shape,
partition=10,
mixed_rate=MIXED_RATE,
keep_table=None):
if keep_table:
tb = keep_table.mapValues(
lambda keep_array: self.generate_random_number(
keep=keep_array, mixed_rate=mixed_rate))
return PaillierTensor(tb)
else:
tb = computing_session.parallelize(
[None for _ in range(shape[0])], include_key=False, partition=partition)
tb = tb.mapValues(lambda val: self.generate_random_number(
shape[1:], mixed_rate=mixed_rate))
return PaillierTensor(tb)
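if __name__ == '__main__':
    # Quick illustration: a (2, 3) noise mask drawn uniformly from
    # [-2**BITS, 2**BITS); about MIXED_RATE of the entries come from the
    # cryptographic SystemRandom source, the rest from numpy's PRNG.
    rng = RandomNumberGenerator()
    print(rng.generate_random_number(shape=(2, 3)))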
| 2,603 | 30.373494 | 98 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/utils/common.py
|
import torch as t
import numpy as np
import tempfile
ML_PATH = 'federatedml.nn'
LLM_PATH = "fate_llm"
HOMOMODELMETA = "HomoNNMeta"
HOMOMODELPARAM = "HomoNNParam"
def global_seed(seed):
# set random seed of torch
t.manual_seed(seed)
t.cuda.manual_seed_all(seed)
t.backends.cudnn.deterministic = True
def get_homo_model_dict(param, meta):
return {HOMOMODELPARAM: param, # param
HOMOMODELMETA: meta} # meta
def get_homo_param_meta(model_dict):
return model_dict.get(HOMOMODELPARAM), model_dict.get(HOMOMODELMETA)
# read model from model bytes
def recover_model_bytes(model_bytes):
with tempfile.TemporaryFile() as f:
f.write(model_bytes)
f.seek(0)
model_dict = t.load(f)
return model_dict
def get_torch_model_bytes(model_dict):
with tempfile.TemporaryFile() as f:
t.save(model_dict, f)
f.seek(0)
model_saved_bytes = f.read()
return model_saved_bytes
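if __name__ == '__main__':
    # Round-trip sanity check: serialize a small state dict to bytes and
    # recover it, mirroring how the HomoNN components export/import models.
    linear = t.nn.Linear(4, 2)
    saved = get_torch_model_bytes({'model': linear.state_dict()})
    recovered = recover_model_bytes(saved)
    print(list(recovered['model'].keys()))  # ['weight', 'bias']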
| 968 | 20.065217 | 72 |
py
|
FATE
|
FATE-master/python/federatedml/nn/backend/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/backend/utils/distributed_util.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch.distributed as dist
def is_rank_0():
return dist.get_rank() == 0
def is_distributed():
return dist.is_initialized()
def get_num_workers():
return dist.get_world_size()
| 815 | 27.137931 | 75 |
py
|
FATE
|
FATE-master/python/federatedml/nn/homo/server.py
|
from federatedml.model_base import ModelBase
from federatedml.param.homo_nn_param import HomoNNParam
from federatedml.nn.homo.trainer.trainer_base import get_trainer_class
from federatedml.model_base import MetricMeta
from federatedml.util import LOGGER
from federatedml.nn.homo.client import NNModelExporter
from federatedml.callbacks.model_checkpoint import ModelCheckpoint
from federatedml.nn.backend.utils.common import get_homo_param_meta, recover_model_bytes
class HomoNNServer(ModelBase):
def __init__(self):
super(HomoNNServer, self).__init__()
self.model_param = HomoNNParam()
self.trainer = None
self.trainer_param = None
# arbiter side models
self.model = None
self.model_loaded = False
# arbiter saved extra status
self.exporter = NNModelExporter()
self.extra_data = {}
# warm start
self.warm_start_iter = None
def export_model(self):
if self.model is None:
LOGGER.debug('export an empty model')
            return self.exporter.export_model_dict()  # return an empty model dict
return self.model
def load_model(self, model_dict):
if model_dict is not None:
model_dict = list(model_dict["model"].values())[0]
self.model = model_dict
param, meta = get_homo_param_meta(self.model)
# load extra data
self.extra_data = recover_model_bytes(param.extra_data_bytes)
self.warm_start_iter = param.epoch_idx
    def _init_model(self, param: HomoNNParam):
train_param = param.trainer.to_dict()
self.trainer = train_param['trainer_name']
self.trainer_param = train_param['param']
LOGGER.debug('trainer and trainer param {} {}'.format(
self.trainer, self.trainer_param))
def fit(self, data_instance=None, validate_data=None):
# fate loss callback setting
self.callback_meta(
"loss", "train", MetricMeta(
name="train", metric_type="LOSS", extra_metas={
"unit_name": "aggregate_round"}))
# display warmstart iter
if self.component_properties.is_warm_start:
self.callback_warm_start_init_iter(self.warm_start_iter)
# initialize trainer
trainer_class = get_trainer_class(self.trainer)
LOGGER.info('trainer class is {}'.format(trainer_class))
# init trainer
trainer_inst = trainer_class(**self.trainer_param)
# set tracker for fateboard callback
trainer_inst.set_tracker(self.tracker)
# set exporter
trainer_inst.set_model_exporter(self.exporter)
        # set checkpoint
trainer_inst.set_checkpoint(ModelCheckpoint(self, save_freq=1))
# run trainer server procedure
trainer_inst.server_aggregate_procedure(self.extra_data)
# aggregation process is done, get exported model if any
self.model = trainer_inst.get_cached_model()
self.set_summary(trainer_inst.get_summary())
def predict(self, data_inst):
return None
| 3,107 | 35.139535 | 88 |
py
|
FATE
|
FATE-master/python/federatedml/nn/homo/client.py
|
import json
import torch
import inspect
from fate_arch.computing.non_distributed import LocalData
from fate_arch.computing import is_table
from federatedml.model_base import ModelBase
from federatedml.nn.homo.trainer.trainer_base import get_trainer_class, TrainerBase
from federatedml.nn.backend.utils.data import load_dataset
from federatedml.nn.backend.utils import deepspeed_util
from federatedml.param.homo_nn_param import HomoNNParam
from federatedml.nn.backend.torch import serialization as s
from federatedml.nn.backend.torch.base import FateTorchOptimizer
from federatedml.model_base import MetricMeta
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.nn.homo.trainer.trainer_base import StdReturnFormat
from federatedml.nn.backend.utils.common import global_seed, get_homo_model_dict, get_homo_param_meta, recover_model_bytes, get_torch_model_bytes
from federatedml.callbacks.model_checkpoint import ModelCheckpoint
from federatedml.statistic.data_overview import check_with_inst_id
from federatedml.nn.homo.trainer.trainer_base import ExporterBase
from fate_arch.session import computing_session
from federatedml.nn.backend.utils.data import get_ret_predict_table
from federatedml.nn.backend.utils.data import add_match_id
from federatedml.protobuf.generated.homo_nn_model_param_pb2 import HomoNNParam as HomoNNParamPB
from federatedml.protobuf.generated.homo_nn_model_meta_pb2 import HomoNNMeta as HomoNNMetaPB
class NNModelExporter(ExporterBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def export_model_dict(
self,
model=None,
optimizer=None,
model_define=None,
optimizer_define=None,
loss_define=None,
epoch_idx=-1,
converge_status=False,
loss_history=None,
best_epoch=-1,
local_save_path='',
extra_data={}):
if issubclass(type(model), torch.nn.Module):
model_statedict = model.state_dict()
else:
model_statedict = None
opt_state_dict = None
if optimizer is not None:
assert isinstance(optimizer, torch.optim.Optimizer), \
'optimizer must be an instance of torch.optim.Optimizer'
opt_state_dict = optimizer.state_dict()
model_status = {
'model': model_statedict,
'optimizer': opt_state_dict,
}
model_saved_bytes = get_torch_model_bytes(model_status)
extra_data_bytes = get_torch_model_bytes(extra_data)
param = HomoNNParamPB()
meta = HomoNNMetaPB()
# save param
param.model_bytes = model_saved_bytes
param.extra_data_bytes = extra_data_bytes
param.epoch_idx = epoch_idx
param.converge_status = converge_status
param.best_epoch = best_epoch
param.local_save_path = local_save_path
if loss_history is None:
loss_history = []
param.loss_history.extend(loss_history)
# save meta
meta.nn_define.append(json.dumps(model_define))
meta.optimizer_define.append(json.dumps(optimizer_define))
meta.loss_func_define.append(json.dumps(loss_define))
return get_homo_model_dict(param, meta)
class HomoNNClient(ModelBase):
def __init__(self):
super(HomoNNClient, self).__init__()
self.model_param = HomoNNParam()
self.trainer = consts.FEDAVG_TRAINER
self.trainer_param = {}
self.dataset_module = None
self.dataset = None
self.dataset_param = {}
self.torch_seed = None
self.loss = None
self.optimizer = None
self.nn_define = None
        # running variables
self.trainer_inst = None
# export model
self.exporter = NNModelExporter()
self.model_loaded = False
self.model = None
# cache dataset
self.cache_dataset = {}
# dtable partitions
self.partitions = 4
# warm start display iter
self.warm_start_iter = None
# deepspeed
self.ds_config = None
self._ds_stage = -1
self.model_save_flag = False
def _init_model(self, param: HomoNNParam):
train_param = param.trainer.to_dict()
dataset_param = param.dataset.to_dict()
self.trainer = train_param['trainer_name']
self.dataset = dataset_param['dataset_name']
self.trainer_param = train_param['param']
self.dataset_param = dataset_param['param']
self.torch_seed = param.torch_seed
self.nn_define = param.nn_define
self.loss = param.loss
self.optimizer = param.optimizer
self.ds_config = param.ds_config
def init(self):
# set random seed
global_seed(self.torch_seed)
if self.ds_config:
deepspeed_util.init_deepspeed_env(self.ds_config)
# load trainer class
if self.trainer is None:
raise ValueError(
'Trainer is not specified, please specify your trainer')
trainer_class = get_trainer_class(self.trainer)
LOGGER.info('trainer class is {}'.format(trainer_class))
# recover model from model config / or recover from saved model param
loaded_model_dict = None
# if has model protobuf, load model config from protobuf
load_opt_state_dict = False
if self.model_loaded:
param, meta = get_homo_param_meta(self.model)
LOGGER.info('save path is {}'.format(param.local_save_path))
if param.local_save_path == '':
LOGGER.info('Load model from model protobuf')
self.warm_start_iter = param.epoch_idx
if param is None or meta is None:
raise ValueError(
                        'model protobuf is None, make sure '
'that your trainer calls export_model() function to save models')
if meta.nn_define[0] is None:
raise ValueError(
                        'nn_define is None, model protobuf has no nn-define, make sure '
'that your trainer calls export_model() function to save models')
self.nn_define = json.loads(meta.nn_define[0])
loss = json.loads(meta.loss_func_define[0])
optimizer = json.loads(meta.optimizer_define[0])
loaded_model_dict = recover_model_bytes(param.model_bytes)
extra_data = recover_model_bytes(param.extra_data_bytes)
else:
LOGGER.info('Load model from local save path')
                save_dict = torch.load(param.local_save_path)
self.warm_start_iter = save_dict['epoch_idx']
self.nn_define = save_dict['model_define']
loss = save_dict['loss_define']
optimizer = save_dict['optimizer_define']
loaded_model_dict = save_dict
extra_data = save_dict['extra_data']
if self.optimizer is not None and optimizer != self.optimizer:
LOGGER.info('optimizer updated')
else:
self.optimizer = optimizer
load_opt_state_dict = True
if self.loss is not None and self.loss != loss:
LOGGER.info('loss updated')
else:
self.loss = loss
else:
extra_data = {}
# check key param
if self.nn_define is None:
raise ValueError(
'Model structure is not defined, nn_define is None, please check your param')
# get model from nn define
model = s.recover_sequential_from_dict(self.nn_define)
if loaded_model_dict:
model.load_state_dict(loaded_model_dict['model'])
LOGGER.info('load model state dict from check point')
LOGGER.info('model structure is {}'.format(model))
# init optimizer
if self.optimizer is not None and not self.ds_config:
optimizer_: FateTorchOptimizer = s.recover_optimizer_from_dict(
self.optimizer)
# pass model parameters to optimizer
optimizer = optimizer_.to_torch_instance(model.parameters())
if load_opt_state_dict:
LOGGER.info('load optimizer state dict')
optimizer.load_state_dict(loaded_model_dict['optimizer'])
LOGGER.info('optimizer is {}'.format(optimizer))
else:
optimizer = None
LOGGER.info('optimizer is not specified')
# init loss
if self.loss is not None:
loss_fn = s.recover_loss_fn_from_dict(self.loss)
LOGGER.info('loss function is {}'.format(loss_fn))
else:
loss_fn = None
LOGGER.info('loss function is not specified')
# init trainer
trainer_inst: TrainerBase = trainer_class(**self.trainer_param)
LOGGER.info('trainer class is {}'.format(trainer_class))
trainer_train_args = inspect.getfullargspec(trainer_inst.train).args
args_format = [
'self',
'train_set',
'validate_set',
'optimizer',
'loss',
'extra_data'
]
if len(trainer_train_args) < 6:
raise ValueError(
                'Train function of trainer should take 6 arguments: {}, but current trainer.train '
'only takes {} arguments: {}'.format(
args_format, len(trainer_train_args), trainer_train_args))
trainer_inst.set_nn_config(self.nn_define, self.optimizer, self.loss)
trainer_inst.fed_mode = True
if self.ds_config:
model, optimizer = deepspeed_util.deepspeed_init(model, self.ds_config)
trainer_inst.enable_deepspeed(is_zero_3=deepspeed_util.is_zero3(self.ds_config))
if deepspeed_util.is_zero3(self.ds_config):
model.train()
return trainer_inst, model, optimizer, loss_fn, extra_data
def fit(self, train_input, validate_input=None):
LOGGER.debug('train input is {}'.format(train_input))
# train input & validate input are DTables or path str
if not is_table(train_input):
if isinstance(train_input, LocalData):
train_input = train_input.path
assert train_input is not None, 'input train path is None!'
if not is_table(validate_input):
if isinstance(validate_input, LocalData):
validate_input = validate_input.path
assert validate_input is not None, 'input validate path is None!'
# fate loss callback setting
self.callback_meta(
"loss",
"train",
MetricMeta(
name="train",
metric_type="LOSS",
extra_metas={
"unit_name": "epochs"}))
# set random seed
global_seed(self.torch_seed)
self.trainer_inst, model, optimizer, loss_fn, extra_data = self.init()
self.trainer_inst.set_model(model)
self.trainer_inst.set_tracker(self.tracker)
self.trainer_inst.set_model_exporter(self.exporter)
# load dataset class
dataset_inst = load_dataset(
dataset_name=self.dataset,
data_path_or_dtable=train_input,
dataset_cache=self.cache_dataset,
param=self.dataset_param
)
# set dataset prefix
dataset_inst.set_type('train')
LOGGER.info('train dataset instance is {}'.format(dataset_inst))
if validate_input:
val_dataset_inst = load_dataset(
dataset_name=self.dataset,
data_path_or_dtable=validate_input,
dataset_cache=self.cache_dataset,
param=self.dataset_param
)
if id(val_dataset_inst) != id(dataset_inst):
dataset_inst.set_type('validate')
LOGGER.info('validate dataset instance is {}'.format(dataset_inst))
else:
val_dataset_inst = None
# display warmstart iter
if self.component_properties.is_warm_start:
self.callback_warm_start_init_iter(self.warm_start_iter)
# set model check point
self.trainer_inst.set_checkpoint(ModelCheckpoint(self, save_freq=1))
# training
self.trainer_inst.train(
dataset_inst,
val_dataset_inst,
optimizer,
loss_fn,
extra_data
)
# training is done, get exported model
self.model = self.trainer_inst.get_cached_model()
self.set_summary(self.trainer_inst.get_summary())
def predict(self, cpn_input):
with_inst_id = False
schema = None
if not is_table(cpn_input):
if isinstance(cpn_input, LocalData):
cpn_input = cpn_input.path
assert cpn_input is not None, 'input path is None!'
elif is_table(cpn_input):
with_inst_id = check_with_inst_id(cpn_input)
schema = cpn_input.schema
LOGGER.info('running predict')
if self.trainer_inst is None:
# init model
self.trainer_inst, model, optimizer, loss_fn, _ = self.init()
self.trainer_inst.set_model(model)
self.trainer_inst.set_tracker(self.tracker)
dataset_inst = load_dataset(
dataset_name=self.dataset,
data_path_or_dtable=cpn_input,
dataset_cache=self.cache_dataset,
param=self.dataset_param)
if not dataset_inst.has_dataset_type():
dataset_inst.set_type('predict')
trainer_ret = self.trainer_inst.predict(dataset_inst)
if trainer_ret is None or not isinstance(trainer_ret, StdReturnFormat):
LOGGER.info(
'trainer did not return formatted predicted result, skip predict')
return None
id_table, pred_table, classes = trainer_ret()
if with_inst_id: # set match id
add_match_id(id_table=id_table, dataset_inst=dataset_inst)
id_dtable, pred_dtable = get_ret_predict_table(
id_table, pred_table, classes, self.partitions, computing_session)
ret_table = self.predict_score_to_output(
id_dtable, pred_dtable, classes)
if schema is not None:
self.set_predict_data_schema(ret_table, schema)
return ret_table
def export_model(self):
if self.model is None:
LOGGER.debug('export an empty model')
return self.exporter.export_model_dict() # return an empty model
return self.model
def load_model(self, model_dict):
model_dict = list(model_dict["model"].values())[0]
self.model = model_dict
self.model_loaded = True
# override function
@staticmethod
def set_predict_data_schema(predict_datas, schemas):
if predict_datas is None:
return predict_datas
if isinstance(predict_datas, list):
predict_data = predict_datas[0]
schema = schemas[0]
else:
predict_data = predict_datas
schema = schemas
if predict_data is not None:
predict_data.schema = {
"header": [
"label",
"predict_result",
"predict_score",
"predict_detail",
"type",
],
"sid": 'id',
"content_type": "predict_result"
}
if schema.get("match_id_name") is not None:
predict_data.schema["match_id_name"] = schema.get(
"match_id_name")
return predict_data
| 15,960 | 35.861432 | 145 |
py
|
FATE
|
FATE-master/python/federatedml/nn/homo/__init__.py
| 0 | 0 | 0 |
py
|
|
FATE
|
FATE-master/python/federatedml/nn/homo/trainer/fedavg_trainer.py
|
import torch
import torch as t
import torch.distributed as dist
import tqdm
import numpy as np
import transformers
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient as SecureAggClient
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorServer as SecureAggServer
from federatedml.nn.backend.utils import deepspeed_util
from federatedml.nn.backend.utils import distributed_util
from federatedml.nn.dataset.base import Dataset
from federatedml.nn.homo.trainer.trainer_base import TrainerBase
from federatedml.util import LOGGER, consts
from federatedml.optim.convergence import converge_func_factory
class FedAVGTrainer(TrainerBase):
"""
Parameters
----------
epochs: int >0, epochs to train
batch_size: int, -1 means full batch
secure_aggregate: bool, default is True, whether to use secure aggregation. if enabled, will add random number
mask to local models. These random number masks will eventually cancel out to get 0.
weighted_aggregation: bool, whether add weight to each local model when doing aggregation.
if True, According to origin paper, weight of a client is: n_local / n_global, where n_local
is the sample number locally and n_global is the sample number of all clients.
if False, simply averaging these models.
early_stop: None, 'diff' or 'abs'. if None, disable early stop; if 'diff', use the loss difference between
two epochs as early stop condition, if differences < tol, stop training ; if 'abs', if loss < tol,
stop training
tol: float, tol value for early stop
aggregate_every_n_epoch: None or int. if None, aggregate model on the end of every epoch, if int, aggregate
every n epochs.
    cuda: None, int or list of int. if None, use cpu; if int, use the {int}-th device; if list of int, use
          the listed devices. The trainer will automatically use DataParallel for multi-GPU training; the
          first index will be the main device and the output device.
pin_memory: bool, for pytorch DataLoader
shuffle: bool, for pytorch DataLoader
data_loader_worker: int, for pytorch DataLoader, number of workers when loading data
    validation_freqs: None or int. if int, validate your model and send validation results to fate-board every n epochs.
if is binary classification task, will use metrics 'auc', 'ks', 'gain', 'lift', 'precision'
if is multi classification task, will use metrics 'precision', 'recall', 'accuracy'
if is regression task, will use metrics 'mse', 'mae', 'rmse', 'explained_variance', 'r2_score'
    checkpoint_save_freqs: save model every n epochs, if None, will not save checkpoint.
task_type: str, 'auto', 'binary', 'multi', 'regression',
this option decides the return format of this trainer, and the evaluation type when running validation.
if auto, will automatically infer your task type from labels and predict results.
save_to_local_dir: bool, if True, a dictionary containing the model, optimizer, and metadata will be saved to a local directory.
The path is structured as follows: fateflow/jobs/${jobid}/${party}/${party_id}/${your_nn_component}.
If set to False, the model will not be saved to the FATE framework in protobuf format.
"""
def __init__(self, epochs=10, batch_size=512, # training parameter
early_stop=None, tol=0.0001, # early stop parameters
secure_aggregate=True, weighted_aggregation=True, aggregate_every_n_epoch=None, # federation
cuda=None,
pin_memory=True, shuffle=True, data_loader_worker=0, # GPU & dataloader
validation_freqs=None, # validation configuration
checkpoint_save_freqs=None, # checkpoint configuration
task_type='auto', # task type
save_to_local_dir=False, # save model to local path
collate_fn=None,
collate_fn_params=None
):
super(FedAVGTrainer, self).__init__()
# training parameters
self.epochs = epochs
self.tol = tol
self.validation_freq = validation_freqs
self.save_freq = checkpoint_save_freqs
self.save_to_local_dir = save_to_local_dir
self.task_type = task_type.lower()
task_type_allow = [
consts.BINARY,
consts.REGRESSION,
consts.MULTY,
consts.CAUSAL_LM,
consts.SEQ_2_SEQ_LM,
'auto']
assert self.task_type in task_type_allow, 'task type must in {}'.format(
task_type_allow)
# aggregation param
self.secure_aggregate = secure_aggregate
self.weighted_aggregation = weighted_aggregation
self.aggregate_every_n_epoch = aggregate_every_n_epoch
# GPU, check cuda setting
self.cuda = cuda
self.cuda_main_device = None
self.data_parallel = False
self.parallel_model = None
if not torch.cuda.is_available() and self.cuda is not None:
raise ValueError('Cuda is not available on this machine')
if isinstance(self.cuda, int):
self.cuda_main_device = self.cuda
elif isinstance(self.cuda, list):
for i in self.cuda:
assert isinstance(i, int), 'cuda device must be int, but got {}'.format(self.cuda)
self.cuda_main_device = self.cuda[0]
if len(self.cuda) > 1:
self.data_parallel = True
LOGGER.info('Using DataParallel in Pytorch')
# data loader
self.batch_size = batch_size
self.pin_memory = pin_memory
self.shuffle = shuffle
self.data_loader_worker = data_loader_worker
self.data_loader = None
self.collate_fn = collate_fn
self.collate_fn_params = collate_fn_params if collate_fn_params is not None else dict()
self.early_stop = early_stop
early_stop_type = ['diff', 'abs']
if early_stop is not None:
            assert early_stop in early_stop_type, 'early stop type must be in {}, but got {}' \
.format(early_stop_type, early_stop)
# communicate suffix
self.comm_suffix = 'fedavg'
# check param correctness
self.check_trainer_param([self.epochs,
self.validation_freq,
self.save_freq,
self.aggregate_every_n_epoch],
['epochs',
'validation_freq',
'save_freq',
'aggregate_every_n_epoch'],
self.is_pos_int,
'{} is not a positive int')
self.check_trainer_param([self.secure_aggregate, self.weighted_aggregation, self.pin_memory, self.save_to_local_dir], [
'secure_aggregate', 'weighted_aggregation', 'pin_memory', 'save_to_local_dir'], self.is_bool, '{} is not a bool')
self.check_trainer_param(
[self.tol], ['tol'], self.is_float, '{} is not a float')
def _init_aggregator(self, train_set):
# compute round to aggregate
cur_agg_round = 0
if self.aggregate_every_n_epoch is not None:
aggregate_round = self.epochs // self.aggregate_every_n_epoch
else:
aggregate_round = self.epochs
# initialize fed avg client
if self.fed_mode:
if self.weighted_aggregation:
sample_num = len(train_set)
else:
sample_num = 1.0
if not distributed_util.is_distributed() or distributed_util.is_rank_0():
client_agg = SecureAggClient(
self.secure_aggregate, aggregate_weight=sample_num, communicate_match_suffix=self.comm_suffix)
else:
client_agg = None
else:
client_agg = None
return client_agg, aggregate_round
def set_model(self, model: t.nn.Module):
if not issubclass(type(model), t.nn.Module):
raise ValueError('model must be a subclass of pytorch nn.Module')
self.model = model
if self.cuda is not None:
self.model = self.model.cuda(self.cuda_main_device)
if self.data_parallel:
self.parallel_model = DataParallel(model, device_ids=self.cuda, output_device=self.cuda_main_device)
def _select_model(self):
if self.data_parallel:
return self.parallel_model
else:
return self.model
def train_an_epoch(self, epoch_idx, model, train_set, optimizer, loss):
epoch_loss = 0.0
batch_idx = 0
acc_num = 0
if isinstance(self.data_loader.sampler, DistributedSampler):
self.data_loader.sampler.set_epoch(epoch_idx)
dl = self.data_loader
if not self.fed_mode:
to_iterate = tqdm.tqdm(dl)
else:
to_iterate = dl
batch_label = None
for _batch_iter in to_iterate:
_batch_iter = self._decode(_batch_iter)
if isinstance(_batch_iter, list):
batch_data, batch_label = _batch_iter
else:
batch_data = _batch_iter
"""
if self.task_type in [consts.CAUSAL_LM, consts.SEQ_2_SEQ_LM]:
batch_data = _batch_iter
else:
batch_data, batch_label = _batch_iter
batch_data = self._decode(batch_data)
batch_label = self._decode(batch_label)
"""
if self.cuda is not None or self._enable_deepspeed:
device = self.cuda_main_device if self.cuda_main_device is not None else self.model.device
batch_data = self.to_cuda(batch_data, device)
if batch_label is not None:
batch_label = self.to_cuda(batch_label, device)
if not self._enable_deepspeed:
optimizer.zero_grad()
else:
model.zero_grad()
pred = model(batch_data)
if not loss and hasattr(pred, "loss"):
batch_loss = pred.loss
elif loss is not None:
if batch_label is None:
raise ValueError(
"When loss is set, please provide label to calculate loss"
)
if not isinstance(pred, torch.Tensor) and hasattr(pred, "logits"):
pred = pred.logits
batch_loss = loss(pred, batch_label)
else:
raise ValueError(
'FedAVGTrainer requires a loss function, but got None, please specify loss function in the'
' job configuration')
if not self._enable_deepspeed:
batch_loss.backward()
optimizer.step()
batch_loss_np = np.array(batch_loss.detach().tolist()) if self.cuda is None \
else np.array(batch_loss.cpu().detach().tolist())
                if acc_num + self.batch_size > len(train_set):
                    batch_len = len(train_set) - acc_num
                else:
                    batch_len = self.batch_size
                acc_num += batch_len
                epoch_loss += batch_loss_np * batch_len
else:
batch_loss = model.backward(batch_loss)
batch_loss_np = np.array(batch_loss.cpu().detach().tolist())
model.step()
batch_loss_np = self._sync_loss(batch_loss_np * self._get_batch_size(batch_data))
if distributed_util.is_rank_0():
epoch_loss += batch_loss_np
batch_idx += 1
# LOGGER.info(f"finish epoch={epoch_idx}, batch={batch_idx}")
if self.fed_mode:
LOGGER.debug(
'epoch {} batch {} finished'.format(epoch_idx, batch_idx))
epoch_loss = epoch_loss / len(train_set)
return epoch_loss
def train(
self,
train_set: Dataset,
validate_set: Dataset = None,
optimizer: t.optim.Optimizer = None,
loss=None,
extra_dict={}):
if optimizer is None:
raise ValueError(
'FedAVGTrainer requires an optimizer, but got None, please specify optimizer in the '
'job configuration')
if self.batch_size > len(train_set) or self.batch_size == -1:
self.batch_size = len(train_set)
# compute round to aggregate
cur_agg_round = 0
client_agg, aggregate_round = self._init_aggregator(train_set)
# running var
cur_epoch = 0
loss_history = []
need_stop = False
evaluation_summary = {}
self._get_train_data_loader(train_set)
# training process
for i in range(self.epochs):
cur_epoch = i
LOGGER.info('epoch is {}'.format(i))
model = self._select_model()
epoch_loss = self.train_an_epoch(i, model, train_set, optimizer, loss)
if not distributed_util.is_distributed() or distributed_util.is_rank_0():
self.callback_loss(epoch_loss, i)
loss_history.append(float(epoch_loss))
LOGGER.info('epoch loss is {}'.format(epoch_loss))
# federation process, if running local mode, cancel federation
if client_agg is not None or distributed_util.is_distributed():
if not (self.aggregate_every_n_epoch is not None and (i + 1) % self.aggregate_every_n_epoch != 0):
# model averaging, only aggregate trainable param
if self._deepspeed_zero_3:
deepspeed_util.gather_model(self.model)
if not distributed_util.is_distributed() or distributed_util.is_rank_0():
self.model = client_agg.model_aggregation(self.model)
if distributed_util.is_distributed() and distributed_util.get_num_workers() > 1:
self._share_model()
else:
self._share_model()
# agg loss and get converge status
if not distributed_util.is_distributed() or distributed_util.is_rank_0():
converge_status = client_agg.loss_aggregation(epoch_loss)
cur_agg_round += 1
if distributed_util.is_distributed() and distributed_util.get_num_workers() > 1:
self._sync_converge_status(converge_status)
else:
converge_status = self._sync_converge_status()
if not distributed_util.is_distributed() or distributed_util.is_rank_0():
LOGGER.info(
'model averaging finished, aggregate round {}/{}'.format(
cur_agg_round, aggregate_round))
if converge_status:
LOGGER.info('early stop triggered, stop training')
need_stop = True
# validation process
if self.validation_freq and ((i + 1) % self.validation_freq == 0):
LOGGER.info('running validation')
ids_t, pred_t, label_t = self._predict(train_set)
evaluation_summary = self.evaluation(
ids_t,
pred_t,
label_t,
dataset_type='train',
epoch_idx=i,
task_type=self.task_type)
if validate_set is not None:
ids_v, pred_v, label_v = self._predict(validate_set)
evaluation_summary = self.evaluation(
ids_v,
pred_v,
label_v,
dataset_type='validate',
epoch_idx=i,
task_type=self.task_type)
# save check point process
if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
if self._deepspeed_zero_3:
deepspeed_util.gather_model(self.model)
if not distributed_util.is_distributed() or distributed_util.is_rank_0():
if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
if self.save_to_local_dir:
self.local_checkpoint(
self.model, i, optimizer, converge_status=need_stop, loss_history=loss_history)
else:
self.checkpoint(
self.model, i, optimizer, converge_status=need_stop, loss_history=loss_history)
LOGGER.info('save checkpoint : epoch {}'.format(i))
# if meet stop condition then stop
if need_stop:
break
# post-process
if self._deepspeed_zero_3:
deepspeed_util.gather_model(self.model)
if not distributed_util.is_distributed() or distributed_util.is_rank_0():
best_epoch = int(np.array(loss_history).argmin())
if self.save_to_local_dir:
self.local_save(model=self.model, optimizer=optimizer, epoch_idx=cur_epoch, loss_history=loss_history,
converge_status=need_stop, best_epoch=best_epoch)
else:
self.save(model=self.model, optimizer=optimizer, epoch_idx=cur_epoch, loss_history=loss_history,
converge_status=need_stop, best_epoch=best_epoch)
best_epoch = int(np.array(loss_history).argmin())
self.summary({
'best_epoch': best_epoch,
'loss_history': loss_history,
'need_stop': need_stop,
'metrics_summary': evaluation_summary
})
def _predict(self, dataset: Dataset):
pred_result = []
# switch eval mode
dataset.eval()
model = self._select_model()
model.eval()
if not dataset.has_sample_ids():
dataset.init_sid_and_getfunc(prefix=dataset.get_type())
labels = []
with torch.no_grad():
for _batch_iter in DataLoader(
dataset, self.batch_size
):
if isinstance(_batch_iter, list):
batch_data, batch_label = _batch_iter
else:
batch_label = _batch_iter.pop("labels")
batch_data = _batch_iter
if self.cuda is not None or self._enable_deepspeed:
device = self.cuda_main_device if self.cuda_main_device is not None else self.model.device
batch_data = self.to_cuda(batch_data, device)
pred = model(batch_data)
if not isinstance(pred, torch.Tensor) and hasattr(pred, "logits"):
pred = pred.logits
pred_result.append(pred)
labels.append(batch_label)
ret_rs = torch.concat(pred_result, axis=0)
ret_label = torch.concat(labels, axis=0)
# switch back to train mode
dataset.train()
model.train()
return dataset.get_sample_ids(), ret_rs, ret_label
def predict(self, dataset: Dataset):
if self.task_type in [consts.CAUSAL_LM, consts.SEQ_2_SEQ_LM]:
LOGGER.warning(f"Not support prediction of task_types={[consts.CAUSAL_LM, consts.SEQ_2_SEQ_LM]}")
return
if distributed_util.is_distributed() and not distributed_util.is_rank_0():
return
ids, ret_rs, ret_label = self._predict(dataset)
if self.fed_mode:
return self.format_predict_result(
ids, ret_rs, ret_label, task_type=self.task_type)
else:
return ret_rs, ret_label
def server_aggregate_procedure(self, extra_data={}):
# converge status
check_converge = False
converge_func = None
if self.early_stop:
check_converge = True
converge_func = converge_func_factory(
self.early_stop, self.tol).is_converge
LOGGER.info(
'check early stop, converge func is {}'.format(converge_func))
LOGGER.info('server running aggregate procedure')
server_agg = SecureAggServer(self.secure_aggregate, communicate_match_suffix=self.comm_suffix)
# aggregate and broadcast models
for i in range(self.epochs):
if not (self.aggregate_every_n_epoch is not None and (i + 1) % self.aggregate_every_n_epoch != 0):
# model aggregate
server_agg.model_aggregation()
# loss aggregate
agg_loss, converge_status = server_agg.loss_aggregation(
check_converge=check_converge, converge_func=converge_func)
self.callback_loss(agg_loss, i)
# save check point process
if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
self.checkpoint(epoch_idx=i)
LOGGER.info('save checkpoint : epoch {}'.format(i))
# check stop condition
if converge_status:
LOGGER.debug('stop triggered, stop aggregation')
break
LOGGER.info('server aggregation process done')
def _decode(self, data):
if isinstance(data, transformers.tokenization_utils_base.BatchEncoding):
return dict(data)
else:
return data
def _get_batch_size(self, data):
if isinstance(data, list):
return len(data)
elif isinstance(data, dict):
if "input_ids" in data:
return data["input_ids"].shape[0]
else:
for _, value in data.items():
if hasattr(value, "shape"):
return value.shape[0]
raise ValueError("cat not infer batch size from data")
def _get_collate_fn(self, dataset):
if not self.collate_fn and not hasattr(dataset, "collate_fn"):
return None
if self.collate_fn:
if not hasattr(dataset, "tokenizer"):
raise ValueError(f"Collate Fn Only Support in task types=[{consts.CAUSAL_LM}, {consts.SEQ_2_SEQ_LM}]")
collate_fn = getattr(transformers, self.collate_fn)(dataset.tokenizer, **self.collate_fn_params)
return collate_fn
else:
return dataset.collate_fn
def _get_train_data_loader(self, train_set):
collate_fn = self._get_collate_fn(train_set)
if not distributed_util.is_distributed() or distributed_util.get_num_workers() <= 1:
self.data_loader = DataLoader(
train_set,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
shuffle=self.shuffle,
num_workers=self.data_loader_worker,
collate_fn=collate_fn
)
else:
train_sampler = DistributedSampler(
train_set,
num_replicas=dist.get_world_size(),
rank=dist.get_rank()
)
self.data_loader = DataLoader(
train_set,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
num_workers=self.data_loader_worker,
collate_fn=collate_fn,
sampler=train_sampler
)
def _share_model(self):
if distributed_util.is_rank_0():
for p in self.model.parameters():
if p.requires_grad:
scatter_list = [p.data for _ in range(distributed_util.get_num_workers())]
dist.scatter(p.data, scatter_list, async_op=False)
else:
for p in self.model.parameters():
if p.requires_grad:
dist.scatter(p.data, src=0, async_op=False)
def _sync_converge_status(self, converge_status=None):
if distributed_util.is_rank_0():
t_status = self.to_cuda(torch.Tensor([converge_status]), self.model.device)
dist.scatter(t_status, [t_status for _ in range(distributed_util.get_num_workers())], async_op=False)
else:
t_status = self.to_cuda(torch.Tensor([False]), self.model.device)
dist.scatter(t_status, src=0, async_op=False)
return t_status[0].item()
def _sync_loss(self, loss):
if distributed_util.get_num_workers() == 1:
return loss
loss = self.to_cuda(torch.tensor(loss), self.model.device)
if distributed_util.is_rank_0():
loss_list = [torch.zeros_like(loss) for _ in range(distributed_util.get_num_workers())]
dist.gather(loss, gather_list=loss_list, async_op=False)
loss_sum = 0
for _l in loss_list:
loss_sum += _l.item()
return loss_sum
else:
dist.gather(loss, dst=0, async_op=False)
# LOGGER.info(f"Loss on rank{dist.get_rank()}={loss}")
| 25,839 | 41.291326 | 146 |
py
|
FATE
|
FATE-master/python/federatedml/nn/homo/trainer/trainer_base.py
|
import os
import abc
import importlib
import torch as t
import numpy as np
from torch.nn import Module
from typing import List
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.model_base import serialize_models
from federatedml.nn.backend.utils.common import ML_PATH
from federatedml.feature.instance import Instance
from federatedml.evaluation.evaluation import Evaluation
from federatedml.model_base import Metric, MetricMeta
from federatedml.param import EvaluateParam
class StdReturnFormat(object):
def __init__(self, id_table_list, pred_table, classes):
self.id = id_table_list
self.pred_table = pred_table
self.classes = classes
def __call__(self,):
return self.id, self.pred_table, self.classes
class ExporterBase(object):
def __init__(self, *args, **kwargs):
pass
def export_model_dict(self, model=None, optimizer=None, model_define=None, optimizer_define=None, loss_define=None,
epoch_idx=-1, converge_status=False, loss_history=None, best_epoch=-1, extra_data={}):
pass
class TrainerBase(object):
def __init__(self, **kwargs):
self._fed_mode = True
self.role = None
self.party_id = None
self.party_id_list = None
self._flowid = None
self._cache_model = None
self._model = None
self._tracker = None
self._model_checkpoint = None
self._exporter = None
self._evaluation_summary = {}
# running status
self._set_model_checkpoint_epoch = set()
# nn config
self.nn_define, self.opt_define, self.loss_define = {}, {}, {}
# ret summary
self._summary = {}
# deepspeed enabled
self._enable_deepspeed = False
self._deepspeed_zero_3 = False
@staticmethod
def is_pos_int(val):
return val > 0 and isinstance(val, int)
@staticmethod
def is_float(val):
return isinstance(val, float)
@staticmethod
def is_bool(val):
return isinstance(val, bool)
@staticmethod
def check_trainer_param(
var_list,
name_list,
judge_func,
warning_str,
allow_none=True):
for var, name in zip(var_list, name_list):
if allow_none and var is None:
continue
assert judge_func(var), warning_str.format(name)
@property
def model(self):
if not hasattr(self, '_model'):
raise AttributeError(
'model variable is not initialized, remember to call'
' super(your_class, self).__init__()')
if self._model is None:
raise AttributeError(
'model is not set, use set_model() function to set training model')
return self._model
@model.setter
def model(self, val):
self._model = val
@property
def fed_mode(self):
if not hasattr(self, '_fed_mode'):
raise AttributeError(
                '_fed_mode variable is not initialized, remember to call'
' super(your_class, self).__init__()')
return self._fed_mode
@fed_mode.setter
def fed_mode(self, val):
assert isinstance(val, bool), 'fed mode must be a bool'
self._fed_mode = val
def enable_deepspeed(self, is_zero_3=False):
self._enable_deepspeed = True
self._deepspeed_zero_3 = is_zero_3
def local_mode(self):
self.fed_mode = False
def set_nn_config(self, nn_define, optimizer_define, loss_define):
self.nn_define = nn_define
self.opt_define = optimizer_define
self.loss_define = loss_define
def set_tracker(self, tracker):
self._tracker = tracker
def set_checkpoint(self, chkp):
self._model_checkpoint = chkp
def set_party_id_list(self, party_id_list):
self.party_id_list = party_id_list
def set_model_exporter(self, exporter):
assert isinstance(
exporter, ExporterBase), 'exporter is not an instance of ExporterBase'
self._exporter = exporter
def get_cached_model(self):
return self._cache_model
@staticmethod
def task_type_infer(predict_result: t.Tensor, true_label):
# infer task type and classes(of classification task)
predict_result = predict_result.cpu()
true_label = true_label.cpu()
pred_shape = predict_result.shape
with t.no_grad():
if true_label.max() == 1.0 and true_label.min() == 0.0:
return consts.BINARY
            if (len(pred_shape) > 1) and (pred_shape[1] > 1):
                # multi-column scores whose rows sum to 1 form a class
                # distribution -> multi-class task
                if t.isclose(predict_result.sum(axis=1).cpu(), t.Tensor([1.0])).all():
                    return consts.MULTY
                else:
                    return None
elif (len(pred_shape) == 1) or (pred_shape[1] == 1):
return consts.REGRESSION
return None
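    # a minimal sketch of how task_type_infer behaves (illustrative only):
    #   scores = t.softmax(t.randn(4, 3), dim=1)     # (N, C), rows sum to 1
    #   labels = t.tensor([0, 2, 1, 2])
    #   TrainerBase.task_type_infer(scores, labels)  # -> consts.MULTY
    # a strictly 0/1 label vector yields consts.BINARY first, and a 1-d score
    # vector falls back to consts.REGRESSION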
def _update_metric_summary(self, metric_dict):
if len(metric_dict) == 0:
return
iter_name = list(metric_dict.keys())[0]
metric_dict = metric_dict[iter_name]
if len(self._evaluation_summary) == 0:
self._evaluation_summary = {namespace: {}
for namespace in metric_dict}
for namespace in metric_dict:
for metric_name in metric_dict[namespace]:
epoch_metric = metric_dict[namespace][metric_name]
if namespace not in self._evaluation_summary:
self._evaluation_summary[namespace] = {}
if metric_name not in self._evaluation_summary[namespace]:
self._evaluation_summary[namespace][metric_name] = []
self._evaluation_summary[namespace][metric_name].append(
epoch_metric)
def get_evaluation_summary(self):
return self._evaluation_summary
def get_summary(self):
return self._summary
"""
User Interfaces
"""
def _local_save(
self,
model,
optimizer,
epoch_idx,
converge_status,
loss_history,
best_epoch,
extra_data,
save_path):
LOGGER.debug('save model to local dir')
        # models exposing `save_pretrained` (e.g. pretrained language models)
        # are saved in their own format; otherwise fall back to a state-dict dump
        if hasattr(model, "enable_save_pretrained") and model.enable_save_pretrained:
            model.save_pretrained(save_path)
        else:
            unwrap_model = TrainerBase.unwrap_model(model)
            # test the unwrapped model, which may expose save_pretrained
            # even when the wrapper does not
            if hasattr(unwrap_model, "enable_save_pretrained") and unwrap_model.enable_save_pretrained:
                unwrap_model.save_pretrained(save_path)
            else:
model_state_dict = model.state_dict()
model_dict = {
'model': model_state_dict,
'optimizer': optimizer.state_dict(),
'model_define': self.nn_define,
'optimizer_define': self.opt_define,
'loss_define': self.loss_define,
'epoch_idx': epoch_idx,
'converge_status': converge_status,
'loss_history': loss_history,
'best_epoch': best_epoch,
'extra_data': extra_data
}
t.save(model_dict, save_path)
local_save_path = save_path if not self._enable_deepspeed else os.environ[consts.FLOW_MODEL_SYNC_PATH]
model_dict = self._exporter.export_model_dict(model_define=self.nn_define,
optimizer_define=self.opt_define,
loss_define=self.loss_define,
epoch_idx=epoch_idx,
converge_status=converge_status,
loss_history=loss_history,
best_epoch=best_epoch,
extra_data=extra_data,
local_save_path=local_save_path
)
self._cache_model = model_dict
def set_model(self, model: Module):
if not issubclass(type(model), Module):
raise ValueError('model must be a subclass of pytorch nn.Module')
self.model = model
def save(
self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
assert isinstance(
epoch_idx, int) and epoch_idx >= 0, 'epoch idx must be an int >= 0'
if self._exporter:
LOGGER.debug('save model to fate')
model_dict = self._exporter.export_model_dict(model=model,
optimizer=optimizer,
model_define=self.nn_define,
optimizer_define=self.opt_define,
loss_define=self.loss_define,
epoch_idx=epoch_idx,
converge_status=converge_status,
loss_history=loss_history,
best_epoch=best_epoch,
extra_data=extra_data
)
self._cache_model = model_dict
def checkpoint(
self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
assert isinstance(
epoch_idx, int) and epoch_idx >= 0, 'epoch idx must be an int >= 0'
"""
if isinstance(TrainerBase.unwrap_model(model), PELLM):
raise ValueError("save checkpoint of Pretrained model should provide local dir")
"""
if self._model_checkpoint:
if self._exporter is None:
raise RuntimeError('exporter is None, cannot save checkpoint')
if epoch_idx in self._set_model_checkpoint_epoch:
LOGGER.info(
'checkpoint at epoch {} set, skip setting checkpoint'.format(epoch_idx))
return
self.save(model=model, epoch_idx=epoch_idx, optimizer=optimizer, converge_status=converge_status,
loss_history=loss_history, best_epoch=best_epoch, extra_data=extra_data)
self._model_checkpoint.add_checkpoint(len(self._set_model_checkpoint_epoch),
to_save_model=serialize_models(self._cache_model)) # step_index, to_save_model
self._set_model_checkpoint_epoch.add(epoch_idx)
LOGGER.info('checkpoint at epoch {} saved'.format(epoch_idx))
def local_save(self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
assert isinstance(
epoch_idx, int) and epoch_idx >= 0, 'epoch idx must be an int >= 0'
if self._exporter:
# default saving folder is under the job folder
model_name = "model.pkl"
if self._enable_deepspeed:
save_path = os.path.join(os.environ[consts.DEEPSPEED_MODEL_DIR], model_name)
else:
save_path = os.path.abspath(os.path.join('../../../../', model_name))
self._local_save(
model,
optimizer,
epoch_idx,
converge_status,
loss_history,
best_epoch,
extra_data,
save_path)
def local_checkpoint(self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
if self._exporter:
# default saving folder is under the job folder
model_name = 'checkpoint_{}.pkl'.format(epoch_idx)
if self._enable_deepspeed:
save_path = os.path.join(os.environ[consts.DEEPSPEED_MODEL_DIR], model_name)
else:
save_path = os.path.abspath(os.path.join('../../../../', model_name))
self._local_save(
model,
optimizer,
epoch_idx,
converge_status,
loss_history,
best_epoch,
extra_data,
save_path)
self._model_checkpoint.add_checkpoint(len(self._set_model_checkpoint_epoch),
to_save_model=serialize_models(self._cache_model)) # step_index, to_save_model
self._set_model_checkpoint_epoch.add(epoch_idx)
LOGGER.info('checkpoint at epoch {} saved'.format(epoch_idx))
def format_predict_result(self, sample_ids: List, predict_result: t.Tensor,
true_label: t.Tensor, task_type: str = None):
predict_result = predict_result.cpu().detach()
if task_type == 'auto':
task_type = self.task_type_infer(predict_result, true_label)
if task_type is None:
LOGGER.warning(
'unable to infer predict result type, predict process will be skipped')
return None
classes = None
if task_type == consts.BINARY:
classes = [0, 1]
elif task_type == consts.MULTY:
classes = [i for i in range(predict_result.shape[1])]
true_label = true_label.cpu().detach().flatten().tolist()
if task_type == consts.MULTY:
predict_result = predict_result.tolist()
else:
predict_result = predict_result.flatten().tolist()
id_table = [(id_, Instance(label=l))
for id_, l in zip(sample_ids, true_label)]
score_table = [(id_, pred)
for id_, pred in zip(sample_ids, predict_result)]
return StdReturnFormat(id_table, score_table, classes)
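    # the returned StdReturnFormat unpacks to (id_table, score_table, classes);
    # for a binary task this looks like (illustrative):
    #   id_table    -> [(sample_id, Instance(label=1)), ...]
    #   score_table -> [(sample_id, 0.73), ...]
    #   classes     -> [0, 1]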
def callback_metric(self, metric_name: str, value: float, metric_type='train', epoch_idx=0):
assert metric_type in [
'train', 'validate'], 'metric_type should be train or validate'
iter_name = 'iteration_{}'.format(epoch_idx)
if self._tracker is not None:
self._tracker.log_metric_data(
metric_type, iter_name, [
Metric(
metric_name, np.round(
value, 6))])
self._tracker.set_metric_meta(
metric_type, iter_name, MetricMeta(
name=metric_name, metric_type='EVALUATION_SUMMARY'))
def callback_loss(self, loss: float, epoch_idx: int):
if self._tracker is not None:
self._tracker.log_metric_data(
metric_name="loss",
metric_namespace="train",
metrics=[Metric(epoch_idx, loss)],
)
def summary(self, summary_dict: dict):
assert isinstance(summary_dict, dict), 'summary must be a dict'
self._summary = summary_dict
def evaluation(self, sample_ids: list, pred_scores: t.Tensor, label: t.Tensor, dataset_type='train',
metric_list=None, epoch_idx=0, task_type=None):
eval_obj = Evaluation()
if task_type == 'auto':
task_type = self.task_type_infer(pred_scores, label)
if task_type is None:
LOGGER.debug('cannot infer task type, return')
return
assert dataset_type in [
'train', 'validate'], 'dataset_type must in ["train", "validate"]'
eval_param = EvaluateParam(eval_type=task_type)
if task_type == consts.BINARY:
eval_param.metrics = ['auc', 'ks']
elif task_type == consts.MULTY:
eval_param.metrics = ['accuracy', 'precision', 'recall']
eval_param.check_single_value_default_metric()
eval_obj._init_model(eval_param)
pred_scores = pred_scores.cpu().detach().numpy()
label = label.cpu().detach().numpy().flatten()
if task_type == consts.REGRESSION or task_type == consts.BINARY:
pred_scores = pred_scores.flatten()
label = label.flatten()
pred_scores = pred_scores.tolist()
label = label.tolist()
assert len(pred_scores) == len(
label), 'the length of predict score != the length of label, pred {} and label {}'.format(len(pred_scores), len(label))
eval_data = []
for id_, s, l in zip(sample_ids, pred_scores, label):
            if task_type == consts.REGRESSION:
                eval_data.append([id_, (l, s, s)])
            elif task_type == consts.MULTY:
                pred_label = np.argmax(s)
                eval_data.append([id_, (l, pred_label, s)])
            elif task_type == consts.BINARY:
                # map the positive-class score to a 0/1 predicted label
                pred_label = int(s > 0.5)
                eval_data.append([id_, (l, pred_label, s)])
eval_result = eval_obj.evaluate_metrics(dataset_type, eval_data)
if self._tracker is not None:
eval_obj.set_tracker(self._tracker)
# send result to fate-board
eval_obj.callback_metric_data(
{'iteration_{}'.format(epoch_idx): [eval_result]})
self._update_metric_summary(eval_obj.metric_summaries)
return self._evaluation_summary
def to_cuda(self, var, device=0):
if hasattr(var, 'cuda'):
return var.cuda(device)
        elif isinstance(var, (tuple, list)):
            # recurse so that nested containers keep the same target device
            return tuple(self.to_cuda(i, device) for i in var)
        elif isinstance(var, dict):
            for k in var:
                var[k] = self.to_cuda(var[k], device)
            return var
else:
return var
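    # to_cuda also moves nested containers (illustrative):
    #   batch = {'input': t.zeros(2, 3), 'mask': t.ones(2, 3)}
    #   batch = self.to_cuda(batch, device=0)   # both tensors moved to cuda:0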
@abc.abstractmethod
def train(self, train_set, validate_set=None, optimizer=None, loss=None, extra_data={}):
"""
train_set : A Dataset Instance, must be a instance of subclass of Dataset (federatedml.nn.dataset.base),
for example, TableDataset() (from federatedml.nn.dataset.table)
validate_set : A Dataset Instance, but optional must be a instance of subclass of Dataset
(federatedml.nn.dataset.base), for example, TableDataset() (from federatedml.nn.dataset.table)
optimizer : A pytorch optimizer class instance, for example, t.optim.Adam(), t.optim.SGD()
loss : A pytorch Loss class, for example, nn.BECLoss(), nn.CrossEntropyLoss()
"""
pass
@abc.abstractmethod
def predict(self, dataset):
pass
@abc.abstractmethod
def server_aggregate_procedure(self, extra_data={}):
pass
@staticmethod
def unwrap_model(model):
if hasattr(model, "module"):
return TrainerBase.unwrap_model(model.module)
else:
return model
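# A minimal sketch of a custom trainer built on TrainerBase (hypothetical and
# local-only, for illustration; real trainers such as FedAVGTrainer also run
# the federated aggregation procedure):
#
#   class MyTrainer(TrainerBase):
#
#       def __init__(self, epochs=1, batch_size=32, **kwargs):
#           super(MyTrainer, self).__init__(**kwargs)
#           self.epochs, self.batch_size = epochs, batch_size
#
#       def train(self, train_set, validate_set=None, optimizer=None, loss=None, extra_data={}):
#           dl = t.utils.data.DataLoader(train_set, batch_size=self.batch_size)
#           for epoch in range(self.epochs):
#               epoch_loss = 0
#               for data, label in dl:
#                   optimizer.zero_grad()
#                   batch_loss = loss(self.model(data), label)
#                   batch_loss.backward()
#                   optimizer.step()
#                   epoch_loss += batch_loss.item()
#               self.callback_loss(epoch_loss / len(dl), epoch)
#           self.save(model=self.model, optimizer=optimizer, epoch_idx=self.epochs - 1)
#
#       def predict(self, dataset):
#           pass
#
#       def server_aggregate_procedure(self, extra_data={}):
#           pass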
"""
Load Trainer
"""
def get_trainer_class(trainer_module_name: str):
    if trainer_module_name.endswith('.py'):
        trainer_module_name = trainer_module_name[:-3]  # strip only the trailing '.py'
ds_modules = importlib.import_module(
'{}.homo.trainer.{}'.format(
ML_PATH, trainer_module_name))
    trainers = []
    for k, v in ds_modules.__dict__.items():
        if isinstance(v, type) and issubclass(v, TrainerBase) and v is not TrainerBase:
            trainers.append(v)
    if len(trainers) == 0:
        raise ValueError('Did not find any class in {}.py that is a subclass of TrainerBase'.
                         format(trainer_module_name))
    return trainers[-1]  # return the last trainer class defined in the module
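# typical usage (illustrative), loading the FedAVG trainer shipped in
# federatedml/nn/homo/trainer/fedavg_trainer.py by its module name:
#   trainer_cls = get_trainer_class('fedavg_trainer')
#   trainer = trainer_cls(epochs=10)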
| 20,753 | 35.410526 | 131 |
py
|