python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import tenseal as ts
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, model_learnable_to_dxo
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.he import decomposers
from nvflare.app_opt.he.constant import HE_ALGORITHM_CKKS
from nvflare.app_opt.he.homomorphic_encrypt import (
deserialize_nested_dict,
load_tenseal_context_from_workspace,
serialize_nested_dict,
)
from nvflare.security.logging import secure_format_exception
def add_to_global_weights(new_val, base_weights, v_name):
try:
global_var = base_weights[v_name]
if isinstance(new_val, np.ndarray):
new_val = new_val.ravel()
if isinstance(global_var, np.ndarray):
global_var = global_var.ravel()
n_vars_total = np.size(global_var)
elif isinstance(global_var, ts.CKKSVector):
n_vars_total = global_var.size()
else:
raise ValueError(f"global_var has type {type(global_var)} which is not supported.")
# update the global model
updated_vars = new_val + global_var
except Exception as e:
raise ValueError(f"add_to_global_weights Exception: {secure_format_exception(e)}") from e
return updated_vars, n_vars_total
class HEModelShareableGenerator(ShareableGenerator):
def __init__(self, tenseal_context_file="server_context.tenseal"):
"""This ShareableGenerator converts between Shareable and Learnable objects.
This conversion is done with homomorphic encryption (HE) support using
TenSEAL https://github.com/OpenMined/TenSEAL.
Args:
tenseal_context_file: tenseal context files containing TenSEAL context
"""
super().__init__()
self.tenseal_context = None
self.tenseal_context_file = tenseal_context_file
decomposers.register()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
elif event_type == EventType.END_RUN:
self.tenseal_context = None
def _shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:
dxo = from_shareable(shareable)
enc_algorithm = dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)
if enc_algorithm != HE_ALGORITHM_CKKS:
raise ValueError("expected encryption algorithm {} but got {}".format(HE_ALGORITHM_CKKS, enc_algorithm))
base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
if not base_model:
self.system_panic(reason="No global base model!", fl_ctx=fl_ctx)
return base_model
deserialize_nested_dict(base_model, context=self.tenseal_context)
base_weights = base_model[ModelLearnableKey.WEIGHTS]
if dxo.data_kind == DataKind.WEIGHT_DIFF:
start_time = time.time()
model_diff = dxo.data
if not model_diff:
raise ValueError(f"{self._name} DXO data is empty!")
deserialize_nested_dict(model_diff, context=self.tenseal_context)
n_vars = len(model_diff.items())
n_params = 0
for v_name, v_value in model_diff.items():
self.log_debug(fl_ctx, f"adding {v_name} to global model...")
updated_vars, n_vars_total = add_to_global_weights(v_value, base_weights, v_name)
n_params += n_vars_total
base_weights[v_name] = updated_vars
self.log_debug(fl_ctx, f"assigned new {v_name}")
end_time = time.time()
self.log_info(
fl_ctx,
f"Updated global model {n_vars} vars with {n_params} params in {end_time - start_time} seconds",
)
elif dxo.data_kind == DataKind.WEIGHTS:
base_model[ModelLearnableKey.WEIGHTS] = dxo.data
else:
raise NotImplementedError(f"data type {dxo.data_kind} not supported!")
self.log_debug(fl_ctx, "returning model")
base_model[ModelLearnableKey.META] = dxo.get_meta_props()
return base_model
def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:
"""Updates the global model in `Learnable` in encrypted space.
Args:
shareable: shareable
fl_ctx: FLContext
Returns:
Learnable object
"""
self.log_info(fl_ctx, "shareable_to_learnable...")
try:
return self._shareable_to_learnable(shareable, fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, "error converting shareable to model")
raise ValueError(f"{self._name} Exception {secure_format_exception(e)}") from e
def learnable_to_shareable(self, model_learnable: ModelLearnable, fl_ctx: FLContext) -> Shareable:
"""Convert ModelLearnable to Shareable.
Args:
model_learnable (ModelLearnable): model to be converted
fl_ctx (FLContext): FL context
Returns:
Shareable: a shareable containing a DXO object.
"""
# serialize model_learnable
serialize_nested_dict(model_learnable)
dxo = model_learnable_to_dxo(model_learnable)
return dxo.to_shareable()
| NVFlare-main | nvflare/app_opt/he/model_shareable_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/he/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
from typing import Union
import numpy as np
import tenseal as ts
from tenseal.tensors.ckksvector import CKKSVector
from nvflare.apis.dxo import DXO, DataKind, MetaKey
from nvflare.apis.dxo_filter import DXOFilter
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_opt.he import decomposers
from nvflare.app_opt.he.constant import HE_ALGORITHM_CKKS
from nvflare.app_opt.he.homomorphic_encrypt import count_encrypted_layers, load_tenseal_context_from_workspace
class HEModelEncryptor(DXOFilter):
def __init__(
self,
tenseal_context_file="client_context.tenseal",
encrypt_layers=None,
aggregation_weights=None,
weigh_by_local_iter=True,
data_kinds=None,
):
"""Filter to encrypt Shareable object using homomorphic encryption (HE) with TenSEAL
https://github.com/OpenMined/TenSEAL.
Args:
tenseal_context_file: tenseal context files containing encryption keys and parameters
encrypt_layers: if not specified (None), all layers are being encrypted;
if list of variable/layer names, only specified variables are encrypted;
if string containing regular expression (e.g. "conv"), only matched variables are
being encrypted.
aggregation_weights: dictionary of client aggregation `{"client1": 1.0, "client2": 2.0, "client3": 3.0}`;
defaults to a weight of 1.0 if not specified.
Note, if specified, the same `aggregation_weights` should also be used on the server
aggregator for the resulting weighted sum to be valid,
i.e. in `HEInTimeAccumulateWeightedAggregator`.
weigh_by_local_iter: If true, multiply client weights on first before encryption (default: `True`
which is recommended for HE)
data_kinds: data kinds to apply this filter
"""
if not data_kinds:
data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]
super().__init__(supported_data_kinds=[DataKind.WEIGHTS, DataKind.WEIGHT_DIFF], data_kinds_to_filter=data_kinds)
self.logger.info("Using HE model encryptor.")
self.tenseal_context = None
self.tenseal_context_file = tenseal_context_file
self.aggregation_weights = aggregation_weights or {}
self.logger.info(f"client weights control: {self.aggregation_weights}")
self.weigh_by_local_iter = weigh_by_local_iter
self.n_iter = None
self.client_name = None
self.aggregation_weight = None
# choose which layers to encrypt
if encrypt_layers is not None:
if not (isinstance(encrypt_layers, list) or isinstance(encrypt_layers, str)):
raise ValueError(
"Must provide a list of layer names or a string for regex matching, but got {}".format(
type(encrypt_layers)
)
)
if isinstance(encrypt_layers, list):
for encrypt_layer in encrypt_layers:
if not isinstance(encrypt_layer, str):
raise ValueError(
"encrypt_layers needs to be a list of layer names to encrypt, but found element of type {}".format(
type(encrypt_layer)
)
)
self.encrypt_layers = encrypt_layers
self.logger.info(f"Encrypting {len(encrypt_layers)} layers")
elif isinstance(encrypt_layers, str):
self.encrypt_layers = re.compile(encrypt_layers) if encrypt_layers else None
self.logger.info(f'Encrypting all layers based on regex matches with "{encrypt_layers}"')
else:
self.encrypt_layers = [True] # needs to be list for logic in encryption()
self.logger.info("Encrypting all layers")
decomposers.register()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
elif event_type == EventType.END_RUN:
self.tenseal_context = None
def encryption(self, params, fl_ctx: FLContext):
n_params = len(params.keys())
self.log_info(fl_ctx, f"Running HE Encryption algorithm on {n_params} variables")
# parse regex encrypt layers
if isinstance(self.encrypt_layers, re.Pattern):
re_pattern = self.encrypt_layers
self.encrypt_layers = []
for var_name in params:
if re_pattern.search(var_name):
self.encrypt_layers.append(var_name)
self.log_info(fl_ctx, f"Regex found {self.encrypt_layers} matching layers.")
if len(self.encrypt_layers) == 0:
raise ValueError(f"No matching layers found with regex {re_pattern}")
start_time = time.time()
n_encrypted, n_total = 0, 0
encryption_dict = {}
vmins, vmaxs = [], []
for i, param_name in enumerate(params.keys()):
values = params[param_name].ravel()
_n = np.size(values)
n_total += _n
# weigh before encryption
if self.aggregation_weight:
values = values * np.float64(self.aggregation_weight)
if self.weigh_by_local_iter:
values = values * np.float64(self.n_iter)
if param_name in self.encrypt_layers or self.encrypt_layers[0] is True:
self.log_info(fl_ctx, f"Encrypting vars {i+1} of {n_params}: {param_name} with {_n} values")
vmin = np.min(params[param_name])
vmax = np.max(params[param_name])
vmins.append(vmin)
vmaxs.append(vmax)
params[param_name] = ts.ckks_vector(self.tenseal_context, values)
encryption_dict[param_name] = True
n_encrypted += _n
elif isinstance(values, CKKSVector):
self.log_error(
fl_ctx, f"{i} of {n_params}: {param_name} = {np.shape(params[param_name])} already encrypted!"
)
raise ValueError("This should not happen!")
else:
params[param_name] = values
encryption_dict[param_name] = False
end_time = time.time()
if n_encrypted == 0:
raise ValueError("Nothing has been encrypted! Check provided encrypt_layers list of layer names or regex.")
self.log_info(
fl_ctx,
f"Encryption time for {n_encrypted} of {n_total} params"
f" (encrypted value range [{np.min(vmins)}, {np.max(vmaxs)}])"
f" {end_time - start_time} seconds.",
)
# params is a dictionary. keys are layer names. values are either weights or ckks_vector of weights.
# encryption_dict: keys are layer names. values are True for ckks_vectors, False elsewhere.
return params, encryption_dict
def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:
"""Filter process apply to the Shareable object.
Args:
dxo: data to be processed
shareable: that the dxo belongs to
fl_ctx: FLContext
Returns: DXO object with encrypted weights
"""
# TODO: could be removed later
if self.tenseal_context is None:
self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
peer_ctx = fl_ctx.get_peer_context()
assert isinstance(peer_ctx, FLContext)
self.client_name = peer_ctx.get_identity_name(default="?")
if self.aggregation_weights:
self.aggregation_weight = self.aggregation_weights.get(self.client_name, 1.0)
self.log_info(fl_ctx, f"weighting {self.client_name} by aggregation weight {self.aggregation_weight}")
if self.weigh_by_local_iter:
self.n_iter = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, None)
if self.n_iter is None:
raise ValueError("DXO data does not have local iterations for weighting!")
self.log_info(fl_ctx, f"weighting by local iter before encryption with {self.n_iter}")
return self._process(dxo, fl_ctx)
def _process(self, dxo: DXO, fl_ctx: FLContext) -> DXO:
self.log_info(fl_ctx, "Running HE encryption...")
encrypted_params, encryption_dict = self.encryption(params=dxo.data, fl_ctx=fl_ctx)
new_dxo = DXO(data_kind=dxo.data_kind, data=encrypted_params, meta=dxo.meta)
new_dxo.set_meta_prop(key=MetaKey.PROCESSED_KEYS, value=encryption_dict)
new_dxo.set_meta_prop(key=MetaKey.PROCESSED_ALGORITHM, value=HE_ALGORITHM_CKKS)
n_encrypted, n_total = count_encrypted_layers(encryption_dict)
self.log_info(fl_ctx, f"{n_encrypted} of {n_total} layers encrypted")
return new_dxo
| NVFlare-main | nvflare/app_opt/he/model_encryptor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import Learnable
from nvflare.app_common.abstract.persistor_filter import PersistorFilter
from nvflare.app_opt.he.homomorphic_encrypt import (
deserialize_nested_dict,
load_tenseal_context_from_workspace,
serialize_nested_dict,
)
class HEModelSerializeFilter(PersistorFilter):
def __init__(self, tenseal_context_file="server_context.tenseal"):
"""Used to serialize TenSEAL encrypted server models for use with
homomorphic encryption (HE) support using TenSEAL https://github.com/OpenMined/TenSEAL.
Args:
tenseal_context_file: tenseal context files containing TenSEAL context
"""
super().__init__()
self.tenseal_context = None
self.tenseal_context_file = tenseal_context_file
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
elif event_type == EventType.END_RUN:
self.tenseal_context = None
def process_post_load(self, learnable: Learnable, fl_ctx: FLContext) -> Learnable:
"""Filter process applied to the Learnable object after it was loaded.
Args:
learnable: Learnable
fl_ctx: FLContext
Returns:
a Learnable object
"""
return deserialize_nested_dict(learnable, self.tenseal_context)
def process_pre_save(self, learnable: Learnable, fl_ctx: FLContext) -> Learnable:
"""Filter process applied to the Learnable object to support persisting when containing encrypted objects.
Args:
learnable: Learnable
fl_ctx: FLContext
Returns:
a Learnable object
"""
return serialize_nested_dict(learnable)
def process_post_save(self, learnable: Learnable, fl_ctx: FLContext) -> Learnable:
"""Filter process applied to the Learnable object to support persisting when containing encrypted objects.
Args:
learnable: Learnable
fl_ctx: FLContext
Returns:
a Learnable object
"""
return deserialize_nested_dict(learnable, self.tenseal_context)
def process_post_get(self, learnable: Learnable, fl_ctx: FLContext) -> Learnable:
"""Filter process applied to the Learnable object after it was returned.
Args:
learnable: Learnable
fl_ctx: FLContext
Returns:
a Learnable object
"""
return deserialize_nested_dict(learnable, self.tenseal_context)
| NVFlare-main | nvflare/app_opt/he/model_serialize_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Union
import tenseal as ts
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.aggregators.intime_accumulate_model_aggregator import InTimeAccumulateWeightedAggregator
from nvflare.app_opt.he import decomposers
class HEInTimeAccumulateWeightedAggregator(InTimeAccumulateWeightedAggregator):
def __init__(
self,
exclude_vars: Union[str, Dict[str, str], None] = None,
aggregation_weights: Union[Dict[str, Any], Dict[str, Dict[str, Any]], None] = None,
expected_data_kind: Union[DataKind, Dict[str, DataKind]] = DataKind.WEIGHT_DIFF,
weigh_by_local_iter=False,
):
"""In time aggregator for `Shareables` encrypted using homomorphic encryption (HE) with TenSEAL https://github.com/OpenMined/TenSEAL.
Needed to register FOBS decomposer for HE (e.g. for CKKSVector).
Args:
exclude_vars ([list], optional): variable names that should be excluded from aggregation (use regular expression). Defaults to None.
aggregation_weights ([dict], optional): dictionary of client aggregation. Defaults to None.
weigh_by_local_iter (bool, optional): If true, multiply client weights on first in encryption space
(default: `False` which is recommended for HE, first multiply happens in `HEModelEncryptor`)].
expected_data_kind (str, optional): the data_kind this aggregator can process. Defaults to "WEIGHT_DIFF".
"""
super().__init__(
exclude_vars=exclude_vars,
aggregation_weights=aggregation_weights,
expected_data_kind=expected_data_kind,
weigh_by_local_iter=weigh_by_local_iter,
)
decomposers.register()
def aggregate(self, fl_ctx: FLContext) -> Shareable:
shareable = super().aggregate(fl_ctx=fl_ctx)
# get processed keys and add to dxo
dxo = from_shareable(shareable)
weights = dxo.data
if not isinstance(weights, dict):
raise ValueError(f"Expected weights to be of type dict but got type {type(weights)}")
encrypted_layers = dict()
for k, v in weights.items():
if isinstance(v, ts.CKKSVector):
encrypted_layers[k] = True
else:
encrypted_layers[k] = False
dxo.set_meta_prop(MetaKey.PROCESSED_KEYS, encrypted_layers)
return dxo.to_shareable()
| NVFlare-main | nvflare/app_opt/he/intime_accumulate_model_aggregator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tenseal as ts
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.fuel.sec.security_content_service import LoadResult, SecurityContentService
def load_tenseal_context_from_workspace(ctx_file_name: str, fl_ctx: FLContext):
"""Loads homomorphic encryption (HE) context from TenSEAL (https://github.com/OpenMined/TenSEAL) containing encryption keys and parameters.
Args:
ctx_file_name: filepath of TensSEAL context file
fl_ctx: FL context
Returns:
TenSEAL context
"""
is_secure_mode = fl_ctx.get_prop(FLContextKey.SECURE_MODE, True)
data, rc = SecurityContentService.load_content(ctx_file_name)
bad_rcs = [LoadResult.INVALID_CONTENT, LoadResult.NO_SUCH_CONTENT]
if is_secure_mode:
bad_rcs.extend([LoadResult.INVALID_SIGNATURE, LoadResult.NOT_SIGNED])
if rc in bad_rcs:
raise ValueError("Cannot load tenseal_context {}: {}".format(ctx_file_name, rc))
context = ts.context_from(data)
return context
def count_encrypted_layers(encrypted_layers: dict):
"""Count number of encrypted layers homomorphic encryption (HE) layers/variables."""
n_total = len(encrypted_layers)
n_encrypted = 0
for e in encrypted_layers.keys():
if encrypted_layers[e]:
n_encrypted += 1
return n_encrypted, n_total
def serialize_nested_dict(d):
for k, v in d.items():
if isinstance(v, dict):
serialize_nested_dict(v)
else:
if isinstance(v, ts.CKKSVector):
d[k] = v.serialize()
return d
def deserialize_nested_dict(d, context):
for k, v in d.items():
if isinstance(v, dict):
deserialize_nested_dict(v, context)
else:
if isinstance(v, bytes):
d[k] = ts.ckks_vector_from(context, v)
return d
| NVFlare-main | nvflare/app_opt/he/homomorphic_encrypt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HE_ALGORITHM_CKKS = "CKKS"
| NVFlare-main | nvflare/app_opt/he/constant.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
from nvflare.apis.dxo import DXO, from_bytes
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.workflows.cross_site_model_eval import CrossSiteModelEval
from nvflare.app_opt.he.homomorphic_encrypt import load_tenseal_context_from_workspace, serialize_nested_dict
from nvflare.security.logging import secure_format_exception
# TODO: Might be able to use CrossSiteModelEval directly
class HECrossSiteModelEval(CrossSiteModelEval):
def __init__(
self,
tenseal_context_file="server_context.tenseal",
task_check_period=0.5,
cross_val_dir=AppConstants.CROSS_VAL_DIR,
submit_model_timeout=600,
validation_timeout: int = 6000,
model_locator_id="",
formatter_id="",
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
validation_task_name=AppConstants.TASK_VALIDATION,
cleanup_models=False,
participating_clients=None,
wait_for_clients_timeout=300,
):
"""Cross Site Model Validation workflow for HE.
Args:
task_check_period (float, optional): How often to check for new tasks or tasks being finished.
Defaults to 0.5.
cross_val_dir (str, optional): Path to cross site validation directory relative to run directory.
Defaults to `AppConstants.CROSS_VAL_DIR`.
submit_model_timeout (int, optional): Timeout of submit_model_task. Defaults to 600 secs.
validation_timeout (int, optional): Timeout for validate_model task. Defaults to 6000 secs.
model_locator_id (str, optional): ID for model_locator component. Defaults to "".
formatter_id (str, optional): ID for formatter component. Defaults to "".
submit_model_task_name (str, optional): Name of submit_model task. Defaults to `AppConstants.TASK_SUBMIT_MODEL`.
validation_task_name (str, optional): Name of validate_model task. Defaults to `AppConstants.TASK_VALIDATION`.
cleanup_models (bool, optional): Whether or not models should be deleted after run. Defaults to False.
participating_clients (list, optional): List of participating client names. If not provided, defaults
to all clients connected at start of controller.
wait_for_clients_timeout (int, optional): Timeout for clients to appear. Defaults to 300 secs
"""
super().__init__(
task_check_period=task_check_period,
cross_val_dir=cross_val_dir,
validation_timeout=validation_timeout,
model_locator_id=model_locator_id,
formatter_id=formatter_id,
validation_task_name=validation_task_name,
submit_model_task_name=submit_model_task_name,
submit_model_timeout=submit_model_timeout,
cleanup_models=cleanup_models,
participating_clients=participating_clients,
wait_for_clients_timeout=wait_for_clients_timeout,
)
self.tenseal_context = None
self.tenseal_context_file = tenseal_context_file
def start_controller(self, fl_ctx: FLContext):
super().start_controller(fl_ctx)
self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
def _save_validation_content(self, name: str, save_dir: str, dxo: DXO, fl_ctx: FLContext) -> str:
"""Saves shareable to given directory within the app_dir.
Args:
name (str): Name of shareable
save_dir (str): Relative path to directory in which to save
dxo (DXO): DXO object
fl_ctx (FLContext): FLContext object
Returns:
str: Path to the file saved.
"""
# Save the model with name as the filename to shareable directory
data_filename = os.path.join(save_dir, name)
try:
serialize_nested_dict(dxo.data)
bytes_to_save = dxo.to_bytes()
except Exception as e:
raise ValueError(f"Unable to extract shareable contents. Exception: {(secure_format_exception(e))}")
# Save contents to path
try:
with open(data_filename, "wb") as f:
f.write(bytes_to_save)
except Exception as e:
raise ValueError(f"Unable to save shareable contents: {secure_format_exception(e)}")
self.log_debug(fl_ctx, f"Saved cross validation model with name: {name}.")
return data_filename
def _load_validation_content(self, name: str, load_dir: str, fl_ctx: FLContext) -> Union[DXO, None]:
# Load shareable from disk
shareable_filename = os.path.join(load_dir, name)
# load shareable
try:
with open(shareable_filename, "rb") as f:
data = f.read()
dxo: DXO = from_bytes(data)
self.log_debug(fl_ctx, f"Loading cross validation shareable content with name: {name}.")
except Exception as e:
raise ValueError(f"Exception in loading shareable content for {name}: {secure_format_exception(e)}")
return dxo
| NVFlare-main | nvflare/app_opt/he/cross_site_model_eval.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Union
import numpy as np
from tenseal.tensors.ckksvector import CKKSVector
from nvflare.apis.dxo import DXO, DataKind, MetaKey
from nvflare.apis.dxo_filter import DXOFilter
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_opt.he import decomposers
from nvflare.app_opt.he.constant import HE_ALGORITHM_CKKS
from nvflare.app_opt.he.homomorphic_encrypt import (
count_encrypted_layers,
deserialize_nested_dict,
load_tenseal_context_from_workspace,
)
class HEModelDecryptor(DXOFilter):
def __init__(self, tenseal_context_file="client_context.tenseal", data_kinds: [str] = None):
"""Filter to decrypt Shareable object using homomorphic encryption (HE) with TenSEAL
https://github.com/OpenMined/TenSEAL.
Args:
tenseal_context_file: tenseal context files containing decryption keys and parameters
data_kinds: kinds of DXOs to filter
"""
if not data_kinds:
data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]
super().__init__(supported_data_kinds=[DataKind.WEIGHTS, DataKind.WEIGHT_DIFF], data_kinds_to_filter=data_kinds)
self.logger.info("Using HE model decryptor.")
self.tenseal_context = None
self.tenseal_context_file = tenseal_context_file
decomposers.register()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
elif event_type == EventType.END_RUN:
self.tenseal_context = None
def decryption(self, params: dict, encrypted_layers: dict, fl_ctx: FLContext):
n_params = len(params.keys())
self.log_info(fl_ctx, f"Running HE Decryption algorithm {n_params} variables")
if encrypted_layers is None:
raise ValueError("encrypted_layers is None!")
deserialize_nested_dict(params, context=self.tenseal_context)
start_time = time.time()
n_decrypted, n_total = 0, 0
for i, param_name in enumerate(params.keys()):
values = params[param_name]
if encrypted_layers[param_name]:
_n = values.size()
n_total += _n
if isinstance(values, CKKSVector):
self.log_info(fl_ctx, f"Decrypting vars {i+1} of {n_params}: {param_name} with {_n} values")
params[param_name] = values.decrypt(secret_key=self.tenseal_context.secret_key())
n_decrypted += _n
else:
self.log_info(
fl_ctx,
f"{i} of {n_params}: {param_name} = {np.shape(params[param_name])} already decrypted (RAW)!",
)
raise ValueError("Should be encrypted at this point!")
else:
params[param_name] = values
end_time = time.time()
self.log_info(fl_ctx, f"Decryption time for {n_decrypted} of {n_total} params {end_time - start_time} seconds.")
return params
def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:
"""Filter process apply to the Shareable object.
Args:
dxo: Data Exchange Object
shareable: shareable
fl_ctx: FLContext
Returns: DXO object with decrypted weights
"""
# TODO: could be removed later
if self.tenseal_context is None:
self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
self.log_info(fl_ctx, "Running decryption...")
encrypted_layers = dxo.get_meta_prop(key=MetaKey.PROCESSED_KEYS, default=None)
if not encrypted_layers:
self.log_warning(
fl_ctx,
"DXO does not contain PROCESSED_KEYS (do nothing). "
"Note, this is normal in the first round of training, as the initial global model is not encrypted.",
)
return None
encrypted_algo = dxo.get_meta_prop(key=MetaKey.PROCESSED_ALGORITHM, default=None)
if encrypted_algo != HE_ALGORITHM_CKKS:
self.log_error(fl_ctx, "shareable is not HE CKKS encrypted")
return None
n_encrypted, n_total = count_encrypted_layers(encrypted_layers)
self.log_info(fl_ctx, f"{n_encrypted} of {n_total} layers encrypted")
decrypted_params = self.decryption(
params=dxo.data,
encrypted_layers=encrypted_layers,
fl_ctx=fl_ctx,
)
dxo.data = decrypted_params
dxo.remove_meta_props([MetaKey.PROCESSED_ALGORITHM, MetaKey.PROCESSED_KEYS])
dxo.update_shareable(shareable)
return dxo
| NVFlare-main | nvflare/app_opt/he/model_decryptor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Tuple
import xgboost as xgb
class XGBDataLoader(ABC):
@abstractmethod
def load_data(self, client_id: str) -> Tuple[xgb.core.DMatrix, xgb.core.DMatrix]:
"""Loads data for xgboost.
Returns:
A tuple of train_data, validation_data
"""
pass
| NVFlare-main | nvflare/app_opt/xgboost/data_loader.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/xgboost/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, model_learnable_to_dxo
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
def _get_xgboost_model_attr(xgb_model):
num_parallel_tree = int(
xgb_model["learner"]["gradient_booster"]["model"]["gbtree_model_param"]["num_parallel_tree"]
)
if "best_iteration" in xgb_model["learner"]["attributes"].keys():
best_iteration = int(xgb_model["learner"]["attributes"]["best_iteration"])
else:
best_iteration = 1
best_ntree_limit = int(xgb_model["learner"]["attributes"]["best_ntree_limit"])
num_trees = int(xgb_model["learner"]["gradient_booster"]["model"]["gbtree_model_param"]["num_trees"])
return num_parallel_tree, best_iteration, best_ntree_limit, num_trees
def update_model(prev_model, model_update):
if not prev_model:
return model_update
else:
# Append all trees
# get the parameters
pre_num_parallel_tree, pre_best_iteration, pre_best_ntree_limit, pre_num_trees = _get_xgboost_model_attr(
prev_model
)
cur_num_parallel_tree, add_best_iteration, add_best_ntree_limit, add_num_trees = _get_xgboost_model_attr(
model_update
)
# check num_parallel_tree, should be consistent
if cur_num_parallel_tree != pre_num_parallel_tree:
raise ValueError(
f"add_num_parallel_tree should not change, previous {pre_num_parallel_tree}, current {add_num_parallel_tree}"
)
prev_model["learner"]["attributes"]["best_iteration"] = str(pre_best_iteration + 1)
prev_model["learner"]["attributes"]["best_ntree_limit"] = str(pre_best_ntree_limit + cur_num_parallel_tree)
prev_model["learner"]["gradient_booster"]["model"]["gbtree_model_param"]["num_trees"] = str(
pre_num_trees + cur_num_parallel_tree
)
# append the new trees
append_info = model_update["learner"]["gradient_booster"]["model"]["trees"]
for tree_ct in range(cur_num_parallel_tree):
append_info[tree_ct]["id"] = pre_num_trees + tree_ct
prev_model["learner"]["gradient_booster"]["model"]["trees"].append(append_info[tree_ct])
prev_model["learner"]["gradient_booster"]["model"]["tree_info"].append(0)
return prev_model
class XGBModelShareableGenerator(ShareableGenerator):
def __init__(self):
super().__init__()
self.shareable = None
def learnable_to_shareable(self, model_learnable: ModelLearnable, fl_ctx: FLContext) -> Shareable:
"""Convert ModelLearnable to Shareable.
Args:
model_learnable (ModelLearnable): model to be converted
fl_ctx (FLContext): FL context
Returns:
Shareable: a shareable containing a DXO object.
"""
if not self.shareable:
# initialization or recovering from previous training
model = model_learnable[ModelLearnableKey.WEIGHTS]
if model:
# recovering from previous run - distinguish between cyclic and bagging modes as
# global model format is different
if isinstance(model, dict):
# bagging mode
serialized_model = bytearray(json.dumps(model), "utf-8")
else:
# cyclic mode, model should be serialized already
serialized_model = model
dxo = DXO(data_kind=DataKind.XGB_MODEL, data={"model_data": serialized_model})
else:
# initial run, starting from empty model
dxo = model_learnable_to_dxo(model_learnable)
return dxo.to_shareable()
else:
# return shareable saved from previous call to shareable_to_learnable
return self.shareable
def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:
"""Convert Shareable to ModelLearnable.
Supporting TYPE == TYPE_XGB_MODEL
Args:
shareable (Shareable): Shareable that contains a DXO object
fl_ctx (FLContext): FL context
Returns:
A ModelLearnable object
Raises:
TypeError: if shareable is not of type shareable
ValueError: if data_kind is not `DataKind.XGB_MODEL`
"""
if not isinstance(shareable, Shareable):
raise TypeError("shareable must be Shareable, but got {}.".format(type(shareable)))
base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
if not base_model:
self.system_panic(reason="No global base model!", fl_ctx=fl_ctx)
return base_model
dxo = from_shareable(shareable)
if dxo.data_kind == DataKind.XGB_MODEL:
model_update = dxo.data
if not model_update:
self.log_info(fl_ctx, "No model update found. Model will not be updated.")
else:
model_data_dict = model_update.get("model_data_dict")
if model_data_dict:
# model update is from aggregator in bagging mode, update global model
model = base_model[ModelLearnableKey.WEIGHTS]
for update in model_data_dict:
model = update_model(model, update)
# remove model update dict from shareable that will be sent
dxo.data = {"model_data": model_update["model_data"]}
else:
# model update is serialized full model currently in cyclic mode
model = model_update.get("model_data")
base_model[ModelLearnableKey.WEIGHTS] = model
self.shareable = dxo.to_shareable()
else:
raise ValueError("data_kind should be either DataKind.XGB_MODEL, but got {}".format(dxo.data_kind))
return base_model
| NVFlare-main | nvflare/app_opt/xgboost/tree_based/shareable_generator.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/xgboost/tree_based/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants
class XGBModelPersistor(ModelPersistor):
def __init__(self, save_name="xgboost_model.json", load_as_dict=True):
super().__init__()
self.save_name = save_name
self.load_as_dict = load_as_dict
def _initialize(self, fl_ctx: FLContext):
# get save path from FLContext
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.log_dir = app_root
self.save_path = os.path.join(self.log_dir, self.save_name)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
fl_ctx.sync_sticky()
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
"""Initialize and load the Model.
Args:
fl_ctx: FLContext
Returns:
ModelLearnable object
"""
model = None
if os.path.exists(self.save_path):
self.logger.info("Loading server model")
with open(self.save_path, "r") as json_file:
model = json.load(json_file)
if not self.load_as_dict:
model = bytearray(json.dumps(model), "utf-8")
else:
self.logger.info("Initializing server model as None")
model_learnable = make_model_learnable(weights=model, meta_props=dict())
return model_learnable
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
self._initialize(fl_ctx)
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
"""Persists the Model object.
Args:
model_learnable: ModelLearnable object
fl_ctx: FLContext
"""
if model_learnable:
if fl_ctx.get_prop(AppConstants.CURRENT_ROUND) == fl_ctx.get_prop(AppConstants.NUM_ROUNDS) - 1:
self.logger.info(f"Saving received model to {os.path.abspath(self.save_path)}")
# save 'weights' which is actual model, loadable by xgboost library
model = model_learnable[ModelLearnableKey.WEIGHTS]
with open(self.save_path, "w") as f:
if isinstance(model, dict):
json.dump(model, f)
elif isinstance(model, bytes) or isinstance(model, bytearray) or isinstance(model, str):
# should already be json, but double check by loading and dumping at some extra cost
json.dump(json.loads(model), f)
else:
self.logger.error("unknown model format")
self.system_panic(reason="No global base model!", fl_ctx=fl_ctx)
| NVFlare-main | nvflare/app_opt/xgboost/tree_based/model_persistor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.aggregator import Aggregator
from nvflare.app_common.app_constant import AppConstants
class XGBBaggingAggregator(Aggregator):
def __init__(self):
"""Perform bagging aggregation for XGBoost trees.
The trees are pre-weighted during training.
Bagging aggregation simply add the new trees to existing global model.
"""
super().__init__()
self.logger.debug(f"expected data kind: {DataKind.XGB_MODEL}")
self.history = []
self.local_models = []
self.local_models_as_dict = []
self.global_model = None
self.expected_data_kind = DataKind.XGB_MODEL
self.num_trees = 0
def accept(self, shareable: Shareable, fl_ctx: FLContext) -> bool:
"""Store shareable and update aggregator's internal state
Args:
shareable: information from contributor
fl_ctx: context provided by workflow
Returns:
The first boolean indicates if this shareable is accepted.
The second boolean indicates if aggregate can be called.
"""
try:
dxo = from_shareable(shareable)
except Exception:
self.log_exception(fl_ctx, "shareable data is not a valid DXO")
return False
contributor_name = shareable.get_peer_prop(key=ReservedKey.IDENTITY_NAME, default="?")
contribution_round = shareable.get_cookie(AppConstants.CONTRIBUTION_ROUND)
rc = shareable.get_return_code()
if rc and rc != ReturnCode.OK:
self.log_warning(fl_ctx, f"Contributor {contributor_name} returned rc: {rc}. Disregarding contribution.")
return False
if dxo.data_kind != self.expected_data_kind:
self.log_error(fl_ctx, "expected {} but got {}".format(self.expected_data_kind, dxo.data_kind))
return False
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
self.log_debug(fl_ctx, f"current_round: {current_round}")
if contribution_round != current_round:
self.log_warning(
fl_ctx,
f"discarding DXO from {contributor_name} at round: "
f"{contribution_round}. Current round is: {current_round}",
)
return False
for item in self.history:
if contributor_name == item["contributor_name"]:
prev_round = item["round"]
self.log_warning(
fl_ctx,
f"discarding DXO from {contributor_name} at round: "
f"{contribution_round} as {prev_round} accepted already",
)
return False
data = dxo.data
if data is None:
self.log_error(fl_ctx, "no data to aggregate")
return False
else:
self.local_models.append(data["model_data"])
self.local_models_as_dict.append(json.loads(data["model_data"]))
self.history.append(
{
"contributor_name": contributor_name,
"round": contribution_round,
}
)
return True
def aggregate(self, fl_ctx: FLContext) -> Shareable:
"""Called when workflow determines to generate shareable to send back to contributors
Args:
fl_ctx (FLContext): context provided by workflow
Returns:
Shareable: the weighted mean of accepted shareables from contributors
"""
self.log_debug(fl_ctx, "Start aggregation")
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
site_num = len(self.history)
self.log_info(fl_ctx, f"aggregating {site_num} update(s) at round {current_round}")
self.history = []
self.log_debug(fl_ctx, "End aggregation")
local_updates = self.local_models
local_updates_as_dict = self.local_models_as_dict
self.local_models = []
self.local_models_as_dict = []
dxo = DXO(
data_kind=self.expected_data_kind,
data={"model_data": local_updates, "model_data_dict": local_updates_as_dict},
)
return dxo.to_shareable()
| NVFlare-main | nvflare/app_opt/xgboost/tree_based/bagging_aggregator.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import xgboost as xgb
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.xgboost.data_loader import XGBDataLoader
from nvflare.app_opt.xgboost.tree_based.shareable_generator import update_model
from nvflare.fuel.utils.import_utils import optional_import
from nvflare.security.logging import secure_format_exception
class FedXGBTreeExecutor(Executor):
def __init__(
self,
training_mode,
lr_scale,
data_loader_id: str,
num_client_bagging: int = 1,
lr_mode: str = "uniform",
local_model_path: str = "model.json",
global_model_path: str = "model_global.json",
learning_rate: float = 0.1,
objective: str = "binary:logistic",
num_local_parallel_tree: int = 1,
local_subsample: float = 1,
max_depth: int = 8,
eval_metric: str = "auc",
nthread: int = 16,
tree_method: str = "hist",
train_task_name: str = AppConstants.TASK_TRAIN,
):
super().__init__()
self.client_id = None
self.writer = None
self.training_mode = training_mode
self.num_client_bagging = num_client_bagging
self.lr = None
self.lr_scale = lr_scale
self.base_lr = learning_rate
self.lr_mode = lr_mode
self.num_local_parallel_tree = num_local_parallel_tree
self.local_subsample = local_subsample
self.local_model_path = local_model_path
self.global_model_path = global_model_path
self.objective = objective
self.max_depth = max_depth
self.eval_metric = eval_metric
self.nthread = nthread
self.tree_method = tree_method
self.train_task_name = train_task_name
self.num_local_round = 1
self.bst = None
self.global_model_as_dict = None
self.config = None
self.local_model = None
self.data_loader_id = data_loader_id
self.train_data = None
self.val_data = None
# use dynamic shrinkage - adjusted by personalized scaling factor
if lr_mode not in ["uniform", "scaled"]:
raise ValueError(f"Only support [uniform] or [scaled] mode, but got {lr_mode}")
def initialize(self, fl_ctx: FLContext):
# set the paths according to fl_ctx
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_dir = ws.get_app_dir(fl_ctx.get_job_id())
self.local_model_path = os.path.join(app_dir, self.local_model_path)
self.global_model_path = os.path.join(app_dir, self.global_model_path)
# get and print the args
fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
self.client_id = fl_ctx.get_identity_name()
self.log_info(
fl_ctx,
f"Client {self.client_id} initialized with args: \n {fl_args}",
)
# set local tensorboard writer for local training info of current model
tensorboard, flag = optional_import(module="torch.utils.tensorboard")
if flag:
self.writer = tensorboard.SummaryWriter(app_dir)
if self.training_mode not in ["cyclic", "bagging"]:
self.system_panic(f"Only support [cyclic] or [bagging] mode, but got {self.training_mode}", fl_ctx)
return
# load data and lr_scale, this is task/site-specific
data_loader = engine.get_component(self.data_loader_id)
if not isinstance(data_loader, XGBDataLoader):
self.system_panic("data_loader should be type XGBDataLoader", fl_ctx)
try:
self.train_data, self.val_data = data_loader.load_data(self.client_id)
except Exception as e:
self.system_panic(f"load_data failed: {secure_format_exception(e)}", fl_ctx)
self.lr = self._get_effective_learning_rate()
def _get_effective_learning_rate(self):
if self.training_mode == "bagging":
# Bagging mode
if self.lr_mode == "uniform":
# uniform lr, global learning_rate scaled by num_client_bagging for bagging
lr = self.base_lr / self.num_client_bagging
else:
# scaled lr, global learning_rate scaled by data size percentage
lr = self.base_lr * self.lr_scale
else:
# Cyclic mode, directly use the base learning_rate
lr = self.base_lr
return lr
def _get_xgb_train_params(self):
params = {
"objective": self.objective,
"eta": self.lr,
"max_depth": self.max_depth,
"eval_metric": self.eval_metric,
"nthread": self.nthread,
"num_parallel_tree": self.num_local_parallel_tree,
"subsample": self.local_subsample,
"tree_method": self.tree_method,
}
return params
def _local_boost_bagging(self, fl_ctx: FLContext):
eval_results = self.bst.eval_set(
evals=[(self.train_data, "train"), (self.val_data, "valid")], iteration=self.bst.num_boosted_rounds() - 1
)
self.log_info(fl_ctx, eval_results)
auc = float(eval_results.split("\t")[2].split(":")[1])
for i in range(self.num_local_round):
self.bst.update(self.train_data, self.bst.num_boosted_rounds())
# extract newly added self.num_local_round using xgboost slicing api
bst = self.bst[self.bst.num_boosted_rounds() - self.num_local_round : self.bst.num_boosted_rounds()]
self.log_info(
fl_ctx,
f"Global AUC {auc}",
)
if self.writer:
# note: writing auc before current training step, for passed in global model
self.writer.add_scalar(
"AUC", auc, int((self.bst.num_boosted_rounds() - self.num_local_round - 1) / self.num_client_bagging)
)
return bst
def _local_boost_cyclic(self, fl_ctx: FLContext):
# Cyclic mode
# starting from global model
# return the whole boosting tree series
self.bst.update(self.train_data, self.bst.num_boosted_rounds())
eval_results = self.bst.eval_set(
evals=[(self.train_data, "train"), (self.val_data, "valid")], iteration=self.bst.num_boosted_rounds() - 1
)
self.log_info(fl_ctx, eval_results)
auc = float(eval_results.split("\t")[2].split(":")[1])
self.log_info(
fl_ctx,
f"Client {self.client_id} AUC after training: {auc}",
)
if self.writer:
self.writer.add_scalar("AUC", auc, self.bst.num_boosted_rounds() - 1)
return self.bst
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
if abort_signal.triggered:
self.finalize(fl_ctx)
return make_reply(ReturnCode.TASK_ABORTED)
# retrieve current global model download from server's shareable
dxo = from_shareable(shareable)
model_update = dxo.data
# xgboost parameters
params = self._get_xgb_train_params()
if not self.bst:
# First round
self.log_info(
fl_ctx,
f"Client {self.client_id} initial training from scratch",
)
if not model_update:
bst = xgb.train(
params,
self.train_data,
num_boost_round=self.num_local_round,
evals=[(self.val_data, "validate"), (self.train_data, "train")],
)
else:
loadable_model = bytearray(model_update["model_data"])
bst = xgb.train(
params,
self.train_data,
num_boost_round=self.num_local_round,
xgb_model=loadable_model,
evals=[(self.val_data, "validate"), (self.train_data, "train")],
)
self.config = bst.save_config()
self.bst = bst
else:
self.log_info(
fl_ctx,
f"Client {self.client_id} model updates received from server",
)
if self.training_mode == "bagging":
model_updates = model_update["model_data"]
for update in model_updates:
self.global_model_as_dict = update_model(self.global_model_as_dict, json.loads(update))
loadable_model = bytearray(json.dumps(self.global_model_as_dict), "utf-8")
else:
loadable_model = bytearray(model_update["model_data"])
self.log_info(
fl_ctx,
f"Client {self.client_id} converted global model to json ",
)
self.bst.load_model(loadable_model)
self.bst.load_config(self.config)
self.log_info(
fl_ctx,
f"Client {self.client_id} loaded global model into booster ",
)
# train local model starting with global model
if self.training_mode == "bagging":
bst = self._local_boost_bagging(fl_ctx)
else:
bst = self._local_boost_cyclic(fl_ctx)
self.local_model = bst.save_raw("json")
# report updated model in shareable
dxo = DXO(data_kind=DataKind.XGB_MODEL, data={"model_data": self.local_model})
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
new_shareable = dxo.to_shareable()
if self.writer:
self.writer.flush()
return new_shareable
def finalize(self, fl_ctx: FLContext):
# freeing resources in finalize avoids seg fault during shutdown of gpu mode
del self.bst
del self.train_data
del self.val_data
self.log_info(fl_ctx, "Freed training resources")
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
try:
if task_name == "train":
return self.train(shareable, fl_ctx, abort_signal)
else:
self.log_error(fl_ctx, f"Could not handle task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
# Task execution error, return EXECUTION_EXCEPTION Shareable
self.log_exception(fl_ctx, f"execute exception: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
elif event_type == EventType.END_RUN:
self.finalize(fl_ctx)
| NVFlare-main | nvflare/app_opt/xgboost/tree_based/executor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
from nvflare.apis.client import Client
from nvflare.apis.controller_spec import Task
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import Controller
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.apis.workspace import Workspace
from nvflare.fuel.utils.import_utils import optional_import
from nvflare.fuel.utils.network_utils import get_open_ports
from nvflare.security.logging import secure_format_exception, secure_format_traceback
from .constants import XGB_TRAIN_TASK, XGBShareableHeader
class XGBFedController(Controller):
def __init__(self, train_timeout: int = 300, port: int = None):
"""Federated XGBoost training controller for histogram-base collaboration.
It starts the XGBoost federated server and kicks off all the XGBoost job on
each NVFlare client. The configuration is generic for this component and
no modification is needed for most training jobs.
Args:
train_timeout (int, optional): Time to wait for clients to do local training in seconds.
port (int, optional): the port to open XGBoost FL server
Raises:
TypeError: when any of input arguments does not have correct type
ValueError: when any of input arguments is out of range
"""
super().__init__()
if not isinstance(train_timeout, int):
raise TypeError("train_timeout must be int but got {}".format(type(train_timeout)))
self._port = port
self._xgb_fl_server = None
self._participate_clients = None
self._rank_map = None
self._secure = False
self._train_timeout = train_timeout
self._server_cert_path = None
self._server_key_path = None
self._ca_cert_path = None
self._started = False
def _get_certificates(self, fl_ctx: FLContext):
workspace: Workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
bin_folder = workspace.get_startup_kit_dir()
server_cert_path = os.path.join(bin_folder, "server.crt")
if not os.path.exists(server_cert_path):
self.log_error(fl_ctx, "Missing server certificate (server.crt)")
return False
server_key_path = os.path.join(bin_folder, "server.key")
if not os.path.exists(server_key_path):
self.log_error(fl_ctx, "Missing server key (server.key)")
return False
ca_cert_path = os.path.join(bin_folder, "rootCA.pem")
if not os.path.exists(ca_cert_path):
self.log_error(fl_ctx, "Missing ca certificate (rootCA.pem)")
return False
self._server_cert_path = server_cert_path
self._server_key_path = server_key_path
self._ca_cert_path = ca_cert_path
return True
def start_controller(self, fl_ctx: FLContext):
self.log_info(fl_ctx, f"Initializing {self.__class__.__name__} workflow.")
xgb_federated, flag = optional_import(module="xgboost.federated")
if not flag:
self.log_error(fl_ctx, "Can't import xgboost.federated")
return
# Assumption: all clients are used
clients = self._engine.get_clients()
# Sort by client name so rank is consistent
clients.sort(key=lambda client: client.name)
rank_map = {clients[i].name: i for i in range(0, len(clients))}
self._rank_map = rank_map
self._participate_clients = clients
if not self._port:
self._port = get_open_ports(1)[0]
self.log_info(fl_ctx, f"Starting XGBoost FL server on port {self._port}")
self._secure = self._engine.server.secure_train
if self._secure:
if not self._get_certificates(fl_ctx):
self.log_error(fl_ctx, "Can't get required certificates for XGB FL server in secure mode.")
return
self._xgb_fl_server = multiprocessing.Process(
target=xgb_federated.run_federated_server,
args=(self._port, len(clients), self._server_key_path, self._server_cert_path, self._ca_cert_path),
)
else:
self._xgb_fl_server = multiprocessing.Process(
target=xgb_federated.run_federated_server, args=(self._port, len(clients))
)
self._xgb_fl_server.start()
self._started = True
def stop_controller(self, fl_ctx: FLContext):
if self._xgb_fl_server:
self._xgb_fl_server.terminate()
self._started = False
def process_result_of_unknown_task(
self, client: Client, task_name, client_task_id, result: Shareable, fl_ctx: FLContext
):
self.log_error(fl_ctx, f"Unknown task: {task_name} from client {client.name}.")
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
self.log_info(fl_ctx, "Begin XGBoost training phase.")
if not self._started:
msg = "Controller does not start successfully."
self.log_error(fl_ctx, msg)
self.system_panic(msg, fl_ctx)
return
try:
data = Shareable()
data.set_header(XGBShareableHeader.WORLD_SIZE, len(self._participate_clients))
data.set_header(XGBShareableHeader.RANK_MAP, self._rank_map)
data.set_header(XGBShareableHeader.XGB_FL_SERVER_PORT, self._port)
data.set_header(XGBShareableHeader.XGB_FL_SERVER_SECURE, self._secure)
train_task = Task(
name=XGB_TRAIN_TASK,
data=data,
timeout=self._train_timeout,
)
self.broadcast_and_wait(
task=train_task,
targets=self._participate_clients,
min_responses=len(self._participate_clients),
fl_ctx=fl_ctx,
abort_signal=abort_signal,
)
self.log_info(fl_ctx, "Finish training phase.")
except Exception as e:
err = secure_format_traceback()
error_msg = f"Exception in control_flow: {secure_format_exception(e)}: {err}"
self.log_exception(fl_ctx, error_msg)
self.system_panic(secure_format_exception(e), fl_ctx)
| NVFlare-main | nvflare/app_opt/xgboost/histogram_based/controller.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
XGB_TRAIN_TASK = "train"
class XGBShareableHeader:
WORLD_SIZE = "_world_size"
RANK_MAP = "_rank_map"
XGB_FL_SERVER_PORT = "_server_port"
XGB_FL_SERVER_SECURE = "_secure_server"
| NVFlare-main | nvflare/app_opt/xgboost/histogram_based/constants.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/xgboost/histogram_based/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xgboost as xgb
from xgboost import callback
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.apis.workspace import Workspace
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.xgboost.data_loader import XGBDataLoader
from nvflare.app_opt.xgboost.histogram_based.constants import XGB_TRAIN_TASK, XGBShareableHeader
from nvflare.fuel.utils.import_utils import optional_import
from nvflare.security.logging import secure_format_exception, secure_log_traceback
class XGBoostParams:
def __init__(self, xgb_params: dict, num_rounds=10, early_stopping_rounds=2, verbose_eval=False):
"""Container for all XGBoost parameters.
Args:
xgb_params: This dict is passed to `xgboost.train()` as the first argument `params`.
It contains all the Booster parameters.
Please refer to XGBoost documentation for details:
https://xgboost.readthedocs.io/en/stable/python/python_api.html#module-xgboost.training
"""
self.num_rounds = num_rounds
self.early_stopping_rounds = early_stopping_rounds
self.verbose_eval = verbose_eval
self.xgb_params: dict = xgb_params if xgb_params else {}
class TensorBoardCallback(xgb.callback.TrainingCallback):
def __init__(self, app_dir: str, tensorboard):
self.train_writer = tensorboard.SummaryWriter(log_dir=os.path.join(app_dir, "train-auc/"))
self.val_writer = tensorboard.SummaryWriter(log_dir=os.path.join(app_dir, "val-auc/"))
def after_iteration(self, model, epoch: int, evals_log: xgb.callback.TrainingCallback.EvalsLog):
if not evals_log:
return False
for data, metric in evals_log.items():
for metric_name, log in metric.items():
score = log[-1][0] if isinstance(log[-1], tuple) else log[-1]
if data == "train":
self.train_writer.add_scalar(metric_name, score, epoch)
else:
self.val_writer.add_scalar(metric_name, score, epoch)
return False
class FedXGBHistogramExecutor(Executor):
"""Federated XGBoost Executor Spec for histogram-base collaboration.
This class implements a basic xgb_train logic, feel free to overwrite the function for custom behavior.
"""
def __init__(self, num_rounds, early_stopping_rounds, xgb_params: dict, data_loader_id: str, verbose_eval=False):
"""Federated XGBoost Executor for histogram-base collaboration.
This class sets up the training environment for Federated XGBoost.
This is the executor running on each NVFlare client, which starts XGBoost training.
Args:
num_rounds: number of boosting rounds
early_stopping_rounds: early stopping rounds
xgb_params: This dict is passed to `xgboost.train()` as the first argument `params`.
It contains all the Booster parameters.
Please refer to XGBoost documentation for details:
https://xgboost.readthedocs.io/en/stable/python/python_api.html#module-xgboost.training
data_loader_id: the ID points to XGBDataLoader.
verbose_eval: verbose_eval in xgboost.train
"""
super().__init__()
self.app_dir = None
self.num_rounds = num_rounds
self.early_stopping_rounds = early_stopping_rounds
self.verbose_eval = verbose_eval
self.xgb_params = xgb_params
self.rank = None
self.world_size = None
self.client_id = None
self._ca_cert_path = None
self._client_key_path = None
self._client_cert_path = None
self._server_address = "localhost"
self.data_loader_id = data_loader_id
self.train_data = None
self.val_data = None
def initialize(self, fl_ctx):
self.client_id = fl_ctx.get_identity_name()
self._server_address = self._get_server_address(fl_ctx)
self.log_info(fl_ctx, f"server address is {self._server_address}")
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
self.app_dir = ws.get_app_dir(fl_ctx.get_job_id())
self.data_loader = engine.get_component(self.data_loader_id)
if not isinstance(self.data_loader, XGBDataLoader):
self.system_panic("data_loader should be type XGBDataLoader", fl_ctx)
def xgb_train(self, params: XGBoostParams) -> xgb.core.Booster:
"""XGBoost training logic.
Args:
params (XGBoostParams): xgboost parameters.
Returns:
A xgboost booster.
"""
# Load file, file will not be sharded in federated mode.
dtrain = self.train_data
dval = self.val_data
# Specify validations set to watch performance
watchlist = [(dval, "eval"), (dtrain, "train")]
callbacks = [callback.EvaluationMonitor(rank=self.rank)]
tensorboard, flag = optional_import(module="torch.utils.tensorboard")
if flag and self.app_dir:
callbacks.append(TensorBoardCallback(self.app_dir, tensorboard))
# Run training, all the features in training API is available.
bst = xgb.train(
params.xgb_params,
dtrain,
params.num_rounds,
evals=watchlist,
early_stopping_rounds=params.early_stopping_rounds,
verbose_eval=params.verbose_eval,
callbacks=callbacks,
)
return bst
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
def _get_server_address(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
if engine.client.overseer_agent:
sp = engine.client.overseer_agent.get_primary_sp()
if sp and sp.primary is True:
return sp.name
self.log_info(fl_ctx, "Unable to get primary sp from overseer. Using previously known server address")
return self._server_address
def _get_certificates(self, fl_ctx: FLContext):
workspace: Workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
bin_folder = workspace.get_startup_kit_dir()
ca_cert_path = os.path.join(bin_folder, "rootCA.pem")
if not os.path.exists(ca_cert_path):
self.log_error(fl_ctx, "Missing ca certificate (rootCA.pem)")
return False
client_key_path = os.path.join(bin_folder, "client.key")
if not os.path.exists(client_key_path):
self.log_error(fl_ctx, "Missing client key (client.key)")
return False
client_cert_path = os.path.join(bin_folder, "client.crt")
if not os.path.exists(client_cert_path):
self.log_error(fl_ctx, "Missing client certificate (client.crt)")
return False
self._ca_cert_path = ca_cert_path
self._client_key_path = client_key_path
self._client_cert_path = client_cert_path
return True
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
try:
if task_name == XGB_TRAIN_TASK:
return self.train(shareable, fl_ctx, abort_signal)
else:
self.log_error(fl_ctx, f"Could not handle task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
# Task execution error, return EXECUTION_EXCEPTION Shareable
self.log_exception(fl_ctx, f"learner execute exception: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
"""XGBoost training task pipeline which handles NVFlare specific tasks"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Print round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
client_name = fl_ctx.get_identity_name()
self.log_info(fl_ctx, f"Client: {client_name} Round: {current_round}/{total_rounds}")
rank_map = shareable.get_header(XGBShareableHeader.RANK_MAP)
if client_name not in rank_map:
self.log_error(fl_ctx, f"Train failed due to client {client_name} missing in rank_map: {rank_map}")
return make_reply(ReturnCode.ERROR)
world_size = shareable.get_header(XGBShareableHeader.WORLD_SIZE)
if world_size is None:
self.log_error(fl_ctx, f"Train failed in client {client_name}: missing xgboost world size in header.")
return make_reply(ReturnCode.ERROR)
xgb_fl_server_port = shareable.get_header(XGBShareableHeader.XGB_FL_SERVER_PORT)
if xgb_fl_server_port is None:
self.log_error(fl_ctx, f"Train failed in client {client_name}: missing xgboost FL server port in header.")
return make_reply(ReturnCode.ERROR)
secure_comm = shareable.get_header(XGBShareableHeader.XGB_FL_SERVER_SECURE)
if secure_comm is None:
self.log_error(fl_ctx, f"Train failed in client {client_name}: missing xgboost secure_comm in header.")
return make_reply(ReturnCode.ERROR)
self.rank = rank_map[client_name]
self.world_size = world_size
self.log_info(fl_ctx, f"Using xgb params: {self.xgb_params}")
params = XGBoostParams(
xgb_params=self.xgb_params,
num_rounds=self.num_rounds,
early_stopping_rounds=self.early_stopping_rounds,
verbose_eval=self.verbose_eval,
)
self._server_address = self._get_server_address(fl_ctx)
self.log_info(fl_ctx, f"server address is {self._server_address}")
communicator_env = {
"xgboost_communicator": "federated",
"federated_server_address": f"{self._server_address}:{xgb_fl_server_port}",
"federated_world_size": self.world_size,
"federated_rank": self.rank,
}
if secure_comm:
if not self._get_certificates(fl_ctx):
return make_reply(ReturnCode.ERROR)
communicator_env["federated_server_cert"] = self._ca_cert_path
communicator_env["federated_client_key"] = self._client_key_path
communicator_env["federated_client_cert"] = self._client_cert_path
try:
with xgb.collective.CommunicatorContext(**communicator_env):
# Load the data. Dmatrix must be created with column split mode in CommunicatorContext for vertical FL
if not self.train_data or not self.val_data:
self.train_data, self.val_data = self.data_loader.load_data(self.client_id)
bst = self.xgb_train(params)
# Save the model.
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = workspace.get_run_dir(run_number)
bst.save_model(os.path.join(run_dir, "test.model.json"))
xgb.collective.communicator_print("Finished training\n")
except Exception as e:
secure_log_traceback()
self.log_error(fl_ctx, f"Exception happens when running xgb train: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
return make_reply(ReturnCode.OK)
| NVFlare-main | nvflare/app_opt/xgboost/histogram_based/executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from joblib import dump, load
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants
class JoblibModelParamPersistor(ModelPersistor):
def __init__(self, initial_params, save_name="model_param.joblib"):
"""
Persist global model parameters from a dict to a joblib file
Note that this contains the necessary information to build
a certain model but may not be directly loadable
"""
super().__init__()
self.initial_params = initial_params
self.save_name = save_name
def _initialize(self, fl_ctx: FLContext):
# get save path from FLContext
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.log_dir = app_root
self.save_path = os.path.join(self.log_dir, self.save_name)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
fl_ctx.sync_sticky()
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
"""Initialize and load the Model.
Args:
fl_ctx: FLContext
Returns:
ModelLearnable object
"""
if os.path.exists(self.save_path):
self.logger.info("Loading server model")
model = load(self.save_path)
else:
self.logger.info(f"Initialization, sending global settings: {self.initial_params}")
model = self.initial_params
model_learnable = make_model_learnable(weights=model, meta_props=dict())
return model_learnable
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
self._initialize(fl_ctx)
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
"""Persists the Model object.
Args:
model_learnable: ModelLearnable object
fl_ctx: FLContext
"""
if model_learnable:
if fl_ctx.get_prop(AppConstants.CURRENT_ROUND) == fl_ctx.get_prop(AppConstants.NUM_ROUNDS) - 1:
self.logger.info(f"Saving received model to {os.path.abspath(self.save_path)}")
# save 'weights' which contains model parameters
model = model_learnable[ModelLearnableKey.WEIGHTS]
dump(model, self.save_path, compress=1)
| NVFlare-main | nvflare/app_opt/sklearn/joblib_model_param_persistor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import joblib
import tensorboard
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants
from nvflare.security.logging import secure_format_exception
def _get_global_params(shareable: Shareable, fl_ctx: FLContext):
# retrieve current global params download from server's shareable
dxo = from_shareable(shareable)
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
fl_ctx.set_prop(AppConstants.CURRENT_ROUND, current_round)
return current_round, dxo.data
class SKLearnExecutor(Executor):
def __init__(self, learner_id: str, train_task=AppConstants.TASK_TRAIN):
"""An Executor interface for scikit-learn Learner.
Args:
learner_id (str): id pointing to the learner object
train_task (str, optional): label to dispatch train task. Defaults to AppConstants.TASK_TRAIN.
"""
super().__init__()
self.learner_id = learner_id
self.learner = None
self.train_task = train_task
self.local_model_path = None
self.global_model_path = None
self.client_id = None
self.writer = None
self.fl_ctx = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
elif event_type == EventType.ABORT_TASK:
try:
if self.learner:
self.learner.abort(fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner abort exception: {secure_format_exception(e)}")
elif event_type == EventType.END_RUN:
self.finalize(fl_ctx)
def initialize(self, fl_ctx: FLContext):
self.fl_ctx = fl_ctx
self._print_configs(fl_ctx)
self.load_log_tracker()
try:
engine = fl_ctx.get_engine()
self.learner = engine.get_component(self.learner_id)
if not isinstance(self.learner, Learner):
raise TypeError(f"learner must be Learner type. Got: {type(self.learner)}")
self.learner.initialize(engine.get_all_components(), fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner initialize exception: {secure_format_exception(e)}")
# set the paths according to fl_ctx
app_dir = fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.local_model_path = os.path.join(app_dir, "model_local.joblib")
self.global_model_path = os.path.join(app_dir, "model_global.joblib")
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
if abort_signal.triggered:
self.finalize(fl_ctx)
return make_reply(ReturnCode.TASK_ABORTED)
try:
if task_name == self.train_task:
(current_round, global_params) = _get_global_params(shareable, fl_ctx)
if current_round > 0:
# first round for parameter initialization
# no model evaluation
self.validate(current_round, global_params, fl_ctx)
return self.train(current_round, global_params, fl_ctx)
else:
self.log_error(fl_ctx, f"Could not handle task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
# Task execution error, return EXECUTION_EXCEPTION Shareable
self.log_exception(fl_ctx, f"learner execute exception: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def train(self, current_round, global_param, fl_ctx: FLContext) -> Shareable:
self.log_info(fl_ctx, f"Client {self.client_id} perform local train")
# sklearn algorithms usually needs two different processing schemes
# one for first round (generate initial centers for clustering, regular training for svm)
# the other for following rounds (regular training for clustering, no further training for svm)
# hence the current round is fed to learner to distinguish the two
params, model = self.learner.train(current_round, global_param, fl_ctx)
# save model and return dxo containing the params
self.save_model_local(model)
dxo = DXO(data_kind=DataKind.WEIGHTS, data=params)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, self.learner.n_samples)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, current_round, global_param, fl_ctx: FLContext) -> Shareable:
# retrieve current global center download from server's shareable
self.log_info(fl_ctx, f"Client {self.client_id} perform local evaluation")
metrics, model = self.learner.validate(current_round, global_param, fl_ctx)
self.save_model_global(model)
for key, value in metrics.items():
self.log_value(key, value, current_round)
def finalize(self, fl_ctx: FLContext):
try:
if self.learner:
self.learner.finalize(fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner finalize exception: {secure_format_exception(e)}")
def _print_configs(self, fl_ctx: FLContext):
# get and print the args
fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
self.client_id = fl_ctx.get_identity_name()
self.log_info(
fl_ctx,
f"Client {self.client_id} initialized with configs: \n {fl_args}",
)
def load_log_tracker(self):
app_dir = self.fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.writer = tensorboard.summary.Writer(app_dir)
def log_value(self, key, value, step):
if self.writer:
self.writer.add_scalar(key, value, step)
self.writer.flush()
def save_model_local(self, model: any) -> None:
joblib.dump(model, self.local_model_path)
def save_model_global(self, model: any) -> None:
joblib.dump(model, self.global_model_path)
| NVFlare-main | nvflare/app_opt/sklearn/sklearn_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
pd_readers = {
"csv": pd.read_csv,
"xls": pd.read_excel,
"xlsx": pd.read_excel,
"json": pd.read_json,
}
def _to_data_tuple(data):
data_num = data.shape[0]
# split to feature and label
x = data.iloc[:, 1:]
y = data.iloc[:, 0]
return x.to_numpy(), y.to_numpy(), data_num
def get_pandas_reader(data_path: str):
from nvflare.app_common.utils.file_utils import get_file_format
file_format = get_file_format(data_path)
reader = pd_readers.get(file_format, None)
if reader is None:
raise ValueError(f"no pandas reader for given file format {file_format}")
return reader
def load_data(data_path: str, require_header: bool = False):
reader = get_pandas_reader(data_path)
if hasattr(reader, "header"):
if require_header:
data = reader(data_path)
else:
data = reader(data_path, header=None)
else:
data = reader(data_path)
return _to_data_tuple(data)
def load_data_for_range(data_path: str, start: int, end: int, require_header: bool = False):
reader = get_pandas_reader(data_path)
if hasattr(reader, "skiprows"):
data_size = end - start
if hasattr(reader, "header") and not require_header:
data = reader(data_path, header=None, skiprows=start, nrows=data_size)
else:
data = reader(data_path, skiprows=start, nrows=data_size)
else:
data = reader(data_path).iloc[start:end]
return _to_data_tuple(data)
| NVFlare-main | nvflare/app_opt/sklearn/data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/sklearn/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from typing import Any
import numpy as np
import torch
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.fobs.datum import DatumManager
class TensorDecomposer(fobs.Decomposer):
def supported_type(self):
return torch.Tensor
def decompose(self, target: torch.Tensor, manager: DatumManager = None) -> Any:
stream = BytesIO()
# torch.save uses Pickle so converting Tensor to ndarray first
array = target.detach().cpu().numpy()
np.save(stream, array, allow_pickle=False)
return stream.getvalue()
def recompose(self, data: Any, manager: DatumManager = None) -> torch.Tensor:
stream = BytesIO(data)
array = np.load(stream, allow_pickle=False)
return torch.from_numpy(array)
| NVFlare-main | nvflare/app_opt/pt/decomposers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nvflare.apis.fl_context import FLContext
from nvflare.app_opt.pt.model_reader_writer import PTModelReaderWriter
from nvflare.app_opt.pt.utils import feed_vars
from nvflare.security.logging import secure_format_exception
class HEPTModelReaderWriter(PTModelReaderWriter):
def apply_model(self, network, multi_processes: bool, model_params: dict, fl_ctx: FLContext, options=None):
"""Write global model back to local model.
Needed to extract local parameter shape to reshape decrypted vectors.
Args:
network (pytorch.nn): network object to read/write
multi_processes (bool): is the workflow in multi_processes environment
model_params (dict): which parameters to read/write
fl_ctx (FLContext): FL system-wide context
options (dict, optional): additional information on how to process read/write. Defaults to None.
Raises:
RuntimeError: unable to reshape the network layers or mismatch between network layers and model_params
Returns:
list: a list of parameters been processed
"""
try:
# net = self.fitter.net
net = network
# if self.fitter.multi_gpu:
if multi_processes:
net = net.module
# reshape decrypted parameters
local_var_dict = net.state_dict()
for var_name in local_var_dict:
if var_name in model_params:
try:
self.logger.debug(
f"Reshaping {var_name}: {np.shape(model_params[var_name])} to"
f" {local_var_dict[var_name].shape}",
)
model_params[var_name] = np.reshape(model_params[var_name], local_var_dict[var_name].shape)
except Exception as e:
raise RuntimeError(f"{self._name} reshaping Exception: {secure_format_exception(e)}")
assign_ops, updated_local_model = feed_vars(net, model_params)
self.logger.debug(f"assign_ops: {len(assign_ops)}")
self.logger.debug(f"updated_local_model: {len(updated_local_model)}")
net.load_state_dict(updated_local_model)
return assign_ops
except Exception as e:
raise RuntimeError(f"{self._name} apply_model Exception: {secure_format_exception(e)}")
| NVFlare-main | nvflare/app_opt/pt/he_model_reader_writer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import abstractmethod
import torch
from nvflare.apis.signal import Signal
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
class PTDittoHelper(object):
def __init__(
self, criterion, model, optimizer, device, app_dir: str, ditto_lambda: float = 0.1, model_epochs: int = 1
):
"""Helper to be used with Ditto components.
Implements the functions used for the algorithm proposed in
Li et al. "Ditto: Fair and Robust Federated Learning Through Personalization"
(https://arxiv.org/abs/2012.04221) using PyTorch.
Args:
criterion: base loss criterion
model: the personalized model of Ditto method
optimizer: training optimizer for personalized model
device: device for personalized model training
app_dir: needed for local personalized model saving
ditto_lambda: lambda weight for Ditto prox loss term when combining with the base loss, defaults to 0.1
model_epochs: training epoch for personalized model, defaults to 1
Returns:
None
"""
self.criterion = criterion
self.model = model
self.optimizer = optimizer
if device is None:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = device
self.model_epochs = model_epochs
# initialize Ditto criterion
self.prox_criterion = PTFedProxLoss(mu=ditto_lambda)
# check criterion, model, and optimizer type
if not isinstance(self.criterion, torch.nn.modules.loss._Loss):
raise ValueError(f"criterion component must be torch loss. " f"But got: {type(self.criterion)}")
if not isinstance(self.model, torch.nn.Module):
raise ValueError(f"model component must be torch model. " f"But got: {type(self.model)}")
if not isinstance(self.optimizer, torch.optim.Optimizer):
raise ValueError(f"optimizer component must be torch optimizer. " f"But got: {type(self.optimizer)}")
if not isinstance(self.device, torch.device):
raise ValueError(f"device component must be torch device. " f"But got: {type(self.device)}")
# initialize other recording related parameters
self.epoch_global = 0
self.epoch_of_start_time = 0
self.best_metric: int = 0
self.model_file_path = os.path.join(app_dir, "personalized_model.pt")
self.best_model_file_path = os.path.join(app_dir, "best_personalized_model.pt")
def load_model(self, global_weights):
# load local model from last round's record if model exist,
# otherwise initialize from global model weights for the first round.
if os.path.exists(self.model_file_path):
model_data = torch.load(self.model_file_path)
self.model.load_state_dict(model_data["model"])
self.epoch_of_start_time = model_data["epoch"]
else:
self.model.load_state_dict(global_weights)
self.epoch_of_start_time = 0
if os.path.exists(self.best_model_file_path):
model_data = torch.load(self.best_model_file_path)
self.best_metric = model_data["best_metric"]
def save_model(self, is_best=False):
# save personalized model locally
model_weights = self.model.state_dict()
save_dict = {"model": model_weights, "epoch": self.epoch_global}
if is_best:
save_dict.update({"best_metric": self.best_metric})
torch.save(save_dict, self.best_model_file_path)
else:
torch.save(save_dict, self.model_file_path)
def update_metric_save_model(self, metric):
self.save_model(is_best=False)
if metric > self.best_metric:
self.best_metric = metric
self.save_model(is_best=True)
@abstractmethod
def local_train(self, train_loader, model_global, abort_signal: Signal, writer):
# Train personal model for self.model_epochs, and keep track of curves
# This part is task dependent, need customization
# Basic idea is to train personalized model with prox term as compare to model_global
raise NotImplementedError
| NVFlare-main | nvflare/app_opt/pt/ditto.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.nn.modules.loss import _Loss
class PTFedProxLoss(_Loss):
def __init__(self, mu: float = 0.01) -> None:
"""Compute FedProx loss: a loss penalizing the deviation from global model.
Args:
mu: weighting parameter
"""
super().__init__()
if mu < 0.0:
raise ValueError("mu should be no less than 0.0")
self.mu = mu
def forward(self, input, target) -> torch.Tensor:
"""Forward pass in training.
Args:
input (nn.Module): the local pytorch model
target (nn.Module): the copy of global pytorch model when local clients received it
at the beginning of each local round
Returns:
FedProx loss term
"""
prox_loss: torch.Tensor = 0.0
for param, ref in zip(input.named_parameters(), target.named_parameters()):
prox_loss += (self.mu / 2) * torch.sum((param[1] - ref[1]) ** 2)
return prox_loss
| NVFlare-main | nvflare/app_opt/pt/fedproxloss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from nvflare.app_common.executors.multi_process_executor import MultiProcessExecutor
from nvflare.fuel.utils.network_utils import get_open_ports
class PTMultiProcessExecutor(MultiProcessExecutor):
def get_multi_process_command(self) -> str:
return (
f"{sys.executable} -m torch.distributed.run --nproc_per_node="
+ str(self.num_of_processes)
+ " --nnodes=1 --node_rank=0"
+ ' --master_addr="localhost" --master_port='
+ str(get_open_ports(1)[0])
)
| NVFlare-main | nvflare/app_opt/pt/multi_process_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.app_opt.pt.ditto import PTDittoHelper
from nvflare.app_opt.pt.fedopt import PTFedOptModelShareableGenerator
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
from nvflare.app_opt.pt.file_model_locator import PTFileModelLocator
from nvflare.app_opt.pt.file_model_persistor import PTFileModelPersistor
from nvflare.app_opt.pt.he_model_reader_writer import HEPTModelReaderWriter
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
from nvflare.app_opt.pt.model_reader_writer import PTModelReaderWriter
from nvflare.app_opt.pt.multi_process_executor import PTMultiProcessExecutor
from nvflare.app_opt.pt.scaffold import PTScaffoldHelper, get_lr_values
from nvflare.app_opt.pt.utils import feed_vars
| NVFlare-main | nvflare/app_opt/pt/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model_processor import ModelProcessor
from nvflare.app_opt.pt.utils import feed_vars
from nvflare.security.logging import secure_format_exception
class PTModelReaderWriter(ModelProcessor):
def __init__(self):
"""Perform the actual read/write operation for PyTorch-based models."""
self._name = self.__class__.__name__
self.logger = logging.getLogger(self._name)
def extract_model(self, network, multi_processes: bool, model_vars: dict, fl_ctx: FLContext) -> dict:
net = network
if multi_processes:
net = net.module
local_state_dict = net.state_dict()
self.logger.debug("setup local_model_dict")
local_model_dict = {}
for var_name in local_state_dict:
try:
local_model_dict[var_name] = local_state_dict[var_name].cpu().numpy()
except Exception as e:
raise ValueError(f"Did not work: {secure_format_exception(e)}")
self.logger.debug(f"local_model_dict {len(local_model_dict)}")
return local_model_dict
def apply_model(self, network, multi_processes: bool, model_params: dict, fl_ctx: FLContext, options=None):
"""Set the local model according to model_data.
Args:
model_params: model data information
fl_ctx (FLContext): FL Context delivered by workflow
options: . Defaults to None.
Raises:
RuntimeError: Raised when being unable to apply model_params to the network
Returns:
a list of ops applied to model
"""
try:
net = network
if multi_processes:
net = net.module
assign_ops, updated_local_model = feed_vars(net, model_params)
self.logger.debug(f"assign_ops: {len(assign_ops)}")
self.logger.debug(f"updated_local_model: {len(updated_local_model)}")
# self.fitter.net.load_state_dict(updated_local_model)
net.load_state_dict(updated_local_model)
return assign_ops
except Exception as e:
raise RuntimeError(f"load_state_dict Exception: {secure_format_exception(e)}")
| NVFlare-main | nvflare/app_opt/pt/model_reader_writer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.learnable import Learnable
from nvflare.app_common.abstract.model import ModelLearnableKey, make_model_learnable
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.shareablegenerators.full_model_shareable_generator import FullModelShareableGenerator
from nvflare.security.logging import secure_format_exception
class PTFedOptModelShareableGenerator(FullModelShareableGenerator):
def __init__(
self,
optimizer_args: dict = None,
lr_scheduler_args: dict = None,
source_model="model",
device=None,
):
"""Implement the FedOpt algorithm.
The algorithm is proposed in Reddi, Sashank, et al. "Adaptive federated optimization." arXiv preprint arXiv:2003.00295 (2020).
This SharableGenerator will update the global model using the specified
PyTorch optimizer and learning rate scheduler.
Note: This class will use FedOpt to optimize the global trainable parameters (i.e. `self.model.named_parameters()`)
but use FedAvg to update any other layers such as batch norm statistics.
Args:
optimizer_args: dictionary of optimizer arguments, e.g.
{'path': 'torch.optim.SGD', 'args': {'lr': 1.0}} (default).
lr_scheduler_args: dictionary of server-side learning rate scheduler arguments, e.g.
{'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}} (default: None).
source_model: either a valid torch model object or a component ID of a torch model object
device: specify the device to run server-side optimization, e.g. "cpu" or "cuda:0"
(will default to cuda if available and no device is specified).
Raises:
TypeError: when any of input arguments does not have correct type
"""
super().__init__()
if not optimizer_args:
self.logger("No optimizer_args provided. Using FedOpt with SGD and lr 1.0")
optimizer_args = {"name": "SGD", "args": {"lr": 1.0}}
if not isinstance(optimizer_args, dict):
raise TypeError(
"optimizer_args must be a dict of format, e.g. {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}}."
)
if lr_scheduler_args is not None:
if not isinstance(lr_scheduler_args, dict):
raise TypeError(
"optimizer_args must be a dict of format, e.g. "
"{'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}}."
)
self.source_model = source_model
self.optimizer_args = optimizer_args
self.lr_scheduler_args = lr_scheduler_args
self.model = None
self.optimizer = None
self.lr_scheduler = None
if device is None:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = torch.device(device)
self.optimizer_name = None
self.lr_scheduler_name = None
def _get_component_name(self, component_args):
if component_args is not None:
name = component_args.get("path", None)
if name is None:
name = component_args.get("name", None)
return name
else:
return None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
# Initialize the optimizer with current global model params
engine = fl_ctx.get_engine()
if isinstance(self.source_model, str):
self.model = engine.get_component(self.source_model)
else:
self.model = self.source_model
if self.model is None:
self.system_panic(
"Model is not available",
fl_ctx,
)
return
elif not isinstance(self.model, torch.nn.Module):
self.system_panic(
f"Expected model to be a torch.nn.Module but got {type(self.model)}",
fl_ctx,
)
return
else:
print("server model", self.model)
self.model.to(self.device)
# set up optimizer
try:
# use provided or default optimizer arguments and add the model parameters
if "args" not in self.optimizer_args:
self.optimizer_args["args"] = {}
self.optimizer_args["args"]["params"] = self.model.parameters()
self.optimizer = engine.build_component(self.optimizer_args)
# get optimizer name for log
self.optimizer_name = self._get_component_name(self.optimizer_args)
except Exception as e:
self.system_panic(
f"Exception while parsing `optimizer_args`({self.optimizer_args}): {secure_format_exception(e)}",
fl_ctx,
)
return
# set up lr scheduler
if self.lr_scheduler_args is not None:
try:
self.lr_scheduler_name = self._get_component_name(self.lr_scheduler_args)
# use provided or default lr scheduler argument and add the optimizer
if "args" not in self.lr_scheduler_args:
self.lr_scheduler_args["args"] = {}
self.lr_scheduler_args["args"]["optimizer"] = self.optimizer
self.lr_scheduler = engine.build_component(self.lr_scheduler_args)
except Exception as e:
self.system_panic(
f"Exception while parsing `lr_scheduler_args`({self.lr_scheduler_args}): {secure_format_exception(e)}",
fl_ctx,
)
return
def server_update(self, model_diff):
"""Updates the global model using the specified optimizer.
Args:
model_diff: the aggregated model differences from clients.
Returns:
The updated PyTorch model state dictionary.
"""
self.model.train()
self.optimizer.zero_grad()
# Apply the update to the model. We must multiply weights_delta by -1.0 to
# view it as a gradient that should be applied to the server_optimizer.
updated_params = []
for name, param in self.model.named_parameters():
if name in model_diff:
param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)
updated_params.append(name)
self.optimizer.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return self.model.state_dict(), updated_params
def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> Learnable:
"""Convert Shareable to Learnable while doing a FedOpt update step.
Supporting data_kind == DataKind.WEIGHT_DIFF
Args:
shareable (Shareable): Shareable to be converted
fl_ctx (FLContext): FL context
Returns:
Model: Updated global ModelLearnable.
"""
# check types
dxo = from_shareable(shareable)
if dxo.data_kind != DataKind.WEIGHT_DIFF:
self.system_panic(
"FedOpt is only implemented for " "data_kind == DataKind.WEIGHT_DIFF",
fl_ctx,
)
return Learnable()
processed_algorithm = dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)
if processed_algorithm is not None:
self.system_panic(
f"FedOpt is not implemented for shareable processed by {processed_algorithm}",
fl_ctx,
)
return Learnable()
model_diff = dxo.data
start = time.time()
weights, updated_params = self.server_update(model_diff)
secs = time.time() - start
# convert to numpy dict of weights
start = time.time()
for key in weights:
weights[key] = weights[key].detach().cpu().numpy()
secs_detach = time.time() - start
# update unnamed parameters such as batch norm layers if there are any using the averaged update
base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
if not base_model:
self.system_panic(reason="No global base model!", fl_ctx=fl_ctx)
return base_model
base_model_weights = base_model[ModelLearnableKey.WEIGHTS]
n_fedavg = 0
for key, value in model_diff.items():
if key not in updated_params:
weights[key] = base_model_weights[key] + value
n_fedavg += 1
self.log_info(
fl_ctx,
f"FedOpt ({self.optimizer_name}, {self.device}) server model update "
f"round {fl_ctx.get_prop(AppConstants.CURRENT_ROUND)}, "
f"{self.lr_scheduler_name if self.lr_scheduler_name else ''} "
f"lr: {self.optimizer.param_groups[-1]['lr']}, "
f"fedopt layers: {len(updated_params)}, "
f"fedavg layers: {n_fedavg}, "
f"update: {secs} secs., detach: {secs_detach} secs.",
)
# TODO: write server-side lr to tensorboard
return make_model_learnable(weights, dxo.get_meta_props())
| NVFlare-main | nvflare/app_opt/pt/fedopt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.apis.dxo import DXO
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_opt.pt.file_model_persistor import PTFileModelPersistor
class PTFileModelLocator(ModelLocator):
def __init__(self, pt_persistor_id: str):
"""The ModelLocator's job is to find and locate the models inventory saved during training.
Args:
pt_persistor_id (str): ModelPersistor component ID
"""
super().__init__()
self.pt_persistor_id = pt_persistor_id
self.model_persistor = None
self.model_inventory = {}
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._initialize(fl_ctx)
def _initialize(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
self.model_persistor: PTFileModelPersistor = engine.get_component(self.pt_persistor_id)
if self.model_persistor is None or not isinstance(self.model_persistor, PTFileModelPersistor):
raise ValueError(
f"pt_persistor_id component must be PTFileModelPersistor. " f"But got: {type(self.model_persistor)}"
)
def get_model_names(self, fl_ctx: FLContext) -> List[str]:
"""Returns the list of model names that should be included from server in cross site validation.add().
Args:
fl_ctx (FLContext): FL Context object.
Returns:
List[str]: List of model names.
"""
self.model_inventory: dict = self.model_persistor.get_model_inventory(fl_ctx)
return list(self.model_inventory.keys())
def locate_model(self, model_name, fl_ctx: FLContext) -> DXO:
"""Call to locate and load the model weights of model_name.
Args:
model_name: name of the model
fl_ctx: FLContext
Returns: model_weight DXO
"""
if model_name not in list(self.model_inventory.keys()):
raise ValueError(f"model inventory does not contain: {model_name}")
model_learnable = self.model_persistor.get(model_name, fl_ctx)
dxo = model_learnable_to_dxo(model_learnable)
return dxo
| NVFlare-main | nvflare/app_opt/pt/file_model_locator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from collections import OrderedDict
from typing import Dict
import torch
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants, DefaultCheckpointFileName, EnvironmentKey
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.model_desc import ModelDescriptor
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class PTFileModelPersistor(ModelPersistor):
def __init__(
self,
exclude_vars=None,
model=None,
global_model_file_name=DefaultCheckpointFileName.GLOBAL_MODEL,
best_global_model_file_name=DefaultCheckpointFileName.BEST_GLOBAL_MODEL,
source_ckpt_file_full_name=None,
filter_id: str = None,
):
"""Persist pytorch-based model to/from file system.
This Model Persistor tries to load PT model data in the following three ways:
1. Load from a specified source checkpoint file
2. Load from a location from the app folder
3. Load from a torch model object
The Persistor tries method 1 first if the source_ckpt_file_full_name is specified;
If source_ckpt_file_full_name is not specified, it tries method 2;
If no checkpoint location is specified in the app folder, it tries method 3.
Method 2 - Load from a location from the app folder
It is assumed that the app folder must contain the environments.json file. Among other things, this
JSON file must specify where to find the checkpoint file. It does so with two JSON elements:
- APP_CKPT_DIR: specifies the folder (within the app) where the checkpoint file resides.
- APP_CKPT: specifies the base file name of the checkpoint
Here is an example of the environments.json content::
{
"APP_CKPT_DIR": "model",
"APP_CKPT": "pretrained_model.pt"
}
In this example, the checkpoint file is located in the "model" folder within the app and is named
pretrained_model.pt.
Method 3 - Load from a torch model object. In this case, the 'model' arg must be a valid torch
model, or the component ID of a valid torch model included in the "components" section of
your config_fed_server.json.
If all 3 methods fail, system_panic() is called.
If checkpoint folder name is specified, then global model and best global model will be saved to it;
Otherwise they will be saved directly in the app folder.
The model is saved in a dict depending on the persistor you used. You might need to access it with
``model.load_state_dict(torch.load(path_to_model)["model"])`` as there is additional meta information together with the model weights.
Args:
exclude_vars (str, optional): regex expression specifying weight vars to be excluded from training. Defaults to None.
model (str, optional): torch model object or component id of the model object. Defaults to None.
global_model_file_name (str, optional): file name for saving global model. Defaults to DefaultCheckpointFileName.GLOBAL_MODEL.
best_global_model_file_name (str, optional): file name for saving best global model. Defaults to DefaultCheckpointFileName.BEST_GLOBAL_MODEL.
source_ckpt_file_full_name (str, optional): full file name for source model checkpoint file. Defaults to None.
filter_id: Optional string that defines a filter component that is applied to prepare the model to be saved,
e.g. for serialization of custom Python objects.
Raises:
ValueError: when source_ckpt_file_full_name does not exist
"""
super().__init__(
filter_id=filter_id,
)
self.exclude_vars = re.compile(exclude_vars) if exclude_vars else None
self.model = model
self.log_dir = None
self.ckpt_preload_path = None
self.persistence_manager = None
self.ckpt_dir_env_key = EnvironmentKey.CHECKPOINT_DIR
self.ckpt_file_name_env_key = EnvironmentKey.CHECKPOINT_FILE_NAME
self.global_model_file_name = global_model_file_name
self.best_global_model_file_name = best_global_model_file_name
self.source_ckpt_file_full_name = source_ckpt_file_full_name
self.default_train_conf = None
if source_ckpt_file_full_name and not os.path.exists(source_ckpt_file_full_name):
raise ValueError(f"specified source checkpoint model file {source_ckpt_file_full_name} does not exist")
def _initialize(self, fl_ctx: FLContext):
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
env = None
run_args = fl_ctx.get_prop(FLContextKey.ARGS)
if run_args:
env_config_file_name = os.path.join(app_root, run_args.env)
if os.path.exists(env_config_file_name):
try:
with open(env_config_file_name) as file:
env = json.load(file)
except Exception:
self.system_panic(
reason="error opening env config file {}".format(env_config_file_name), fl_ctx=fl_ctx
)
return
if env is not None:
if env.get(self.ckpt_dir_env_key, None):
fl_ctx.set_prop(AppConstants.LOG_DIR, env[self.ckpt_dir_env_key], private=True, sticky=True)
if env.get(self.ckpt_file_name_env_key) is not None:
fl_ctx.set_prop(
AppConstants.CKPT_PRELOAD_PATH, env[self.ckpt_file_name_env_key], private=True, sticky=True
)
log_dir = fl_ctx.get_prop(AppConstants.LOG_DIR)
if log_dir:
self.log_dir = os.path.join(app_root, log_dir)
else:
self.log_dir = app_root
self._ckpt_save_path = os.path.join(self.log_dir, self.global_model_file_name)
self._best_ckpt_save_path = os.path.join(self.log_dir, self.best_global_model_file_name)
ckpt_preload_path = fl_ctx.get_prop(AppConstants.CKPT_PRELOAD_PATH)
if ckpt_preload_path:
self.ckpt_preload_path = os.path.join(app_root, ckpt_preload_path)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
if isinstance(self.model, str):
# treat it as model component ID
model_component_id = self.model
engine = fl_ctx.get_engine()
self.model = engine.get_component(model_component_id)
if not self.model:
self.system_panic(reason="cannot find model component '{}'".format(model_component_id), fl_ctx=fl_ctx)
return
if not isinstance(self.model, torch.nn.Module):
self.system_panic(
reason="expect model component '{}' to be torch.nn.Module but got {}".format(
model_component_id, type(self.model)
),
fl_ctx=fl_ctx,
)
return
elif self.model and not isinstance(self.model, torch.nn.Module):
self.system_panic(
reason="expect model to be torch.nn.Module but got {}".format(type(self.model)), fl_ctx=fl_ctx
)
return
fl_ctx.sync_sticky()
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
"""Convert initialised model into Learnable/Model format.
Args:
fl_ctx (FLContext): FL Context delivered by workflow
Returns:
Model: a Learnable/Model object
"""
src_file_name = None
if self.source_ckpt_file_full_name:
src_file_name = self.source_ckpt_file_full_name
elif self.ckpt_preload_path:
src_file_name = self.ckpt_preload_path
if src_file_name:
try:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data = torch.load(src_file_name, map_location=device)
# "checkpoint may contain 'model', 'optimizer', 'lr_scheduler', etc. or only contain model dict directly."
except Exception:
self.log_exception(fl_ctx, "error loading checkpoint from {}".format(src_file_name))
self.system_panic(reason="cannot load model checkpoint", fl_ctx=fl_ctx)
return None
else:
# if no pretrained model provided, use the generated network weights from APP config
# note that, if set "determinism" in the config, the init model weights will always be the same
try:
data = self.model.state_dict() if self.model is not None else OrderedDict()
except Exception:
self.log_exception(fl_ctx, "error getting state_dict from model object")
self.system_panic(reason="cannot create state_dict from model object", fl_ctx=fl_ctx)
return None
if self.model:
self.default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=self.default_train_conf)
return self.persistence_manager.to_model_learnable(self.exclude_vars)
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
self._initialize(fl_ctx)
elif event == AppEventType.GLOBAL_BEST_MODEL_AVAILABLE:
# save the current model as the best model!
self.save_model_file(self._best_ckpt_save_path)
def save_model_file(self, save_path: str):
save_dict = self.persistence_manager.to_persistence_dict()
torch.save(save_dict, save_path)
def save_model(self, ml: ModelLearnable, fl_ctx: FLContext):
self.persistence_manager.update(ml)
self.save_model_file(self._ckpt_save_path)
def get_model(self, model_file: str, fl_ctx: FLContext) -> ModelLearnable:
try:
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Use the "cpu" to load the global model weights, avoid GPU out of memory
device = "cpu"
location = os.path.join(self.log_dir, model_file)
data = torch.load(location, map_location=device)
persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=self.default_train_conf)
return persistence_manager.to_model_learnable(self.exclude_vars)
except Exception:
self.log_exception(fl_ctx, "error loading checkpoint from {}".format(model_file))
return {}
def get_model_inventory(self, fl_ctx: FLContext) -> Dict[str, ModelDescriptor]:
model_inventory = {}
location = os.path.join(self.log_dir, self.global_model_file_name)
if os.path.exists(location):
model_inventory[self.global_model_file_name] = ModelDescriptor(
name=self.global_model_file_name,
location=location,
model_format=self.persistence_manager.get_persist_model_format(),
props={},
)
location = os.path.join(self.log_dir, self.best_global_model_file_name)
if os.path.exists(location):
model_inventory[self.best_global_model_file_name] = ModelDescriptor(
name=self.best_global_model_file_name,
location=location,
model_format=self.persistence_manager.get_persist_model_format(),
props={},
)
return model_inventory
| NVFlare-main | nvflare/app_opt/pt/file_model_persistor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.executors.client_api_launcher_executor import ClientAPILauncherExecutor
from nvflare.app_common.model_exchange.constants import ModelExchangeFormat
from nvflare.app_opt.pt.decomposers import TensorDecomposer
from nvflare.app_opt.pt.params_converter import NumpyToPTParamsConverter, PTToNumpyParamsConverter
from nvflare.fuel.utils import fobs
class PTClientAPILauncherExecutor(ClientAPILauncherExecutor):
def initialize(self, fl_ctx: FLContext) -> None:
fobs.register(TensorDecomposer)
self._params_exchange_format = ModelExchangeFormat.PYTORCH
super().initialize(fl_ctx)
if self._from_nvflare_converter is None:
self._from_nvflare_converter = NumpyToPTParamsConverter()
if self._to_nvflare_converter is None:
self._to_nvflare_converter = PTToNumpyParamsConverter()
| NVFlare-main | nvflare/app_opt/pt/client_api_launcher_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
from nvflare.apis.dxo import MetaKey
from nvflare.app_common.abstract.model import (
ModelLearnable,
ModelLearnableKey,
make_model_learnable,
validate_model_learnable,
)
from nvflare.app_common.app_constant import ModelFormat
class PTModelPersistenceFormatManager(object):
PERSISTENCE_KEY_MODEL = "model"
PERSISTENCE_KEY_TRAIN_CONF = "train_conf"
PERSISTENCE_KEY_META_PROPS = "meta_props"
def __init__(self, data: dict, default_train_conf=None):
"""Manage the format for model persistence.
Args:
data (dict): either the dictionary mapping variables to values or a dict of dict.
default_train_conf (dict, optional): configuration for train. Defaults to None.
Raises:
TypeError: when data is not a dictionary
"""
if not isinstance(data, dict):
raise TypeError("data must be a dict but got {}".format(type(data)))
self.var_dict = None
self.meta = None
self.train_conf = None
self.other_props = {} # other props from the original data that need to be kept
if self.PERSISTENCE_KEY_MODEL not in data:
# this is a simple weight dict
self.var_dict = data
else:
# dict of dicts
self.var_dict = data[self.PERSISTENCE_KEY_MODEL]
self.meta = data.get(self.PERSISTENCE_KEY_META_PROPS, None)
self.train_conf = data.get(self.PERSISTENCE_KEY_TRAIN_CONF, None)
# we need to keep other props, if any, so they can be kept when persisted
for k, v in data.items():
if k not in [
self.PERSISTENCE_KEY_MODEL,
self.PERSISTENCE_KEY_META_PROPS,
self.PERSISTENCE_KEY_TRAIN_CONF,
]:
self.other_props[k] = v
if not self.train_conf:
self.train_conf = default_train_conf
def _get_processed_vars(self) -> dict:
if self.meta:
return self.meta.get(MetaKey.PROCESSED_KEYS, {})
else:
return {}
def to_model_learnable(self, exclude_vars) -> ModelLearnable:
processed_vars = self._get_processed_vars()
weights = {}
for k, v in self.var_dict.items():
if exclude_vars and exclude_vars.search(k):
continue
is_processed = processed_vars.get(k, False)
if is_processed:
weights[k] = v
else:
weights[k] = v.cpu().numpy()
return make_model_learnable(weights, self.meta)
def to_persistence_dict(self) -> dict:
processed_vars = self._get_processed_vars()
weights_dict = OrderedDict()
for k, v in self.var_dict.items():
is_processed = processed_vars.get(k, False)
if is_processed:
weights_dict[k] = v
else:
weights_dict[k] = torch.as_tensor(v)
# always use complex format for saving
persistence_dict = OrderedDict()
persistence_dict[self.PERSISTENCE_KEY_MODEL] = weights_dict
if self.meta:
persistence_dict[self.PERSISTENCE_KEY_META_PROPS] = self.meta
if self.train_conf:
persistence_dict[self.PERSISTENCE_KEY_TRAIN_CONF] = self.train_conf
if self.other_props:
for k, v in self.other_props.items():
persistence_dict[k] = v
return persistence_dict
def update(self, ml: ModelLearnable):
"""Update the persistence data with the learned values.
Args:
ml (ModelLearnable): updated information to be merged into existing ModelLearnable
"""
err = validate_model_learnable(ml)
if err:
raise ValueError(err)
self.meta = ml.get(ModelLearnableKey.META, None)
# update with value of the model learnable
# note that the original weights that are not learned are still kept!
learned_weights = ml.get(ModelLearnableKey.WEIGHTS, {})
for k, v in learned_weights.items():
self.var_dict[k] = v
@staticmethod
def get_persist_model_format():
return ModelFormat.PT_CHECKPOINT
| NVFlare-main | nvflare/app_opt/pt/model_persistence_format_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
import torch.nn as nn
from nvflare.security.logging import secure_format_exception
def feed_vars(model: nn.Module, model_params):
"""Feed variable values from model_params to pytorch state_dict.
Args:
model (nn.Module): the local pytorch model
model_params: a ModelData message
Returns:
a list of params and a dictionary of vars to params
"""
_logger = logging.getLogger("AssignVariables")
_logger.debug("AssignVariables...")
to_assign = []
n_ext = len(model_params)
_logger.debug(f"n_ext {n_ext}")
local_var_dict = model.state_dict()
for var_name in local_var_dict:
try:
if var_name in tuple(model_params):
nd = model_params[var_name]
to_assign.append(nd)
local_var_dict[var_name] = torch.as_tensor(
nd
) # update local state dict TODO: enable setting of datatype
except Exception as e:
_logger.error(f"feed_vars Exception: {secure_format_exception(e)}")
raise RuntimeError(secure_format_exception(e))
_logger.debug("Updated local variables to be assigned.")
n_assign = len(to_assign)
_logger.info(f"Vars {n_ext} of {n_assign} assigned.")
return to_assign, local_var_dict
| NVFlare-main | nvflare/app_opt/pt/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import torch
from nvflare.app_common.utils.fl_model_utils import ParamsConverter
class NumpyToPTParamsConverter(ParamsConverter):
def convert(self, params: Dict) -> Dict:
return {k: torch.as_tensor(v) for k, v in params.items()}
class PTToNumpyParamsConverter(ParamsConverter):
def convert(self, params: Dict) -> Dict:
return {k: v.cpu().numpy() for k, v in params.items()}
| NVFlare-main | nvflare/app_opt/pt/params_converter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The SCAFFOLD-related functions are based on https://github.com/Xtra-Computing/NIID-Bench
# MIT License
#
# Copyright (c) 2021 Yiqun Diao, Qinbin Li
#
# Copyright (c) 2020 International Business Machines
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import torch
from torch.optim import Optimizer
def get_lr_values(optimizer: Optimizer):
"""
This function is used to get the learning rates of the optimizer.
"""
return [group["lr"] for group in optimizer.state_dict()["param_groups"]]
class PTScaffoldHelper(object):
"""Helper to be used with SCAFFOLD components.
Implements the functions used for the algorithm proposed in
Karimireddy et al. "SCAFFOLD: Stochastic Controlled Averaging for Federated Learning"
(https://arxiv.org/abs/1910.06378) using PyTorch.
SCAFFOLD-related functions are based on https://github.com/Xtra-Computing/NIID-Bench.
See also Li et al. "Federated Learning on Non-IID Data Silos: An Experimental Study"
(https://arxiv.org/abs/2102.02079).
"""
def __init__(self):
# SCAFFOLD control terms
self.cnt = 0
self.c_global = None
self.c_local = None
self.c_delta_para = None
def init(self, model):
# create models for SCAFFOLD correction terms
self.c_global = copy.deepcopy(model)
self.c_local = copy.deepcopy(model)
# Initialize correction term with zeros
c_init_para = model.state_dict()
for k in c_init_para.keys():
c_init_para[k] = torch.zeros_like(c_init_para[k])
self.c_global.load_state_dict(c_init_para)
self.c_local.load_state_dict(c_init_para)
def get_params(self):
self.cnt = 0
# Adapted from https://github.com/Xtra-Computing/NIID-Bench/blob/main/experiments.py#L371
c_global_para = self.c_global.state_dict()
c_local_para = self.c_local.state_dict()
return c_global_para, c_local_para
def model_update(self, model, curr_lr, c_global_para, c_local_para):
# Update model using scaffold controls
# See https://github.com/Xtra-Computing/NIID-Bench/blob/main/experiments.py#L391
net_para = model.state_dict()
for key in net_para:
net_para[key] = net_para[key] - curr_lr * (c_global_para[key] - c_local_para[key])
model.load_state_dict(net_para)
self.cnt += 1
def terms_update(self, model, curr_lr, c_global_para, c_local_para, model_global):
# Update the local scaffold controls
# See https://github.com/Xtra-Computing/NIID-Bench/blob/main/experiments.py#L403
c_new_para = self.c_local.state_dict()
self.c_delta_para = copy.deepcopy(self.c_local.state_dict())
global_model_para = model_global.state_dict()
net_para = model.state_dict()
for key in net_para:
c_new_para[key] = (
c_new_para[key] - c_global_para[key] + (global_model_para[key] - net_para[key]) / (self.cnt * curr_lr)
)
self.c_delta_para[key] = (c_new_para[key] - c_local_para[key]).cpu().numpy()
self.c_local.load_state_dict(c_new_para)
def load_global_controls(self, weights):
self.c_global.load_state_dict(weights)
def get_delta_controls(self):
if self.c_delta_para is None:
raise ValueError("c_delta_para hasn't been computed yet!")
return self.c_delta_para
| NVFlare-main | nvflare/app_opt/pt/scaffold.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import os.path
import logging
import os
from typing import Dict
from nv_attestation_sdk.attestation import Attestation, Devices, Environment
class VerifierProp:
DEVICES = "devices" # GPU, CPU, etc.
ENV = "env"
URL = "url"
APPRAISAL_POLICY_FILE = "appraisal_policy_file"
RESULT_POLICY_FILE = "result_policy_file"
class Device:
GPU = "gpu"
CPU = "cpu"
NIC = "nic"
OS = "os"
DPU = "dpu"
mapping = {GPU: Devices.GPU, CPU: Devices.CPU, NIC: Devices.NIC, OS: Devices.OS, DPU: Devices.DPU}
class Env:
TEST = "test"
LOCAL = "local"
AZURE = "azure"
GCP = "gcp"
mapping = {TEST: Environment.TEST, LOCAL: Environment.LOCAL, AZURE: Environment.AZURE, GCP: Environment.GCP}
class CCHelper(object):
def __init__(self, site_name: str, verifiers: list):
"""Create an AttestationHelper instance
Args:
site_name: name of the site
verifiers: dict that specifies verifiers to be used
"""
self.site_name = site_name
self.verifiers = verifiers
attestation = Attestation()
attestation.set_name(site_name)
self.attestation = attestation
self.token = None
self.logger = logging.getLogger(self.__class__.__name__)
for v in verifiers:
assert isinstance(v, dict)
url = None
env = None
devices = 0
appraisal_policy_file = None
result_policy_file = None
for prop, value in v.items():
if prop == VerifierProp.URL:
url = value
elif prop == VerifierProp.ENV:
env = Env.mapping.get(value)
elif prop == VerifierProp.DEVICES:
dv = Device.mapping.get(value)
if not dv:
raise ValueError(f"invalid device '{value}'")
devices = dv
elif prop == VerifierProp.APPRAISAL_POLICY_FILE:
appraisal_policy_file = value
elif prop == VerifierProp.RESULT_POLICY_FILE:
result_policy_file = value
if not env:
raise ValueError("Environment is not specified for verifier")
if not devices:
raise ValueError("Devices is not specified for verifier")
if url is None:
raise ValueError("Url is not specified for verifier")
if appraisal_policy_file is None:
raise ValueError("Appraisal policy file is not specified for verifier")
if not os.path.exists(appraisal_policy_file):
raise ValueError(f"Appraisal policy file '{appraisal_policy_file}' does not exist")
appraisal_policy = open(appraisal_policy_file, "rt").read().rstrip()
if result_policy_file is None:
raise ValueError("Result policy file is not specified for verifier")
if not os.path.exists(result_policy_file):
raise ValueError(f"Result policy file '{result_policy_file}' does not exist")
self.result_policy = open(result_policy_file, "rt").read().rstrip()
attestation.add_verifier(devices, env, url, appraisal_policy)
def reset_participant(self, participant_name: str):
pass
def prepare(self) -> bool:
"""Prepare for attestation process
Returns: error if any
"""
ok = self.attestation.attest()
self.logger.info(f"CC - attest result (is valid?): {ok}")
self.token = self.attestation.get_token(self.site_name)
self.logger.info(f"token {self.token=}")
return True
def get_token(self):
return self.token
def validate_participants(self, participants: Dict[str, str]) -> Dict[str, bool]:
"""Validate CC policies of specified participants against the requirement policy of the site.
Args:
participants: dict of participant name => token
Returns: dict of participant name => bool
"""
if not participants:
return {}
result = {k: self.attestation.validate_token(self.result_policy, v) for k, v in participants.items()}
self.logger.debug(f"CC - results from validating participants' tokens: {result}")
return result
| NVFlare-main | nvflare/app_opt/confidential_computing/cc_helper.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import AdminCommandNames, FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.fl_exception import UnsafeComponentError
from .cc_helper import CCHelper
PEER_CTX_CC_TOKEN = "_peer_ctx_cc_token"
CC_TOKEN = "_cc_token"
CC_INFO = "_cc_info"
CC_TOKEN_VALIDATED = "_cc_token_validated"
class CCManager(FLComponent):
def __init__(self, verifiers: list):
"""Manage all confidential computing related tasks.
This manager does the following tasks:
obtaining its own GPU CC token
preparing the token to the server
keeping clients' tokens in server
validating all tokens in the entire NVFlare system
Args:
verifiers (list):
each element in this list is a dictionary and the keys of dictionary are
"devices", "env", "url", "appraisal_policy_file" and "result_policy_file."
the values of devices are "gpu" and "cpu"
the values of env are "local" and "test"
currently, valid combination is gpu + local
url must be an empty string
appraisal_policy_file must point to an existing file
currently supports an empty file only
result_policy_file must point to an existing file
currently supports the following content only
.. code-block:: json
{
"version":"1.0",
"authorization-rules":{
"x-nv-gpu-available":true,
"x-nv-gpu-attestation-report-available":true,
"x-nv-gpu-info-fetched":true,
"x-nv-gpu-arch-check":true,
"x-nv-gpu-root-cert-available":true,
"x-nv-gpu-cert-chain-verified":true,
"x-nv-gpu-ocsp-cert-chain-verified":true,
"x-nv-gpu-ocsp-signature-verified":true,
"x-nv-gpu-cert-ocsp-nonce-match":true,
"x-nv-gpu-cert-check-complete":true,
"x-nv-gpu-measurement-available":true,
"x-nv-gpu-attestation-report-parsed":true,
"x-nv-gpu-nonce-match":true,
"x-nv-gpu-attestation-report-driver-version-match":true,
"x-nv-gpu-attestation-report-vbios-version-match":true,
"x-nv-gpu-attestation-report-verified":true,
"x-nv-gpu-driver-rim-schema-fetched":true,
"x-nv-gpu-driver-rim-schema-validated":true,
"x-nv-gpu-driver-rim-cert-extracted":true,
"x-nv-gpu-driver-rim-signature-verified":true,
"x-nv-gpu-driver-rim-driver-measurements-available":true,
"x-nv-gpu-driver-vbios-rim-fetched":true,
"x-nv-gpu-vbios-rim-schema-validated":true,
"x-nv-gpu-vbios-rim-cert-extracted":true,
"x-nv-gpu-vbios-rim-signature-verified":true,
"x-nv-gpu-vbios-rim-driver-measurements-available":true,
"x-nv-gpu-vbios-index-conflict":true,
"x-nv-gpu-measurements-match":true
}
}
"""
FLComponent.__init__(self)
self.site_name = None
self.helper = None
self.verifiers = verifiers
self.my_token = None
self.participant_cc_info = {} # used by the Server to keep tokens of all clients
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.SYSTEM_BOOTSTRAP:
try:
err = self._prepare_for_attestation(fl_ctx)
except:
self.log_exception(fl_ctx, "exception in attestation preparation")
err = "exception in attestation preparation"
finally:
if err:
self.log_critical(fl_ctx, err, fire_event=False)
raise UnsafeComponentError(err)
elif event_type == EventType.BEFORE_CLIENT_REGISTER:
# On client side
self._prepare_token_for_login(fl_ctx)
elif event_type == EventType.CLIENT_REGISTERED:
# Server side
self._add_client_token(fl_ctx)
elif event_type == EventType.AUTHORIZE_COMMAND_CHECK:
command_to_check = fl_ctx.get_prop(key=FLContextKey.COMMAND_NAME)
self.logger.debug(f"Received {command_to_check=}")
if command_to_check == AdminCommandNames.CHECK_RESOURCES:
try:
err = self._client_to_check_participant_token(fl_ctx)
except:
self.log_exception(fl_ctx, "exception in validating participants")
err = "Participants unable to meet client CC requirements"
finally:
if err:
self._not_authorize_job(err, fl_ctx)
elif event_type == EventType.BEFORE_CHECK_CLIENT_RESOURCES:
# Server side
try:
err = self._server_to_check_client_token(fl_ctx)
except:
self.log_exception(fl_ctx, "exception in validating clients")
err = "Clients unable to meet server CC requirements"
finally:
if err:
self._block_job(err, fl_ctx)
elif event_type == EventType.AFTER_CHECK_CLIENT_RESOURCES:
# Server side
fl_ctx.remove_prop(PEER_CTX_CC_TOKEN)
def _prepare_token_for_login(self, fl_ctx: FLContext):
# client side
if self.my_token is None:
self.my_token = self.helper.get_token()
cc_info = {CC_TOKEN: self.my_token}
fl_ctx.set_prop(key=CC_INFO, value=cc_info, sticky=False, private=False)
def _add_client_token(self, fl_ctx: FLContext):
# server side
peer_ctx = fl_ctx.get_peer_context()
token_owner = peer_ctx.get_identity_name()
peer_cc_info = peer_ctx.get_prop(CC_INFO)
self.participant_cc_info[token_owner] = peer_cc_info
self.participant_cc_info[token_owner][CC_TOKEN_VALIDATED] = False
def _prepare_for_attestation(self, fl_ctx: FLContext) -> str:
# both server and client sides
self.site_name = fl_ctx.get_identity_name()
self.helper = CCHelper(site_name=self.site_name, verifiers=self.verifiers)
ok = self.helper.prepare()
if not ok:
return "failed to attest"
self.my_token = self.helper.get_token()
self.participant_cc_info[self.site_name] = {CC_TOKEN: self.my_token, CC_TOKEN_VALIDATED: True}
return ""
def _client_to_check_participant_token(self, fl_ctx: FLContext) -> str:
# Client side
peer_ctx = fl_ctx.get_peer_context()
if peer_ctx is None:
return f"Empty peer context in {self.site_name=}"
participants_to_validate = peer_ctx.get_prop(PEER_CTX_CC_TOKEN, None)
if not participants_to_validate:
return "missing PEER_CTX_CC_TOKEN prop in peer context"
if not isinstance(participants_to_validate, dict):
return (
f"bad PEER_CTX_CC_TOKEN prop in peer context: must be a dict but got {type(participants_to_validate)}"
)
if not participants_to_validate:
return ""
return self._validate_participants_tokens(participants_to_validate)
def _server_to_check_client_token(self, fl_ctx: FLContext) -> str:
participants = fl_ctx.get_prop(FLContextKey.JOB_PARTICIPANTS)
if not participants:
return f"missing '{FLContextKey.JOB_PARTICIPANTS}' prop in fl_ctx"
if not isinstance(participants, list):
return f"bad value for {FLContextKey.JOB_PARTICIPANTS} in fl_ctx: expect list bot got {type(participants)}"
participant_tokens = {self.site_name: self.my_token}
for p in participants:
assert isinstance(p, str)
if p == self.site_name:
continue
if p not in self.participant_cc_info:
return f"no token available for participant {p}"
participant_tokens[p] = self.participant_cc_info[p][CC_TOKEN]
err = self._validate_participants_tokens(participant_tokens)
if err:
return err
for p in participant_tokens:
self.participant_cc_info[p][CC_TOKEN_VALIDATED] = True
fl_ctx.set_prop(key=PEER_CTX_CC_TOKEN, value=participant_tokens, sticky=True, private=False)
self.logger.debug(f"{self.site_name=} set PEER_CTX_CC_TOKEN with {participant_tokens=}")
return ""
def _validate_participants_tokens(self, participants) -> str:
self.logger.debug(f"Validating participant tokens {participants=}")
result = self.helper.validate_participants(participants)
assert isinstance(result, dict)
for p in result:
self.participant_cc_info[p] = {CC_TOKEN: participants[p], CC_TOKEN_VALIDATED: True}
invalid_participant_list = [k for k, v in self.participant_cc_info.items() if v[CC_TOKEN_VALIDATED] is False]
if invalid_participant_list:
invalid_participant_string = ",".join(invalid_participant_list)
self.logger.debug(f"{invalid_participant_list=}")
return f"Participant {invalid_participant_string} not meeting CC requirements"
else:
return ""
def _not_authorize_job(self, reason: str, fl_ctx: FLContext):
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_JOB_ID, "")
self.log_error(fl_ctx, f"Job {job_id} is blocked: {reason}")
fl_ctx.set_prop(key=FLContextKey.AUTHORIZATION_REASON, value=reason)
fl_ctx.set_prop(key=FLContextKey.AUTHORIZATION_RESULT, value=False)
def _block_job(self, reason: str, fl_ctx: FLContext):
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_JOB_ID, "")
self.log_error(fl_ctx, f"Job {job_id} is blocked: {reason}")
fl_ctx.set_prop(key=FLContextKey.JOB_BLOCK_REASON, value=reason)
fl_ctx.set_prop(key=FLContextKey.AUTHORIZATION_RESULT, value=False)
| NVFlare-main | nvflare/app_opt/confidential_computing/cc_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/confidential_computing/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/tracking/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/tracking/tb/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.tracking.log_writer import LogWriter
from nvflare.app_common.tracking.tracker_types import LogWriterName
from nvflare.app_common.widgets.streaming import ANALYTIC_EVENT_TYPE
class TBWriter(LogWriter):
def __init__(self, event_type=ANALYTIC_EVENT_TYPE):
"""Sends experiment tracking data.
Args:
event_type (str): event type to fire.
"""
super().__init__(event_type)
def get_writer_name(self) -> LogWriterName:
return LogWriterName.TORCH_TB
def add_scalar(self, tag: str, scalar: float, global_step: Optional[int] = None, **kwargs):
"""Sends a scalar.
Args:
tag (str): Data identifier.
scalar (float): Value to send.
global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
self.sender.add(tag=tag, value=scalar, data_type=AnalyticsDataType.SCALAR, global_step=global_step, **kwargs)
def add_scalars(self, tag: str, scalars: dict, global_step: Optional[int] = None, **kwargs):
"""Sends scalars.
Args:
tag (str): The parent name for the tags.
scalars (dict): Key-value pair storing the tag and corresponding values.
global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
self.sender.add(tag=tag, value=scalars, data_type=AnalyticsDataType.SCALARS, global_step=global_step, **kwargs)
def flush(self):
"""Flushes out the message.
This does nothing, it is defined to mimic the PyTorch SummaryWriter behavior.
"""
pass
| NVFlare-main | nvflare/app_opt/tracking/tb/tb_writer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.tracking.log_writer_me import LogWriterForMetricsExchanger
from nvflare.app_common.tracking.tracker_types import LogWriterName
class TBWriterForMetricsExchanger(LogWriterForMetricsExchanger):
"""Sends experiment tracking data through MetricsExchanger."""
def get_writer_name(self) -> LogWriterName:
return LogWriterName.TORCH_TB
def add_scalar(self, tag: str, scalar: float, global_step: Optional[int] = None, **kwargs):
"""Sends a scalar.
Args:
tag (str): Data identifier.
scalar (float): Value to send.
global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
self.log(key=tag, value=scalar, data_type=AnalyticsDataType.SCALAR, global_step=global_step, **kwargs)
def add_scalars(self, tag: str, scalars: dict, global_step: Optional[int] = None, **kwargs):
"""Sends scalars.
Args:
tag (str): The parent name for the tags.
scalars (dict): Key-value pair storing the tag and corresponding values.
global_step (optional, int): Global step value.
**kwargs: Additional arguments to pass to the receiver side.
"""
self.log(key=tag, value=scalars, data_type=AnalyticsDataType.SCALARS, global_step=global_step, **kwargs)
def flush(self):
"""Flushes out the message.
This does nothing, it is defined to mimic the PyTorch SummaryWriter behavior.
"""
pass
| NVFlare-main | nvflare/app_opt/tracking/tb/tb_writer_metrics_exchanger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.analytix import AnalyticsData, AnalyticsDataType
from nvflare.apis.dxo import from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.widgets.streaming import AnalyticsReceiver
FUNCTION_MAPPING = {
AnalyticsDataType.SCALAR: "add_scalar",
AnalyticsDataType.TEXT: "add_text",
AnalyticsDataType.IMAGE: "add_image",
AnalyticsDataType.SCALARS: "add_scalars",
}
class TBAnalyticsReceiver(AnalyticsReceiver):
def __init__(self, tb_folder="tb_events", events: Optional[List[str]] = None):
"""Receives analytics data to save to TensorBoard.
Args:
tb_folder (str): the folder to store tensorboard files.
events (optional, List[str]): A list of events to be handled by this receiver.
.. code-block:: text
:caption: Folder structure
Inside run_XX folder:
- workspace
- run_01 (already created):
- output_dir (default: tb_events):
- peer_name_1:
- peer_name_2:
- run_02 (already created):
- output_dir (default: tb_events):
- peer_name_1:
- peer_name_2:
"""
super().__init__(events=events)
self.writers_table = {}
self.tb_folder = tb_folder
self.root_log_dir = None
def initialize(self, fl_ctx: FLContext):
workspace = fl_ctx.get_engine().get_workspace()
run_dir = workspace.get_run_dir(fl_ctx.get_job_id())
root_log_dir = os.path.join(run_dir, self.tb_folder)
os.makedirs(root_log_dir, exist_ok=True)
self.root_log_dir = root_log_dir
def save(self, fl_ctx: FLContext, shareable: Shareable, record_origin):
dxo = from_shareable(shareable)
analytic_data = AnalyticsData.from_dxo(dxo)
if not analytic_data:
return
writer = self.writers_table.get(record_origin)
if writer is None:
peer_log_dir = os.path.join(self.root_log_dir, record_origin)
writer = SummaryWriter(log_dir=peer_log_dir)
self.writers_table[record_origin] = writer
# do different things depending on the type in dxo
self.log_debug(
fl_ctx,
f"save data {analytic_data} from {record_origin}",
fire_event=False,
)
func_name = FUNCTION_MAPPING.get(analytic_data.data_type, None)
if func_name is None:
self.log_error(fl_ctx, f"The data_type {analytic_data.data_type} is not supported.", fire_event=False)
return
func = getattr(writer, func_name)
if analytic_data.step:
func(analytic_data.tag, analytic_data.value, analytic_data.step)
else:
func(analytic_data.tag, analytic_data.value)
def finalize(self, fl_ctx: FLContext):
for writer in self.writers_table.values():
writer.flush()
writer.close()
| NVFlare-main | nvflare/app_opt/tracking/tb/tb_receiver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/tracking/mlflow/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import timeit
from typing import Dict, Optional
import mlflow
from mlflow.entities import Metric, Param, RunTag
from mlflow.tracking.client import MlflowClient
from mlflow.utils.time_utils import get_current_time_millis
from nvflare.apis.analytix import AnalyticsData, AnalyticsDataType
from nvflare.apis.dxo import from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.tracking.track_exception import ExpTrackingException
from nvflare.app_common.tracking.tracker_types import ANALYTIC_EVENT_TYPE, LogWriterName, TrackConst
from nvflare.app_common.widgets.streaming import AnalyticsReceiver
class MlflowConstants:
EXPERIMENT_TAG = "experiment_tag"
RUN_TAG = "run_tag"
EXPERIMENT_NAME = "experiment_name"
class MLflowReceiver(AnalyticsReceiver):
def __init__(
self,
tracking_uri: Optional[str] = None,
kwargs: Optional[dict] = None,
artifact_location: Optional[str] = None,
events=None,
buffer_flush_time=1,
):
"""MLflowReceiver receives log events from clients and deliver them to the MLflow tracking server.
Args:
tracking_uri (Optional[str], optional): MLflow tracking server URI. When this is not specified, the metrics will be written to the local file system.
If the tracking URI is specified, the MLflow tracking server must started before running the job. Defaults to None.
kwargs (Optional[dict], optional): keyword arguments:
"experiment_name" (str): Specifies the experiment name. If not specified, the default name of "FLARE FL Experiment" will be used.
"run_name" (str): Specifies the run name
"experiment_tags" (dict): Tags used when creating the MLflow experiment.
"mlflow.note.content" is a special MLflow tag. When provided, it displays as experiment
description field on the MLflow UI. You can use Markdown syntax for the description.
"run_tags" (str): Tags used when creating the MLflow run. "mlflow.note.content" is a special MLflow tag.
When provided, it displays as run description field on the MLflow UI.
You can use Markdown syntax for the description.
artifact_location (Optional[str], optional): Relative location of artifacts. Currently only text is supported at the moment.
events (_type_, optional): The event the receiver is listening to. By default, it listens to "fed.analytix_log_stats".
buffer_flush_time (int, optional): The time in seconds between deliveries of event data to the MLflow tracking server. The
data is buffered and then delivered to the MLflow tracking server in batches, and
the buffer_flush_time controls the frequency of the sending. By default, the buffer
flushes every second. You can reduce the time to a fraction of a second if you prefer
less delay. Keep in mind that reducing the buffer_flush_time will potentially cause high
traffic to the MLflow tracking server, which in some cases can actually cause more latency.
"""
if events is None:
events = ["fed." + ANALYTIC_EVENT_TYPE]
super().__init__(events=events)
self.artifact_location = artifact_location
self.fl_ctx = None
self.kwargs = kwargs if kwargs else {}
self.tracking_uri = tracking_uri
self.mlflow = mlflow
self.mlflow_clients: Dict[str, MlflowClient] = {}
self.experiment_id = None
self.run_ids = {}
self.buffer = {}
self.time_start = 0
self.time_since_flush = 0
self.buff_flush_time = buffer_flush_time
if self.tracking_uri:
mlflow.set_tracking_uri(uri=self.tracking_uri)
def initialize(self, fl_ctx: FLContext):
"""Initializes MlflowClient for each site.
An MlflowClient for each client site is created, an experiment is created, and a run is created.
The kwargs in the params for MLflowReceiver for "experiment_name" and "experiment_tags" are used for the experiment if
provided. The "run_tags" are used for the run tags as well as "job_id" and "run_name" which are automatically generated.
The "run_name" from kwargs is concatenated after the site name and job_id: {site_name}-{job_id_tag}-{run_name}.
Args:
fl_ctx (FLContext): the FLContext
"""
self.fl_ctx = fl_ctx
self.time_start = 0
art_full_path = self.get_artifact_location(self.artifact_location)
experiment_name = self.kwargs.get(TrackConst.EXPERIMENT_NAME, "FLARE FL Experiment")
experiment_tags = self._get_tags(TrackConst.EXPERIMENT_TAGS, kwargs=self.kwargs)
sites = fl_ctx.get_engine().get_clients()
self._init_buffer(sites)
self.mlflow_setup(art_full_path, experiment_name, experiment_tags, sites)
def mlflow_setup(self, art_full_path, experiment_name, experiment_tags, sites):
"""Set up an MlflowClient for each client site and create an experiment and run.
Args:
art_full_path (str): Full path to artifacts.
experiment_name (str): Experiment name.
experiment_tags (dict): Experiment tags.
sites (List[Client]): List of client sites.
"""
for site in sites:
mlflow_client = self.mlflow_clients.get(site.name, None)
if not mlflow_client:
mlflow_client = MlflowClient()
self.mlflow_clients[site.name] = mlflow_client
self.experiment_id = self._create_experiment(
mlflow_client, experiment_name, art_full_path, experiment_tags
)
run_group_id = str(int(time.time()))
default_run_name = "FLARE FL Run"
run_name = self.get_run_name(self.kwargs, default_run_name, site.name, run_group_id)
tags = self.get_run_tags(self.kwargs, run_group_id, run_name)
run = mlflow_client.create_run(experiment_id=self.experiment_id, run_name=run_name, tags=tags)
self.run_ids[site.name] = run.info.run_id
def _init_buffer(self, sites):
"""For each site, create a buffer (dict) consisting of a list each for metrics, parameters, and tags."""
for site in sites:
self.buffer[site.name] = {
AnalyticsDataType.METRICS: [],
AnalyticsDataType.PARAMETERS: [],
AnalyticsDataType.TAGS: [],
}
def get_run_name(self, kwargs: dict, default_name: str, site_name: str, run_group_id: str):
run_name = kwargs.get(TrackConst.RUN_NAME, default_name)
job_id_tag = self.get_job_id_tag(group_id=run_group_id)
return f"{site_name}-{job_id_tag[:6]}-{run_name}"
def get_run_tags(self, kwargs, run_group_id, run_name: str):
run_tags = self._get_tags(TrackConst.RUN_TAGS, kwargs=kwargs)
run_tags["job_id"] = self.get_job_id_tag(group_id=run_group_id)
run_tags["run_name"] = run_name
return run_tags
def get_job_id_tag(self, group_id: str) -> str:
job_id = self.fl_ctx.get_job_id()
if job_id == "simulate_job":
# Since all jobs run in the simulator have the same job_id of "simulate_job", use group_id instead
job_id = group_id
return job_id
def _get_tags(self, tag_key: str, kwargs: dict):
tags = {}
if tag_key in kwargs:
tags = kwargs[tag_key]
if not isinstance(tags, dict):
raise ValueError(f"argument error: value for key:'{tag_key}' is expecting type of dict")
else:
print("tag key: ", tag_key, " not found in kwargs: ", kwargs)
return tags if tags else {}
def get_artifact_location(self, relative_path: str):
workspace = self.fl_ctx.get_engine().get_workspace()
run_dir = workspace.get_run_dir(self.fl_ctx.get_job_id())
root_log_dir = os.path.join(run_dir, relative_path)
return root_log_dir
def _create_experiment(
self,
mlflow_client: MlflowClient,
experiment_name: str,
artifact_location: str,
experiment_tags: Optional[dict] = None,
) -> Optional[str]:
experiment_id = None
if experiment_name:
experiment = mlflow_client.get_experiment_by_name(name=experiment_name)
if not experiment:
self.logger.info(f"Experiment with name '{experiment_name}' does not exist. Creating a new experiment.")
try:
import pathlib
artifact_location_uri = pathlib.Path(artifact_location).as_uri()
experiment_id = mlflow_client.create_experiment(
name=experiment_name, artifact_location=artifact_location_uri, tags=experiment_tags
)
except Exception as e:
raise ExpTrackingException(
f"Could not create an MLflow Experiment with name {experiment_name}. {e}"
)
experiment = mlflow_client.get_experiment_by_name(name=experiment_name)
else:
experiment_id = experiment.experiment_id
self.logger.info(f"Experiment={experiment}")
return experiment_id
def save(self, fl_ctx: FLContext, shareable: Shareable, record_origin: str):
if self.time_start == 0:
self.time_start = timeit.default_timer()
dxo = from_shareable(shareable)
data = AnalyticsData.from_dxo(dxo, receiver=LogWriterName.MLFLOW)
if not data:
return
if data.data_type == AnalyticsDataType.TEXT:
mlflow_client = self.get_mlflow_client(record_origin)
if not mlflow_client:
raise RuntimeError(f"mlflow client is None for site {record_origin}.")
run_id = self.get_run_id(record_origin)
if data.kwargs.get("path", None):
mlflow_client.log_text(run_id=run_id, text=data.value, artifact_file=data.kwargs.get("path"))
elif data.data_type == AnalyticsDataType.MODEL:
# not currently supported
pass
elif data.data_type == AnalyticsDataType.IMAGE:
# not currently supported
pass
else:
self.buffer_data(data, record_origin)
self.time_since_flush += timeit.default_timer() - self.time_start
if self.time_since_flush >= self.buff_flush_time:
self.flush_buffer(record_origin)
def buffer_data(self, data: AnalyticsData, record_origin: str) -> None:
"""Buffer the data to send later.
A buffer for each data_type is in each site_buffer, all of which are in self.buffer
Args:
data (AnalyticsData): Data.
record_origin (str): Origin of the data, or site name.
"""
site_buffer = self.buffer[record_origin]
target_type = self.get_target_type(data.data_type)
buf = site_buffer[target_type]
if data.data_type == AnalyticsDataType.PARAMETER:
buf.append(Param(data.tag, str(data.value)))
elif data.data_type == AnalyticsDataType.TAG:
buf.append(RunTag(data.tag, str(data.value)))
elif data.data_type == AnalyticsDataType.METRIC:
buf.append(Metric(data.tag, data.value, get_current_time_millis(), data.step or 0))
elif data.data_type == AnalyticsDataType.PARAMETERS:
for k, v in data.value.items():
buf.append(Param(k, str(v)))
elif data.data_type == AnalyticsDataType.TAGS:
for k, v in data.value.items():
buf.append(RunTag(k, str(v)))
elif data.data_type == AnalyticsDataType.METRICS:
for k, v in data.value.items():
buf.append(Metric(k, v, get_current_time_millis(), data.step or 0))
def get_target_type(self, data_type: AnalyticsDataType):
if data_type == AnalyticsDataType.METRIC:
return AnalyticsDataType.METRICS
elif data_type == AnalyticsDataType.PARAMETER:
return AnalyticsDataType.PARAMETERS
elif data_type == AnalyticsDataType.TAG:
return AnalyticsDataType.TAGS
else:
return data_type
def flush_buffer(self, record_origin):
"""Flush the buffer and send all the data to the MLflow tracking server.
Args:
record_origin (str): Origin of the data, or site name.
"""
mlflow_client = self.get_mlflow_client(record_origin)
if not mlflow_client:
raise RuntimeError(f"mlflow client is None for site {record_origin}.")
run_id = self.get_run_id(record_origin)
site_buff = self.buffer[record_origin]
metrics_arr = self.pop_from_buffer(site_buff[AnalyticsDataType.METRICS])
params_arr = self.pop_from_buffer(site_buff[AnalyticsDataType.PARAMETERS])
tags_arr = self.pop_from_buffer(site_buff[AnalyticsDataType.TAGS])
mlflow_client.log_batch(run_id=run_id, metrics=metrics_arr, params=params_arr, tags=tags_arr)
self.time_start = 0
self.time_since_flush = 0
def pop_from_buffer(self, log_buffer):
item_arr = []
for _ in range(len(log_buffer)):
item_arr.append(log_buffer.pop())
return item_arr
def finalize(self, fl_ctx: FLContext):
for site_name in self.buffer:
self.flush_buffer(site_name)
for site_name in self.run_ids:
run_id = self.run_ids[site_name]
mlflow_client = self.mlflow_clients[site_name]
if run_id:
mlflow_client.set_terminated(run_id)
def get_run_id(self, site_id: str) -> str:
return self.run_ids.get(site_id, None)
def get_mlflow_client(self, site_id: str) -> MlflowClient:
return self.mlflow_clients.get(site_id, None)
| NVFlare-main | nvflare/app_opt/tracking/mlflow/mlflow_receiver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.tracking.log_writer import LogWriter
from nvflare.app_common.tracking.tracker_types import LogWriterName
from nvflare.app_common.widgets.streaming import ANALYTIC_EVENT_TYPE
class MLflowWriter(LogWriter):
def __init__(self, event_type: str = ANALYTIC_EVENT_TYPE):
"""MLflowWriter mimics the usage of mlflow.
Users can replace the import of mlflow with MLflowWriter. They would then use
MLflowWriter the same as they would use mlflow. MLflowWriter will send log records to
the receiver.
Args:
event_type (str, optional): _description_. Defaults to ANALYTIC_EVENT_TYPE.
"""
super().__init__(event_type)
def get_writer_name(self) -> LogWriterName:
"""Returns "MLFLOW"."""
return LogWriterName.MLFLOW
def log_param(self, key: str, value: any) -> None:
"""Log a parameter (e.g. model hyperparameter) under the current run.
Args:
key (str): Parameter name. This string may only contain alphanumerics,
underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores support keys up to length 250, but some may
support larger keys.
value (any): Parameter value, of type string, but will be string-ified if not.
All backend stores support values up to length 500, but some
may support larger values.
"""
self.sender.add(tag=key, value=value, data_type=AnalyticsDataType.PARAMETER)
def log_params(self, values: dict) -> None:
"""Log a batch of params for the current run.
Args:
values (dict): Dictionary of param_name: String -> value: (String, but will be string-ified if not)
"""
self.sender.add(tag="params", value=values, data_type=AnalyticsDataType.PARAMETERS)
def log_metric(self, key: str, value: float, step: Optional[int] = None) -> None:
"""Log a metric under the current run.
Args:
key (str): Metric name. This string may only contain alphanumerics, underscores (_), dashes (-),
periods (.), spaces ( ), and slashes (/). All backend stores will support keys up to length 250,
but some may support larger keys.
value (float): Metric value. Note that some special values such as +/- Infinity may be replaced by other
values depending on the store. For example, the SQLAlchemy store replaces +/- Infinity with
max / min float values. All backend stores will support values up to length 5000, but some may
support larger values.
step (int, optional): Metric step. Defaults to zero if unspecified.
"""
self.sender.add(tag=key, value=value, data_type=AnalyticsDataType.METRIC, global_step=step)
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
"""Log multiple metrics for the current run.
Args:
metrics (dict): Dictionary of metric_name: String -> value: Float. Note that some special values such as +/-
Infinity may be replaced by other values depending on the store. For example, sql based store
may replace +/- Infinity with max / min float values.
step (int, optional): A single integer step at which to log the specified Metrics. If unspecified, each metric is
logged at step zero.
"""
self.sender.add(tag="metrics", value=metrics, data_type=AnalyticsDataType.METRICS, global_step=step)
def log_text(self, text: str, artifact_file_path: str) -> None:
"""Log text as an artifact under the current run.
Args:
text (str): String of text to log.
artifact_file_path (str): The run-relative artifact file path in posixpath format
to which the text is saved (e.g. “dir/file.txt”).
"""
self.sender.add(tag="text", value=text, data_type=AnalyticsDataType.TEXT, path=artifact_file_path)
def set_tag(self, key: str, tag: any) -> None:
"""Set a tag under the current run.
Args:
key (str): Name of the tag.
tag (any): Tag value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
"""
self.sender.add(tag=key, value=tag, data_type=AnalyticsDataType.TAG)
def set_tags(self, tags: dict) -> None:
"""Log a batch of tags for the current run.
Args:
tags (dict): Dictionary of tag_name: String -> value: (String, but will be string-ified if
not)
"""
self.sender.add(tag="tags", value=tags, data_type=AnalyticsDataType.TAGS)
| NVFlare-main | nvflare/app_opt/tracking/mlflow/mlflow_writer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.tracking.log_writer_me import LogWriterForMetricsExchanger
from nvflare.app_common.tracking.tracker_types import LogWriterName
class MLflowWriterForMetricsExchanger(LogWriterForMetricsExchanger):
"""MLflowWriter mimics the usage of mlflow.
Users can replace the import of mlflow with MLflowWriter. They would then use
MLflowWriter the same as they would use mlflow. MLflowWriter will send log records to
the receiver through MetricsExchanger.
"""
def get_writer_name(self) -> LogWriterName:
"""Returns "MLFLOW"."""
return LogWriterName.MLFLOW
def log_param(self, key: str, value: any) -> None:
"""Log a parameter (e.g. model hyperparameter) under the current run.
Args:
key (str): Parameter name. This string may only contain alphanumerics,
underscores (_), dashes (-), periods (.), spaces ( ), and slashes (/).
All backend stores support keys up to length 250, but some may
support larger keys.
value (any): Parameter value, of type string, but will be string-ified if not.
All backend stores support values up to length 500, but some
may support larger values.
"""
self.log(key=key, value=value, data_type=AnalyticsDataType.PARAMETER)
def log_params(self, values: dict) -> None:
"""Log a batch of params for the current run.
Args:
values (dict): Dictionary of param_name: String -> value: (String, but will be string-ified if not)
"""
self.log(key="params", value=values, data_type=AnalyticsDataType.PARAMETERS)
def log_metric(self, key: str, value: float, step: Optional[int] = None) -> None:
"""Log a metric under the current run.
Args:
key (str): Metric name. This string may only contain alphanumerics, underscores (_), dashes (-),
periods (.), spaces ( ), and slashes (/). All backend stores will support keys up to length 250,
but some may support larger keys.
value (float): Metric value. Note that some special values such as +/- Infinity may be replaced by other
values depending on the store. For example, the SQLAlchemy store replaces +/- Infinity with
max / min float values. All backend stores will support values up to length 5000, but some may
support larger values.
step (int, optional): Metric step. Defaults to zero if unspecified.
"""
self.log(key=key, value=value, data_type=AnalyticsDataType.METRIC, global_step=step)
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
"""Log multiple metrics for the current run.
Args:
metrics (dict): Dictionary of metric_name: String -> value: Float. Note that some special values such as +/-
Infinity may be replaced by other values depending on the store. For example, sql based store
may replace +/- Infinity with max / min float values.
step (int, optional): A single integer step at which to log the specified Metrics. If unspecified, each metric is
logged at step zero.
"""
self.log(key="metrics", value=metrics, data_type=AnalyticsDataType.METRICS, global_step=step)
def log_text(self, text: str, artifact_file_path: str) -> None:
"""Log text as an artifact under the current run.
Args:
text (str): String of text to log.
artifact_file_path (str): The run-relative artifact file path in posixpath format
to which the text is saved (e.g. “dir/file.txt”).
"""
self.log(key="text", value=text, data_type=AnalyticsDataType.TEXT, path=artifact_file_path)
def set_tag(self, key: str, tag: any) -> None:
"""Set a tag under the current run.
Args:
key (str): Name of the tag.
tag (any): Tag value (string, but will be string-ified if not).
All backend stores will support values up to length 5000, but some
may support larger values.
"""
self.log(key=key, value=tag, data_type=AnalyticsDataType.TAG)
def set_tags(self, tags: dict) -> None:
"""Log a batch of tags for the current run.
Args:
tags (dict): Dictionary of tag_name: String -> value: (String, but will be string-ified if
not)
"""
self.log(key="tags", value=tags, data_type=AnalyticsDataType.TAGS)
| NVFlare-main | nvflare/app_opt/tracking/mlflow/mlflow_writer_metrics_exchanger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from multiprocessing import Process, Queue
from typing import NamedTuple, Optional
import wandb
from nvflare.apis.analytix import AnalyticsData, AnalyticsDataType
from nvflare.apis.dxo import from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.tracking.tracker_types import LogWriterName
from nvflare.app_common.widgets.streaming import AnalyticsReceiver
class WandBTask(NamedTuple):
task_owner: str
task_type: str
task_data: dict
step: int
class WandBReceiver(AnalyticsReceiver):
def __init__(self, kwargs: dict, mode: str = "offline", events=None, process_timeout=10):
if events is None:
events = ["fed.analytix_log_stats"]
super().__init__(events=events)
self.fl_ctx = None
self.mode = mode
self.kwargs = kwargs
self.queues = {}
self.processes = {}
self.process_timeout = process_timeout
# os.environ["WANDB_API_KEY"] = YOUR_KEY_HERE
os.environ["WANDB_MODE"] = self.mode
def job(self, queue):
cnt = 0
run = None
try:
while True:
wandb_task: WandBTask = queue.get()
cnt += 1
if wandb_task.task_type == "stop":
self.log_info(self.fl_ctx, f"received request to stop at {wandb_task.task_owner} for run {run}")
break
elif wandb_task.task_type == "init":
self.log_info(self.fl_ctx, f"received request to init at {wandb_task.task_owner}")
run = wandb.init(**wandb_task.task_data)
elif wandb_task.task_type == "log":
if cnt % 500 == 0:
self.log_info(self.fl_ctx, f"process task : {wandb_task}, cnt = {cnt}")
if wandb_task.step:
wandb.log(wandb_task.task_data, wandb_task.step)
else:
wandb.log(wandb_task.task_data)
finally:
if run:
run.finish()
def initialize(self, fl_ctx: FLContext):
self.fl_ctx = fl_ctx
sites = fl_ctx.get_engine().get_clients()
run_group_id = str(int(time.time()))
run_name = self.kwargs["name"]
job_id_tag = self.get_job_id_tag(run_group_id)
wand_config = self.kwargs.get("config", {})
for site in sites:
self.log_info(self.fl_ctx, f"initialize WandB run for site {site.name}")
self.kwargs["name"] = f"{site.name}-{job_id_tag[:6]}-{run_name}"
self.kwargs["group"] = f"{run_name}-{job_id_tag}"
wand_config["job_id"] = job_id_tag
wand_config["client"] = site.name
wand_config["run_name"] = run_name
self.check_kwargs(self.kwargs)
q = Queue()
wandb_task = WandBTask(task_owner=site.name, task_type="init", task_data=self.kwargs, step=0)
# q.put_nowait(wandb_task)
q.put(wandb_task)
self.queues[site.name] = q
p = Process(target=self.job, args=(q,))
self.processes[site.name] = p
p.start()
time.sleep(0.2)
def get_job_id_tag(self, group_id: str) -> str:
job_id = self.fl_ctx.get_job_id()
if job_id == "simulate_job":
# For simulator, the job ID is the same so we use a string of the time for the job_id_tag
job_id = group_id
return job_id
def save(self, fl_ctx: FLContext, shareable: Shareable, record_origin: str):
dxo = from_shareable(shareable)
data = AnalyticsData.from_dxo(dxo, receiver=LogWriterName.WANDB)
if not data:
return
q: Optional[Queue] = self.get_job_queue(record_origin)
if q:
if data.data_type == AnalyticsDataType.PARAMETER or data.data_type == AnalyticsDataType.METRIC:
log_data = {data.tag: data.value}
q.put(WandBTask(task_owner=record_origin, task_type="log", task_data=log_data, step=data.step))
elif data.data_type == AnalyticsDataType.PARAMETERS or data.data_type == AnalyticsDataType.METRICS:
q.put(WandBTask(task_owner=record_origin, task_type="log", task_data=data.value, step=data.step))
def finalize(self, fl_ctx: FLContext):
"""Called at EventType.END_RUN.
Args:
fl_ctx (FLContext): the FLContext
"""
for site in self.processes:
self.log_info(self.fl_ctx, f"inform {site} to stop")
q: Optional[Queue] = self.get_job_queue(site)
q.put(WandBTask(task_owner=site, task_type="stop", task_data={}, step=0))
for site in self.processes:
p = self.processes[site]
p.join(self.process_timeout)
p.terminate()
def get_job_queue(self, record_origin):
return self.queues.get(record_origin, None)
def check_kwargs(self, kwargs):
if "project" not in kwargs:
raise ValueError("must provide `project' value")
if "group" not in kwargs:
raise ValueError("must provide `group' value")
if "job_type" not in kwargs:
raise ValueError("must provide `job_type' value")
| NVFlare-main | nvflare/app_opt/tracking/wandb/wandb_receiver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/tracking/wandb/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.tracking.log_writer import LogWriter
from nvflare.app_common.tracking.tracker_types import LogWriterName
from nvflare.app_common.widgets.streaming import ANALYTIC_EVENT_TYPE
class WandBWriter(LogWriter):
def __init__(self, event_type: str = ANALYTIC_EVENT_TYPE):
super().__init__(event_type)
def get_writer_name(self) -> LogWriterName:
"""Returns "WEIGHTS_AND_BIASES"."""
return LogWriterName.WANDB
def log(self, metrics: Dict[str, float], step: Optional[int] = None):
"""Log multiple metrics for the current run.
Args:
metrics (Dict[str, float]): Dictionary of metric_name of type String to Float values.
step (int, optional): A single integer step at which to log the specified Metrics.
"""
self.sender.add(tag="metrics", value=metrics, data_type=AnalyticsDataType.METRICS, global_step=step)
| NVFlare-main | nvflare/app_opt/tracking/wandb/wandb_writer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.tracking.log_writer_me import LogWriterForMetricsExchanger
from nvflare.app_common.tracking.tracker_types import LogWriterName
class WandBWriterForMetricsExchanger(LogWriterForMetricsExchanger):
"""Sends experiment tracking data through MetricsExchanger."""
def get_writer_name(self) -> LogWriterName:
"""Returns "WEIGHTS_AND_BIASES"."""
return LogWriterName.WANDB
def log(self, metrics: Dict[str, float], step: Optional[int] = None):
"""Log multiple metrics for the current run.
Args:
metrics (Dict[str, float]): Dictionary of metric_name of type String to Float values.
step (int, optional): A single integer step at which to log the specified Metrics.
"""
super().log(key="metrics", value=metrics, data_type=AnalyticsDataType.METRICS, global_step=step)
| NVFlare-main | nvflare/app_opt/tracking/wandb/wandb_writer_metrics_exchanger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/psi/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import List, Optional
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.task_handler import TaskHandler
from nvflare.app_common.app_constant import PSIConst
from nvflare.app_common.psi.psi_spec import PSI
from nvflare.app_opt.psi.dh_psi.dh_psi_client import PSIClient
from nvflare.app_opt.psi.dh_psi.dh_psi_server import PSIServer
def check_items_uniqueness(items):
duplicates = {item: count for item, count in collections.Counter(items).items() if count > 1}
if duplicates:
raise ValueError(f"the items must be unique, the following items with duplicates {duplicates}")
class DhPSITaskHandler(TaskHandler):
"""Executor for Diffie-Hellman-based Algorithm PSI.
It handles the communication and FLARE server task delegation
User will write an interface local component : PSI to provide client items and get intersection
"""
def __init__(self, local_psi_id: str):
super().__init__(local_psi_id, PSI)
self.bloom_filter_fpr = None
self.psi_client = None
self.psi_server = None
self.intersects: Optional[List[str]] = None
self.local_psi_handler: Optional[PSI] = None
self.client_name = None
self.items = None
def initialize(self, fl_ctx: FLContext):
super().initialize(fl_ctx)
self.local_psi_handler = self.local_comp
def execute_task(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
client_name = fl_ctx.get_identity_name()
self.client_name = client_name
self.log_info(fl_ctx, f"Executing task '{task_name}' for {client_name}")
if PSIConst.TASK == task_name:
psi_stage_task = shareable.get(PSIConst.TASK_KEY)
self.log_info(fl_ctx, f"Executing psi_stage_task {psi_stage_task} for {client_name}")
if psi_stage_task == PSIConst.TASK_PREPARE:
self.bloom_filter_fpr = shareable[PSIConst.BLOOM_FILTER_FPR]
items = self.get_items()
self.psi_client = PSIClient(items)
self.psi_server = PSIServer(items, self.bloom_filter_fpr)
return self.get_items_size()
else:
if psi_stage_task == PSIConst.TASK_SETUP:
return self.setup(shareable, client_name)
elif psi_stage_task == PSIConst.TASK_REQUEST:
return self.create_request(shareable)
elif psi_stage_task == PSIConst.TASK_RESPONSE:
return self.process_request(shareable)
elif psi_stage_task == PSIConst.TASK_INTERSECT:
return self.calculate_intersection(shareable)
else:
raise RuntimeError(ReturnCode.TASK_UNKNOWN)
def create_request(self, shareable: Shareable):
setup_msg = shareable.get(PSIConst.SETUP_MSG)
self.psi_client.receive_setup(setup_msg)
request = self.psi_client.get_request(self.get_items())
result = Shareable()
result[PSIConst.REQUEST_MSG] = request
return result
def setup(self, shareable: Shareable, client_name: str):
items = self.get_items()
if len(items) == 0:
raise RuntimeError(f"site {client_name} doesn't have any items for to perform PSI")
# note, each interaction with client requires a new client,server keys to be secure.
self.psi_client = PSIClient(items)
self.psi_server = PSIServer(items, self.bloom_filter_fpr)
if PSIConst.ITEMS_SIZE in shareable:
target_item_size = shareable.get(PSIConst.ITEMS_SIZE)
setup_msg = self.psi_server.setup(target_item_size)
result = Shareable()
result[PSIConst.SETUP_MSG] = setup_msg
return result
elif PSIConst.ITEMS_SIZE_SET in shareable:
target_item_size_set = shareable.get(PSIConst.ITEMS_SIZE_SET)
result = Shareable()
setup_sets = {}
for client_iterm_size in target_item_size_set:
setup_msg = self.psi_server.setup(client_iterm_size)
setup_sets[str(client_iterm_size)] = setup_msg
result[PSIConst.SETUP_MSG] = setup_sets
return result
def get_items_size(self):
result = Shareable()
result[PSIConst.ITEMS_SIZE] = len(self.get_items())
return result
def process_request(self, shareable: Shareable):
if PSIConst.REQUEST_MSG in shareable:
request_msg = shareable.get(PSIConst.REQUEST_MSG)
response = self.psi_server.process_request(request_msg)
result = Shareable()
result[PSIConst.RESPONSE_MSG] = response
return result
elif PSIConst.REQUEST_MSG_SET in shareable:
request_msgs = shareable.get(PSIConst.REQUEST_MSG_SET)
result = Shareable()
client_responses = {}
for client_name in request_msgs:
response = self.psi_server.process_request(request_msgs[client_name])
client_responses[client_name] = response
result[PSIConst.RESPONSE_MSG] = client_responses
else:
raise ValueError(
"Required PSI Message PSIConst.PSI_REQUEST_MSG or PSIConst.PSI_REQUEST_MSG_SET is not provided"
)
return result
def calculate_intersection(self, shareable: Shareable):
response_msg = shareable.get(PSIConst.RESPONSE_MSG)
intersections = self.psi_client.get_intersection(response_msg)
self.intersects = intersections
self.local_psi_handler.save(intersections)
result = Shareable()
result[PSIConst.ITEMS_SIZE] = len(intersections)
return result
def get_items(self):
if not self.intersects:
if self.items is None:
items = self.local_psi_handler.load_items()
check_items_uniqueness(items)
self.items = items
else:
self.items = self.intersects
return self.items
| NVFlare-main | nvflare/app_opt/psi/dh_psi/dh_psi_task_handler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/psi/dh_psi/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
# version>= 1.0.3
import private_set_intersection.python as psi
class PSIClient:
"""
Class to represent the psi Client in a two-party client, server PSI model.
"""
def __init__(self, items: List[str]):
"""
Args:
items: the items provided by the client
"""
if len(items) == 0:
raise RuntimeError("Client items cannot be empty")
self.reveal_intersection = True
self.psi_client = psi.client.CreateWithNewKey(self.reveal_intersection)
self.items = items
self.setup = None
def get_items_size(self) -> int:
return len(self.items)
def receive_setup(self, setup_msg: str):
"""
Args:
setup_msg: serialized setup str
"""
s_setup_sub = psi.ServerSetup()
s_setup_sub.ParseFromString(setup_msg)
self.setup = s_setup_sub
def get_request(self, items):
self.items = items
request = self.psi_client.CreateRequest(items).SerializeToString()
return request
def get_intersection(self, server_response_msg: str) -> List[str]:
"""Returns the intersection of client and server items.
Args: server_response_msg (PsiProtoResponse): The server response serialized string
Returns:
The intersection set (List[str]) of client and server items
"""
resp_sub = psi.Response()
resp_sub.ParseFromString(server_response_msg)
response = resp_sub
client_item_indices = sorted(self.psi_client.GetIntersection(self.setup, response))
item_size = self.get_items_size()
# if the index is out of client item range, simply ignore.
return [self.items[i] for i in client_item_indices if i < item_size]
| NVFlare-main | nvflare/app_opt/psi/dh_psi/dh_psi_client.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
# version >=1.0.3
import private_set_intersection.python as psi
class PSIServer:
"""
Class to represent the psi server in a two-party client, server PSI model.
"""
def __init__(self, items: List[str], fpr: float = 1e-9):
"""
Args:
items: the items provided by the server
fpr: The false positive ratio,
note: if the fpr is very small such as 1e-11,
PSI algorithm can fail due to a known bug (https://github.com/OpenMined/PSI/issues/143)
"""
if len(items) == 0:
raise ValueError("Server items cannot be empty")
self.reveal_intersection = True
self.psi_server = psi.server.CreateWithNewKey(self.reveal_intersection)
self.items = items
self.fpr = fpr
def setup(self, client_items_size: int):
"""Return the psi setup
Args:
client_items_size (int): The length of the client items
Returns:
setup (ServerSetup): The server setup protobuf serialize string
"""
# version >= 1.0.3
setup = self.psi_server.CreateSetupMessage(
self.fpr, client_items_size, self.items, psi.DataStructure.BLOOM_FILTER
)
return setup.SerializeToString()
def process_request(self, client_request_msg) -> str:
"""Returns the corresponding response for the client to compute the private set intersection.
Args:
client_request_msg (Request): The client request serialized string
Returns:
response (Response): The server response serialized str
"""
req_stub = psi.Request()
req_stub.ParseFromString(client_request_msg)
request = req_stub
response = self.psi_server.ProcessRequest(request)
return response.SerializeToString()
| NVFlare-main | nvflare/app_opt/psi/dh_psi/dh_psi_server.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/tf/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_opt.tf.utils import get_flat_weights, load_flat_weights
class TFModelPersistor(ModelPersistor):
def __init__(self, model: tf.keras.Model, save_name="tf_model.ckpt"):
super().__init__()
self.save_name = save_name
self.model = model
def _initialize(self, fl_ctx: FLContext):
workspace = fl_ctx.get_engine().get_workspace()
app_root = workspace.get_app_dir(fl_ctx.get_job_id())
self._model_save_path = os.path.join(app_root, self.save_name)
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
"""Initializes and loads the Model.
Args:
fl_ctx: FLContext
Returns:
ModelLearnable object
"""
if os.path.exists(self._model_save_path):
self.logger.info("Loading server model and weights")
self.model.load_weights(self._model_save_path)
# get flat model parameters
result = get_flat_weights(self.model)
model_learnable = make_model_learnable(result, dict())
return model_learnable
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
self._initialize(fl_ctx)
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
"""Saves model.
Args:
model_learnable: ModelLearnable object
fl_ctx: FLContext
"""
load_flat_weights(self.model, model_learnable[ModelLearnableKey.WEIGHTS])
self.model.save_weights(self._model_save_path)
| NVFlare-main | nvflare/app_opt/tf/model_persistor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SPECIAL_KEY = "_nvf_"
def get_flat_weights(network):
"""Gets flat network weights.
network get_weights() will give a dict of list of arrays.
for NVFlare server side to work, it needs a dict of arrays.
So we flatten this list using different key.
For example:
If the original network get_weights return: {"layer0": [array1, array2]}
We will flat it to: {"layer0_nvf_0": array1, "layer0_nvf_1": array2}
"""
result = {}
for layer in network.layers:
weights = layer.get_weights()
if len(weights) != 0:
for i, item in enumerate(weights):
result[f"{layer.name}{SPECIAL_KEY}{i}"] = item
return result
def load_flat_weights(network, data):
"""Loads the flat weights.
For example:
If the flat weight is: {"layer0_nvf_0": array1, "layer0_nvf_1": array2}
We will convert it back to: {"layer0": [array1, array2]} and load it back
"""
result = {}
for k, v in data.items():
if SPECIAL_KEY in k:
layer_name, _ = k.split(SPECIAL_KEY)
if layer_name not in result:
result[layer_name] = []
result[layer_name].append(v)
for k in result:
layer = network.get_layer(k)
layer.set_weights(result[k])
| NVFlare-main | nvflare/app_opt/tf/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple
from nvflare.apis.shareable import Shareable
from nvflare.widgets.widget import Widget
from .client import Client
from .engine_spec import EngineSpec
from .fl_context import FLContext
from .fl_snapshot import RunSnapshot
from .workspace import Workspace
class ServerEngineSpec(EngineSpec, ABC):
@abstractmethod
def fire_event(self, event_type: str, fl_ctx: FLContext):
pass
@abstractmethod
def get_clients(self) -> List[Client]:
pass
@abstractmethod
def sync_clients_from_main_process(self):
"""To fetch the participating clients from the main parent process
Returns: clients
"""
pass
@abstractmethod
def update_job_run_status(self):
"""To update the job run status to parent process."""
pass
@abstractmethod
def new_context(self) -> FLContext:
# the engine must use FLContextManager to create a new context!
pass
@abstractmethod
def get_workspace(self) -> Workspace:
pass
@abstractmethod
def get_component(self, component_id: str) -> object:
pass
@abstractmethod
def register_aux_message_handler(self, topic: str, message_handle_func):
"""Register aux message handling function with specified topics.
Exception is raised when:
a handler is already registered for the topic;
bad topic - must be a non-empty string
bad message_handle_func - must be callable
Implementation Note:
This method should simply call the ServerAuxRunner's register_aux_message_handler method.
Args:
topic: the topic to be handled by the func
message_handle_func: the func to handle the message. Must follow aux_message_handle_func_signature.
"""
pass
@abstractmethod
def send_aux_request(
self,
targets: [],
topic: str,
request: Shareable,
timeout: float,
fl_ctx: FLContext,
optional=False,
secure=False,
) -> dict:
"""Send a request to specified clients via the aux channel.
Implementation: simply calls the ServerAuxRunner's send_aux_request method.
Args:
targets: target clients. None or empty list means all clients
topic: topic of the request
request: request to be sent
timeout: number of secs to wait for replies. 0 means fire-and-forget.
fl_ctx: FL context
optional: whether this message is optional
secure: send the aux request in a secure way
Returns: a dict of replies (client name => reply Shareable)
"""
pass
def fire_and_forget_aux_request(
self, targets: [], topic: str, request: Shareable, fl_ctx: FLContext, optional=False, secure=False
) -> dict:
return self.send_aux_request(targets, topic, request, 0.0, fl_ctx, optional, secure=secure)
@abstractmethod
def get_widget(self, widget_id: str) -> Widget:
"""Get the widget with the specified ID.
Args:
widget_id: ID of the widget
Returns: the widget or None if not found
"""
pass
@abstractmethod
def persist_components(self, fl_ctx: FLContext, completed: bool):
"""To persist the FL running components
Args:
fl_ctx: FLContext
completed: flag to indicate where the run is complete
Returns:
"""
pass
@abstractmethod
def restore_components(self, snapshot: RunSnapshot, fl_ctx: FLContext):
"""To restore the FL components from the saved snapshot
Args:
snapshot: RunSnapshot
fl_ctx: FLContext
Returns:
"""
pass
@abstractmethod
def start_client_job(self, job_id, client_sites):
"""To send the start client run commands to the clients
Args:
client_sites: client sites
job_id: job_id
Returns:
"""
pass
@abstractmethod
def check_client_resources(
self, job_id: str, resource_reqs: Dict[str, dict]
) -> Dict[str, Tuple[bool, Optional[str]]]:
"""Sends the check_client_resources requests to the clients.
Args:
job_id: ID of the job
resource_reqs: A dict of {client_name: resource requirements dict}
Returns:
A dict of {client_name: client_check_result}.
client_check_result is a tuple of (is_resource_enough, token);
is_resource_enough is a bool indicates whether there is enough resources;
token is for resource reservation / cancellation for this check request.
"""
pass
@abstractmethod
def cancel_client_resources(
self, resource_check_results: Dict[str, Tuple[bool, str]], resource_reqs: Dict[str, dict]
):
"""Cancels the request resources for the job.
Args:
resource_check_results: A dict of {client_name: client_check_result}
where client_check_result is a tuple of (is_resource_enough, resource reserve token if any)
resource_reqs: A dict of {client_name: resource requirements dict}
"""
pass
@abstractmethod
def get_client_name_from_token(self, token: str) -> str:
"""Gets the client name from client login token.
Args:
token: client login token
Returns:
Client name
"""
pass
| NVFlare-main | nvflare/apis/server_engine_spec.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Dict, List, Union
from .dxo import DXO, DataKind, from_shareable
from .filter import Filter, FilterContextKey
from .fl_constant import ReturnCode
from .fl_context import FLContext
from .shareable import Shareable
from .utils.fl_context_utils import add_job_audit_event
class DXOFilter(Filter, ABC):
"""
This is the base class for DXO-based filters
"""
def __init__(self, supported_data_kinds: Union[None, List[str]], data_kinds_to_filter: Union[None, List[str]]):
"""
Args:
supported_data_kinds: kinds of DXO this filter supports. Empty means all kinds.
data_kinds_to_filter: kinds of DXO data to filter. Empty means all kinds.
"""
Filter.__init__(self)
if supported_data_kinds and not isinstance(supported_data_kinds, list):
raise ValueError(f"supported_data_kinds must be a list of str but got {type(supported_data_kinds)}")
if data_kinds_to_filter and not isinstance(data_kinds_to_filter, list):
raise ValueError(f"data_kinds_to_filter must be a list of str but got {type(data_kinds_to_filter)}")
if supported_data_kinds and data_kinds_to_filter:
if not all(dk in supported_data_kinds for dk in data_kinds_to_filter):
raise ValueError(f"invalid data kinds: {data_kinds_to_filter}. Only support {data_kinds_to_filter}")
if not data_kinds_to_filter:
data_kinds_to_filter = supported_data_kinds
self.data_kinds = data_kinds_to_filter
def process(self, shareable: Shareable, fl_ctx: FLContext):
rc = shareable.get_return_code()
if rc != ReturnCode.OK:
# don't process if RC not OK
return shareable
try:
dxo = from_shareable(shareable)
except:
# not a DXO based shareable - pass
return shareable
if dxo.data is None:
self.log_debug(fl_ctx, "DXO has no data to filter")
return shareable
start = [dxo]
self._filter_dxos(start, shareable, fl_ctx)
result_dxo = start[0]
return result_dxo.update_shareable(shareable)
@abstractmethod
def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:
"""Subclass must implement this method to filter the provided DXO
Args:
dxo: the DXO to be filtered
shareable: the shareable that the dxo belongs to
fl_ctx: the FL context
Returns:
A DXO object that is the result of the filtering, if filtered;
None if not filtered.
"""
pass
def _apply_filter(self, dxo: DXO, shareable, fl_ctx: FLContext) -> DXO:
if not dxo.data:
self.log_debug(fl_ctx, "DXO has no data to filter")
return dxo
filter_name = self.__class__.__name__
result = self.process_dxo(dxo, shareable, fl_ctx)
if not result:
# not filtered
result = dxo
elif not isinstance(result, DXO):
raise RuntimeError(f"Result from {filter_name} is {type(result)} - must be DXO")
else:
if result != dxo:
# result is a new DXO - copy filter history from original dxo
result.add_filter_history(dxo.get_filter_history())
result.add_filter_history(filter_name)
chain_type = self.get_prop(FilterContextKey.CHAIN_TYPE, "?")
source = self.get_prop(FilterContextKey.SOURCE, "?")
add_job_audit_event(fl_ctx=fl_ctx, msg=f"applied filter: {filter_name}@{source} on {chain_type}")
return result
def _filter_dxos(self, dxo_collection: Union[List[DXO], Dict[str, DXO]], shareable, fl_ctx):
if isinstance(dxo_collection, list):
for i in range(len(dxo_collection)):
v = dxo_collection[i]
if not isinstance(v, DXO):
continue
if v.data_kind == DataKind.COLLECTION:
self._filter_dxos(v.data, shareable, fl_ctx)
elif not self.data_kinds or v.data_kind in self.data_kinds:
dxo_collection[i] = self._apply_filter(v, shareable, fl_ctx)
elif isinstance(dxo_collection, dict):
for k, v in dxo_collection.items():
assert isinstance(v, DXO)
if v.data_kind == DataKind.COLLECTION:
self._filter_dxos(v.data, shareable, fl_ctx)
elif not self.data_kinds or v.data_kind in self.data_kinds:
dxo_collection[k] = self._apply_filter(v, shareable, fl_ctx)
else:
raise ValueError(f"DXO COLLECTION must be a dict or list but got {type(dxo_collection)}")
| NVFlare-main | nvflare/apis/dxo_filter.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from enum import Enum
from typing import Dict, List, Optional
from nvflare.apis.fl_constant import SystemComponents
from nvflare.apis.fl_context import FLContext
# this is treated as all online sites in job deploy_map
ALL_SITES = "@ALL"
SERVER_SITE_NAME = "server"
class RunStatus(str, Enum):
SUBMITTED = "SUBMITTED"
APPROVED = "APPROVED"
DISPATCHED = "DISPATCHED"
RUNNING = "RUNNING"
FINISHED_COMPLETED = "FINISHED:COMPLETED"
FINISHED_ABORTED = "FINISHED:ABORTED"
FINISHED_EXECUTION_EXCEPTION = "FINISHED:EXECUTION_EXCEPTION"
FINISHED_ABNORMAL = "FINISHED:ABNORMAL"
FINISHED_CANT_SCHEDULE = "FINISHED:CAN_NOT_SCHEDULE"
FAILED_TO_RUN = "FINISHED:FAILED_TO_RUN"
ABANDONED = "FINISHED:ABANDONED"
class JobDataKey(str, Enum):
DATA = "data"
META = "meta"
JOB_DATA = "job_data_"
WORKSPACE_DATA = "workspace_data_"
class JobMetaKey(str, Enum):
JOB_ID = "job_id"
JOB_NAME = "name"
JOB_FOLDER_NAME = "job_folder_name"
SUBMITTER_NAME = "submitter_name"
SUBMITTER_ORG = "submitter_org"
SUBMITTER_ROLE = "submitter_role"
STATUS = "status"
DEPLOY_MAP = "deploy_map"
RESOURCE_SPEC = "resource_spec"
CONTENT_LOCATION = "content_location"
RESULT_LOCATION = "result_location"
APPROVALS = "approvals"
MIN_CLIENTS = "min_clients"
MANDATORY_CLIENTS = "mandatory_clients"
SUBMIT_TIME = "submit_time"
SUBMIT_TIME_ISO = "submit_time_iso"
START_TIME = "start_time"
DURATION = "duration"
JOB_DEPLOY_DETAIL = "job_deploy_detail"
SCHEDULE_COUNT = "schedule_count"
SCOPE = "scope"
CLONED_FROM = "cloned_from"
LAST_SCHEDULE_TIME = "last_schedule_time"
SCHEDULE_HISTORY = "schedule_history"
STATS_POOL_CONFIG = "stats_pool_config"
FROM_HUB_SITE = "from_hub_site"
CUSTOM_PROPS = "custom_props"
def __repr__(self):
return self.value
class TopDir(object):
JOB = "job"
WORKSPACE = "workspace"
class Job:
def __init__(
self,
job_id: str,
resource_spec: Dict[str, Dict],
deploy_map: Dict[str, List[str]],
meta,
min_sites: int = 1,
required_sites: Optional[List[str]] = None,
):
"""Job object containing the job metadata.
Args:
job_id: Job ID
resource_spec: Resource specification with information on the resources of each client
deploy_map: Deploy map specifying each app and the sites that it should be deployed to
meta: full contents of the persisted metadata for the job for persistent storage
min_sites (int): minimum number of sites
required_sites: A list of required site names
"""
self.job_id = job_id
self.resource_spec = resource_spec # resource_requirements should be {site name: resource}
self.deploy_map = deploy_map # should be {app name: a list of sites}
self.meta = meta
self.min_sites = min_sites
self.required_sites = required_sites
if not self.required_sites:
self.required_sites = []
self.dispatcher_id = None
self.dispatch_time = None
self.submit_time = None
self.run_record = None # job id, dispatched time/UUID, finished time, completion code (normal, aborted)
self.run_aborted = False
def get_deployment(self) -> Dict[str, List[str]]:
"""Returns the deployment configuration.
::
"deploy_map": {
"hello-numpy-sag-server": [
"server"
],
"hello-numpy-sag-client": [
"client1",
"client2"
],
"hello-numpy-sag-client3": [
"client3"
]
},
Returns:
Contents of deploy_map as a dictionary of strings of app names with their corresponding sites
"""
return self.deploy_map
def get_application(self, app_name, fl_ctx: FLContext) -> bytes:
"""Get the application content in bytes for the specified participant."""
# application_name = self.get_application_name(participant)
engine = fl_ctx.get_engine()
job_def_manager = engine.get_component(SystemComponents.JOB_MANAGER)
# # if not isinstance(job_def_manager, JobDefManagerSpec):
# # raise TypeError(f"job_def_manager must be JobDefManagerSpec type. Got: {type(job_def_manager)}")
return job_def_manager.get_app(self, app_name, fl_ctx)
def get_application_name(self, participant):
"""Get the application name for the specified participant."""
for app in self.deploy_map:
for site in self.deploy_map[app]:
if site == participant:
return app
return None
def get_resource_requirements(self):
"""Returns app resource requirements.
Returns:
A dict of {site_name: resource}
"""
return self.resource_spec
def __eq__(self, other):
return self.job_id == other.job_id
def job_from_meta(meta: dict) -> Job:
"""Converts information in meta into a Job object.
Args:
meta: dict of meta information
Returns:
A Job object.
"""
job = Job(
job_id=meta.get(JobMetaKey.JOB_ID, ""),
resource_spec=meta.get(JobMetaKey.RESOURCE_SPEC, {}),
deploy_map=meta.get(JobMetaKey.DEPLOY_MAP, {}),
meta=meta,
min_sites=meta.get(JobMetaKey.MIN_CLIENTS, 1),
required_sites=meta.get(JobMetaKey.MANDATORY_CLIENTS, []),
)
return job
def new_job_id() -> str:
return str(uuid.uuid4())
def is_valid_job_id(jid: str) -> bool:
if not isinstance(jid, str):
return False
try:
val = uuid.UUID(jid, version=4)
except ValueError:
return False
# If the jid string is a valid hex code, but an invalid uuid4,the UUID.__init__ will convert it to a
# valid uuid4. This is bad for validation purposes.
return val.hex == jid.replace("-", "")
def get_custom_prop(meta: dict, prop_key: str, default=None):
props = meta.get(JobMetaKey.CUSTOM_PROPS)
if not props:
return default
return props.get(prop_key, default)
def get_custom_props(meta: dict, default=None):
return meta.get(JobMetaKey.CUSTOM_PROPS, default)
| NVFlare-main | nvflare/apis/job_def.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Dict, Tuple
class AppValidationKey(object):
BYOC = "byoc"
class AppValidator(ABC):
@abstractmethod
def validate(self, app_folder: str) -> Tuple[str, Dict]:
"""Validate and/or clean the content of specified application folder.
Args:
app_folder: path to the app folder to be validated
Returns:
A tuple of (error_msg, app_validation_props)
error_msg contains error message if failed to pass; otherwise an empty string.
app_validation_props is a dict of properties of the app.
For example: the result could be ("", {"byoc": True})
"""
pass
| NVFlare-main | nvflare/apis/app_validation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from .fl_context import FLContext
from .workspace import Workspace
class AppDeployerSpec(ABC):
@abstractmethod
def deploy(
self, workspace: Workspace, job_id: str, job_meta: dict, app_name: str, app_data: bytes, fl_ctx: FLContext
) -> str:
pass
| NVFlare-main | nvflare/apis/app_deployer_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import List, Union
from nvflare.apis.fl_constant import FLMetaKey
from nvflare.apis.shareable import ReservedHeaderKey, Shareable
from nvflare.fuel.utils import fobs
class DataKind(object):
FL_MODEL = "FL_MODEL"
WEIGHTS = "WEIGHTS"
WEIGHT_DIFF = "WEIGHT_DIFF"
XGB_MODEL = "XGB_MODEL"
METRICS = "METRICS"
ANALYTIC = "ANALYTIC"
COLLECTION = "COLLECTION" # Dict or List of DXO objects
STATISTICS = "STATISTICS"
PSI = "PSI"
class MetaKey(FLMetaKey):
pass
_KEY_KIND = "kind"
_KEY_DATA = "data"
_KEY_META = "meta"
_KEY_DXO = "DXO"
class DXO(object):
def __init__(self, data_kind: str, data: dict, meta: dict = None):
"""Init the DXO.
The Data Exchange Object standardizes the data passed between communicating parties.
Args:
data_kind: kind of data
data: clear-text data
meta: None or dict for any additional properties
"""
if data is None:
data = {}
if meta is None:
meta = {}
self.data_kind = data_kind
self.data = data
self.meta = meta
err = self.validate()
if err:
raise ValueError("invalid DXO: {}".format(err))
def get_meta_prop(self, key: str, default=None):
if self.meta and isinstance(self.meta, dict):
return self.meta.get(key, default)
return default
def set_meta_prop(self, key: str, value):
if self.meta is None:
self.meta = {}
self.meta[key] = value
def remove_meta_props(self, keys: List[str]):
if self.meta and keys:
for k in keys:
self.meta.pop(k, None)
def get_meta_props(self):
return self.meta
def update_meta_props(self, meta):
self.meta.update(copy.deepcopy(meta))
def _encode(self) -> dict:
return {_KEY_KIND: self.data_kind, _KEY_DATA: self.data, _KEY_META: self.meta}
def update_shareable(self, s: Shareable) -> Shareable:
s.set_header(key=ReservedHeaderKey.CONTENT_TYPE, value="DXO")
s[_KEY_DXO] = self._encode()
return s
def to_shareable(self) -> Shareable:
"""Convert the DXO object into Shareable.
Returns:
Shareable object.
"""
s = Shareable()
return self.update_shareable(s)
def to_bytes(self) -> bytes:
"""Serialize the DXO object into bytes.
Returns:
object serialized in bytes.
"""
return fobs.dumps(self)
def validate(self) -> str:
if self.data is None:
return "missing data"
if not isinstance(self.data, dict):
return "invalid data: expect dict but got {}".format(type(self.data))
if self.meta is not None and not isinstance(self.meta, dict):
return "invalid props: expect dict but got {}".format(type(self.meta))
return ""
def add_filter_history(self, filter_name: Union[str, List[str]]):
if not filter_name:
return
hist = self.get_meta_prop(MetaKey.FILTER_HISTORY)
if not hist:
hist = []
self.set_meta_prop(MetaKey.FILTER_HISTORY, hist)
if isinstance(filter_name, str):
hist.append(filter_name)
elif isinstance(filter_name, list):
hist.extend(filter_name)
def get_filter_history(self):
return self.get_meta_prop(MetaKey.FILTER_HISTORY)
def from_shareable(s: Shareable) -> DXO:
"""Convert Shareable into a DXO object.
Args:
s: Shareable object
Returns:
DXO object.
"""
content_type = s.get_header(ReservedHeaderKey.CONTENT_TYPE)
if not content_type or content_type != "DXO":
raise ValueError("the shareable is not a valid DXO - expect content_type DXO but got {}".format(content_type))
encoded = s.get(_KEY_DXO, None)
if not encoded:
raise ValueError("the shareable is not a valid DXO - missing content")
if not isinstance(encoded, dict):
raise ValueError(
"the shareable is not a valid DXO - should be encoded as dict but got {}".format(type(encoded))
)
k = encoded.get(_KEY_KIND, None)
d = encoded.get(_KEY_DATA, None)
m = encoded.get(_KEY_META, None)
return DXO(data_kind=k, data=d, meta=m)
def from_bytes(data: bytes) -> DXO:
"""Convert the data bytes into Model object.
Args:
data: a bytes object
Returns:
an object loaded by FOBS from data
"""
x = fobs.loads(data)
if isinstance(x, DXO):
return x
else:
raise ValueError("Data bytes are from type {} and do not represent a valid DXO instance.".format(type(x)))
def get_leaf_dxos(dxo: DXO, root_name: str = "") -> (dict, list):
"""Traverse the specified dxo tree and return all leaf DXOs.
The input dxo is a simple DXO or a collection DXO as a dict of DXOs.
Args:
dxo: the DXO object to be traversed
root_name: the root name of the DXO
Returns: a dict of dxo_path => DXO object. The dxo path is the full path from the root to the leaf node,
concatenation of all node names, separated by dots.
A list of errors encountered during traversing.
"""
result = {}
errors = []
_traverse(dxo, root_name, result, errors, {})
return result, errors
def _traverse(dxo: DXO, name: str, result, errors, visited: dict):
obj_id = id(dxo)
if visited.get(obj_id):
print(f"dxo {name} already visited - ignore it")
return
visited[obj_id] = True
if not isinstance(dxo, DXO):
errors.append(f"dxo '{name}' must be DXO but got {type(dxo)}")
return
if dxo.data_kind == DataKind.COLLECTION:
if not isinstance(dxo.data, dict):
errors.append(f"dxo '{name}' is a collection but data is {type(dxo.data)} - must be dict")
return
for k, v in dxo.data.items():
_traverse(v, f"{name}.{k}", result, errors, visited)
else:
result[name] = dxo
| NVFlare-main | nvflare/apis/dxo.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
class Signal(object):
def __init__(self, parent=None):
"""Init the Signal.
Used to signal between and within FL Components.
"""
self._value = None
self._trigger_time = None
self._triggered = False
self._parent = parent
def trigger(self, value):
"""Trigger the Signal.
Args:
value: set the value of the signal
"""
self._value = value
self._trigger_time = time.time()
self._triggered = True
@property
def value(self):
return self._value
@property
def trigger_time(self):
return self._trigger_time
def reset(self, value=None):
"""Reset the Signal.
Args:
value: reset the value of the signal
"""
self._value = value
self._trigger_time = None
self._triggered = False
@property
def triggered(self):
if self._triggered:
return True
if self._parent:
return self._parent.triggered
else:
return False
| NVFlare-main | nvflare/apis/signal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from nvflare.apis.dxo import DXO, DataKind
from nvflare.app_common.tracking.tracker_types import LogWriterName, TrackConst
_DATA_TYPE_KEY = "analytics_data_type"
_KWARGS_KEY = "analytics_kwargs"
class AnalyticsDataType(Enum):
SCALARS = "SCALARS"
SCALAR = "SCALAR"
IMAGE = "IMAGE"
TEXT = "TEXT"
LOG_RECORD = "LOG_RECORD"
PARAMETER = "PARAMETER"
PARAMETERS = "PARAMETERS"
METRIC = "METRIC"
METRICS = "METRICS"
MODEL = "MODEL"
# # MLFLOW ONLY
TAG = "TAG"
TAGS = "TAGS"
INIT_DATA = "INIT_DATA"
class AnalyticsData:
def __init__(
self,
key: str,
value,
data_type: AnalyticsDataType,
sender: LogWriterName = LogWriterName.TORCH_TB,
**kwargs,
):
"""This class defines AnalyticsData format.
It is a wrapper to provide to/from DXO conversion.
Args:
key (str): tag name
value: value
data_type (AnalyticDataType): type of the analytic data.
sender (LogWriterName): Type of sender for syntax such as Tensorboard or MLflow
kwargs (optional, dict): additional arguments to be passed.
"""
self._validate_data_types(data_type, key, value, **kwargs)
self.tag = key
self.value = value
self.data_type = data_type
self.kwargs = kwargs
self.sender = sender
self.step = kwargs.get(TrackConst.GLOBAL_STEP_KEY, None)
self.path = kwargs.get(TrackConst.PATH_KEY, None)
def to_dxo(self):
"""Converts the AnalyticsData to DXO object.
Returns:
DXO object
"""
data = {TrackConst.TRACK_KEY: self.tag, TrackConst.TRACK_VALUE: self.value}
if self.step is not None:
data[TrackConst.GLOBAL_STEP_KEY] = self.step
if self.path:
data[TrackConst.PATH_KEY] = self.path
if self.kwargs:
data[TrackConst.KWARGS_KEY] = self.kwargs
dxo = DXO(data_kind=DataKind.ANALYTIC, data=data)
dxo.set_meta_prop(TrackConst.DATA_TYPE_KEY, self.data_type)
dxo.set_meta_prop(TrackConst.TRACKER_KEY, self.sender)
return dxo
@classmethod
def from_dxo(cls, dxo: DXO, receiver: LogWriterName = LogWriterName.TORCH_TB):
"""Generates the AnalyticsData from DXO object.
Args:
receiver: type of the experiment tacker, defaults to Tensorboard with LogWriterName.TORCH_TB.
dxo (DXO): The DXO object to convert.
Returns:
AnalyticsData object
"""
if not isinstance(dxo, DXO):
raise TypeError("expect dxo to be an instance of DXO, but got {}.".format(type(dxo)))
if len(dxo.data) == 0:
raise ValueError(
"dxo does not have the correct format for AnalyticsData; expected dxo.data to be length > 0, but got 0"
)
data = dxo.data
key = data[TrackConst.TRACK_KEY]
value = data[TrackConst.TRACK_VALUE]
kwargs = data.get(TrackConst.KWARGS_KEY, {})
step = data.get(TrackConst.GLOBAL_STEP_KEY, None)
if step is not None:
kwargs[TrackConst.GLOBAL_STEP_KEY] = step
data_type = dxo.get_meta_prop(TrackConst.DATA_TYPE_KEY)
writer = dxo.get_meta_prop(TrackConst.TRACKER_KEY)
if writer is not None and writer != receiver:
data_type = cls.convert_data_type(data_type, writer, receiver)
if not data_type:
return None
if not kwargs:
return cls(key, value, data_type, writer)
else:
return cls(key, value, data_type, writer, **kwargs)
def _validate_data_types(
self,
data_type: AnalyticsDataType,
key: str,
value: any,
**kwargs,
):
if not isinstance(key, str):
raise TypeError("expect tag to be an instance of str, but got {}.".format(type(key)))
if not isinstance(data_type, AnalyticsDataType):
raise TypeError(
"expect data_type to be an instance of AnalyticsDataType, but got {}.".format(type(data_type))
)
if kwargs and not isinstance(kwargs, dict):
raise TypeError("expect kwargs to be an instance of dict, but got {}.".format(type(kwargs)))
step = kwargs.get(TrackConst.GLOBAL_STEP_KEY, None)
if step:
if not isinstance(step, int):
raise TypeError("expect step to be an instance of int, but got {}.".format(type(step)))
if step < 0:
raise ValueError("expect step to be non-negative int, but got {}.".format(step))
path = kwargs.get(TrackConst.PATH_KEY, None)
if path and not isinstance(path, str):
raise TypeError("expect path to be an instance of str, but got {}.".format(type(step)))
if data_type in [AnalyticsDataType.SCALAR, AnalyticsDataType.METRIC] and not isinstance(value, float):
raise TypeError(f"expect '{key}' value to be an instance of float, but got '{type(value)}'.")
elif data_type in [
AnalyticsDataType.METRICS,
AnalyticsDataType.PARAMETERS,
AnalyticsDataType.SCALARS,
] and not isinstance(value, dict):
raise TypeError(f"expect '{key}' value to be an instance of dict, but got '{type(value)}'.")
elif data_type == AnalyticsDataType.TEXT and not isinstance(value, str):
raise TypeError(f"expect '{key}' value to be an instance of str, but got '{type(value)}'.")
elif data_type == AnalyticsDataType.TAGS and not isinstance(value, dict):
raise TypeError(
f"expect '{key}' data type expects value to be an instance of dict, but got '{type(value)}'"
)
@classmethod
def convert_data_type(
cls, sender_data_type: AnalyticsDataType, sender: LogWriterName, receiver: LogWriterName
) -> AnalyticsDataType:
if sender == LogWriterName.TORCH_TB and receiver == LogWriterName.MLFLOW:
if AnalyticsDataType.SCALAR == sender_data_type:
return AnalyticsDataType.METRIC
elif AnalyticsDataType.SCALARS == sender_data_type:
return AnalyticsDataType.METRICS
else:
return sender_data_type
if sender == LogWriterName.MLFLOW and receiver == LogWriterName.TORCH_TB:
if AnalyticsDataType.PARAMETER == sender_data_type:
return AnalyticsDataType.SCALAR
elif AnalyticsDataType.PARAMETERS == sender_data_type:
return AnalyticsDataType.SCALARS
elif AnalyticsDataType.METRIC == sender_data_type:
return AnalyticsDataType.SCALAR
elif AnalyticsDataType.METRICS == sender_data_type:
return AnalyticsDataType.SCALARS
else:
return sender_data_type
def __str__(self) -> str:
return f"AnalyticsData(tag: {self.tag}, value: {self.value}, data_type: {self.data_type}, kwargs: {self.kwargs}, step: {self.step})"
| NVFlare-main | nvflare/apis/analytix.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from typing import Any, Dict, List
from .fl_constant import ReservedKey
_update_lock = threading.Lock()
MASK_STICKY = 1 << 0
MASK_PRIVATE = 1 << 1
V = "value"
M = "mask"
def is_sticky(mask) -> bool:
return mask & MASK_STICKY > 0
def is_private(mask) -> bool:
return mask & MASK_PRIVATE > 0
def make_mask(private, sticky):
mask = 0
if private:
mask += MASK_PRIVATE
if sticky:
mask += MASK_STICKY
return mask
def to_string(mask) -> str:
if is_private(mask):
result = "private:"
else:
result = "public:"
if is_sticky(mask):
return result + "sticky"
else:
return result + "non-sticky"
class FLContext(object):
def __init__(self):
"""Init the FLContext.
The FLContext is used to passed data between FL Components.
It can be thought of as a dictionary that stores key/value pairs called props (properties).
Visibility: private props are only visible to local components,
public props are also visible to remote components
Stickiness: sticky props become available in all future FL Contexts,
non-sticky props will only be available in the current FL Context
"""
self.model = None
self.props = {}
self.logger = logging.getLogger(self.__class__.__name__)
def get_prop_keys(self) -> List[str]:
return list(self.props.keys())
def public_key_exists(self, key) -> bool:
return key in self.props and not is_private(self.props[key][M])
def get_all_public_props(self) -> Dict[str, Any]:
result = {}
with _update_lock:
for k, v in self.props.items():
if not is_private(v[M]):
_, result[k] = self._get_prop(k)
return result
def _get_ctx_manager(self):
p = self.props.get(ReservedKey.MANAGER, None)
if p:
return p[V]
else:
return None
def _get_prop(self, key: str) -> (bool, Any):
"""
Get the prop with the specified key.
If the property is sticky, its value will be retrieved from the base (the ctx manager)
Args:
key: key of the property
Returns: tuple: whether the property exists, and the value of the prop if exists.
"""
# check local first
p = self.props.get(key)
if p:
mask = p[M]
if not is_sticky(mask):
return True, p[V]
# either the prop does not exist locally or it is sticky
# check with the ctx manager
ctx_manager = self._get_ctx_manager()
if ctx_manager:
assert isinstance(ctx_manager, FLContextManager)
exists, value, mask = ctx_manager.check_sticker(key)
if exists:
self.props[key] = {V: value, M: mask}
if key in self.props:
return True, self.props[key][V]
else:
return False, None
def set_prop(self, key: str, value, private=True, sticky=True):
if not isinstance(key, str):
raise ValueError("prop key must be str, but got {}".format(type(key)))
with _update_lock:
mask = make_mask(private, sticky)
# see whether a prop with the same key is already defined locally in this ctx
if key in self.props:
existing_mask = self.props[key][M]
if mask != existing_mask:
self.logger.warning(
f"property '{key}' already exists with attributes "
f"{to_string(existing_mask)}, cannot change to {to_string(mask)}"
)
return False
# if the prop is sticky, also check with ctx manager to make sure it is consistent with existing mask
if sticky:
# check attributes
ctx_manager = self._get_ctx_manager()
if ctx_manager:
assert isinstance(ctx_manager, FLContextManager)
exists, _, existing_mask = ctx_manager.check_sticker(key)
if exists and mask != existing_mask:
self.logger.warning(
f"property '{key}' already exists with attributes "
f"{to_string(existing_mask)}, cannot change to {to_string(mask)}"
)
return False
ctx_manager.update_sticker(key, value, mask)
self.props[key] = {V: value, M: mask}
return True
def get_prop(self, key, default=None):
with _update_lock:
exists, value = self._get_prop(key)
if exists:
return value
else:
return default
def get_custom_prop(self, key: str, default=None):
props = self.get_prop(ReservedKey.CUSTOM_PROPS)
if not props:
return default
return props.get(key, default)
def set_custom_prop(self, key: str, value):
props = self.get_prop(ReservedKey.CUSTOM_PROPS)
if not props:
props = {}
self.set_prop(ReservedKey.CUSTOM_PROPS, props, sticky=False, private=True)
props[key] = value
def get_prop_detail(self, key):
with _update_lock:
if key in self.props:
prop = self.props.get(key)
mask = prop[M]
_, value = self._get_prop(key)
return {V: value, "private": is_private(mask), "sticky": is_sticky(mask)}
else:
return None
def remove_prop(self, key: str):
if not isinstance(key, str):
return
if key.startswith("__"):
# do not allow removal of reserved props!
return
with _update_lock:
self.props.pop(key, None)
def __str__(self):
raw_list = [f"{k}: {type(v[V])}" for k, v in self.props.items()]
return " ".join(raw_list)
# some convenience methods
def _simple_get(self, key: str, default=None):
p = self.props.get(key)
return p[V] if p else default
def get_engine(self, default=None):
return self._simple_get(ReservedKey.ENGINE, default)
def get_job_id(self, default=None):
return self._simple_get(ReservedKey.RUN_NUM, default)
def get_identity_name(self, default=""):
return self._simple_get(ReservedKey.IDENTITY_NAME, default=default)
def set_job_is_unsafe(self, value: bool = True):
self.set_prop(ReservedKey.JOB_IS_UNSAFE, value, private=True, sticky=True)
def is_job_unsafe(self):
return self.get_prop(ReservedKey.JOB_IS_UNSAFE, False)
def get_run_abort_signal(self):
return self._simple_get(key=ReservedKey.RUN_ABORT_SIGNAL, default=None)
def set_peer_context(self, ctx):
self.put(key=ReservedKey.PEER_CTX, value=ctx, private=True, sticky=False)
def get_peer_context(self):
return self._simple_get(key=ReservedKey.PEER_CTX, default=None)
def set_public_props(self, metadata: dict):
# remove all public props
self.props = {k: v for k, v in self.props.items() if is_private(v[M] or is_sticky(v[M]))}
for key, value in metadata.items():
self.set_prop(key, value, private=False, sticky=False)
def sync_sticky(self):
# no longer needed since sticky props are always synced
pass
def put(self, key: str, value, private, sticky):
"""
Simply put the prop into the fl context without doing sticky property processing
Args:
key:
value:
private:
sticky:
Returns:
"""
self.props[key] = {V: value, M: make_mask(private, sticky)}
# implement Context Manager protocol
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# no longer needed since sticky props are always synced
pass
class FLContextManager(object):
"""FLContextManager manages the creation and updates of FLContext objects for a run.
NOTE: The engine may create a new FLContextManager object for each RUN!
"""
def __init__(
self, engine=None, identity_name: str = "", job_id: str = "", public_stickers=None, private_stickers=None
):
"""Init the FLContextManager.
Args:
engine: the engine that created this FLContextManager object
identity_name (str): identity name
job_id: the job id
public_stickers: public sticky properties that are copied into or copied from
private_stickers: private sticky properties that are copied into or copied from
"""
self.engine = engine
self.identity_name = identity_name
self.job_id = job_id
self._update_lock = threading.Lock()
self.public_stickers = {}
self.private_stickers = {}
if public_stickers and isinstance(public_stickers, dict):
self.public_stickers.update(public_stickers)
if private_stickers and isinstance(private_stickers, dict):
self.private_stickers.update(private_stickers)
def new_context(self) -> FLContext:
"""Create a new FLContext object.
Sticky properties are copied from the stickers into the new context.
Returns: a FLContext object
"""
ctx = FLContext()
ctx.put(key=ReservedKey.MANAGER, value=self, private=True, sticky=False)
# set permanent props
ctx.put(key=ReservedKey.ENGINE, value=self.engine, private=True, sticky=False)
ctx.put(key=ReservedKey.RUN_NUM, value=self.job_id, private=False, sticky=True)
if self.identity_name:
ctx.put(key=ReservedKey.IDENTITY_NAME, value=self.identity_name, private=False, sticky=False)
with self._update_lock:
for k, v in self.public_stickers.items():
ctx.put(key=k, value=v, sticky=True, private=False)
for k, v in self.private_stickers.items():
ctx.put(key=k, value=v, sticky=True, private=True)
return ctx
@staticmethod
def _get_sticker(stickers, key) -> (bool, Any):
"""
Get sticker with specified key
Args:
stickers:
key:
Returns: tuple: whether the sticker exists, value of the sticker if exists
"""
if key in stickers:
return True, stickers[key]
else:
return False, None
def check_sticker(self, key: str) -> (bool, Any, int):
"""
Check whether a sticky prop exists in either the public or private group.
Args:
key: the key of the sticker to be checked
Returns: tuple: whether the sticker exists, its value and mask if it exists
"""
with self._update_lock:
exists, value = self._get_sticker(self.private_stickers, key)
if exists:
return exists, value, make_mask(True, True)
exists, value = self._get_sticker(self.public_stickers, key)
if exists:
return exists, value, make_mask(False, True)
return False, None, 0
def update_sticker(self, key: str, value, mask):
"""
Update the value of a specified sticker.
Args:
key: key of the sticker to be updated
value: value of the sticker
mask: mask to determine whether the sticker is public or private
Returns:
"""
with self._update_lock:
if is_private(mask):
stickers = self.private_stickers
else:
stickers = self.public_stickers
stickers[key] = value
| NVFlare-main | nvflare/apis/fl_context.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EventType(object):
"""Built-in system events."""
SYSTEM_START = "_system_start"
SYSTEM_END = "_system_end"
ABOUT_TO_START_RUN = "_about_to_start_run"
START_RUN = "_start_run"
ABOUT_TO_END_RUN = "_about_to_end_run"
END_RUN = "_end_run"
SWAP_IN = "_swap_in"
SWAP_OUT = "_swap_out"
START_WORKFLOW = "_start_workflow"
END_WORKFLOW = "_end_workflow"
ABORT_TASK = "_abort_task"
FATAL_SYSTEM_ERROR = "_fatal_system_error"
JOB_DEPLOYED = "_job_deployed"
JOB_STARTED = "_job_started"
JOB_COMPLETED = "_job_completed"
JOB_ABORTED = "_job_aborted"
JOB_CANCELLED = "_job_cancelled"
BEFORE_PULL_TASK = "_before_pull_task"
AFTER_PULL_TASK = "_after_pull_task"
BEFORE_PROCESS_SUBMISSION = "_before_process_submission"
AFTER_PROCESS_SUBMISSION = "_after_process_submission"
BEFORE_TASK_DATA_FILTER = "_before_task_data_filter"
AFTER_TASK_DATA_FILTER = "_after_task_data_filter"
BEFORE_TASK_RESULT_FILTER = "_before_task_result_filter"
AFTER_TASK_RESULT_FILTER = "_after_task_result_filter"
BEFORE_TASK_EXECUTION = "_before_task_execution"
AFTER_TASK_EXECUTION = "_after_task_execution"
BEFORE_SEND_TASK_RESULT = "_before_send_task_result"
AFTER_SEND_TASK_RESULT = "_after_send_task_result"
CRITICAL_LOG_AVAILABLE = "_critical_log_available"
ERROR_LOG_AVAILABLE = "_error_log_available"
EXCEPTION_LOG_AVAILABLE = "_exception_log_available"
WARNING_LOG_AVAILABLE = "_warning_log_available"
INFO_LOG_AVAILABLE = "_info_log_available"
DEBUG_LOG_AVAILABLE = "_debug_log_available"
PRE_RUN_RESULT_AVAILABLE = "_pre_run_result_available"
# event types for job scheduling - server side
BEFORE_CHECK_CLIENT_RESOURCES = "_before_check_client_resources"
# event types for job scheduling - client side
BEFORE_CHECK_RESOURCE_MANAGER = "_before_check_resource_manager"
BEFORE_BUILD_COMPONENT = "_before_build_component"
| NVFlare-main | nvflare/apis/event_type.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import uuid
from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, List, Optional, Union
from nvflare.apis.signal import Signal
from .client import Client
from .fl_context import FLContext
from .shareable import ReservedHeaderKey, Shareable
class TaskCompletionStatus(Enum):
OK = "ok"
TIMEOUT = "timeout"
ERROR = "error"
CANCELLED = "cancelled"
ABORTED = "aborted"
IGNORED = "ignored"
CLIENT_DEAD = "client_dead"
class TaskOperatorKey:
OP_ID = "op_id"
METHOD = "method" # bcast, relay, etc.
NUM_ROUNDS = "num_rounds"
TARGETS = "targets" # list of leaf nodes
DATA_FILTERS = "data_filters"
RESULT_FILTERS = "result_filters"
AGGREGATOR = "aggregator" # only for bcast
SHAREABLE_GENERATOR = "shareable_gen" # only for relay
PERSISTOR = "persistor" # only for relay
TIMEOUT = "timeout"
TASK_ASSIGNMENT_TIMEOUT = "task_assign_timeout" # for relay
MIN_TARGETS = "min_targets"
WAIT_TIME_AFTER_MIN_RESPS = "wait_time_after_min_received"
class OperatorMethod:
BROADCAST = "bcast"
RELAY = "relay"
class OperatorConfigKey:
OPERATORS = "operators"
class Task(object):
def __init__(
self,
name: str,
data: Shareable,
props: Optional[Dict] = None,
timeout: int = 0,
before_task_sent_cb=None,
after_task_sent_cb=None,
result_received_cb=None,
task_done_cb=None,
operator=None,
secure=False,
):
"""Init the Task.
A task is a piece of work that is assigned by the Controller to client workers.
Depending on how the task is assigned (broadcast, send, or relay), the task will be performed by one or more clients.
Args:
name (str): name of the task
data (Shareable): data of the task
props: Any additional properties of the task
timeout: How long this task will last. If == 0, the task never time out.
before_task_sent_cb: If provided, this callback would be called before controller sends the tasks to clients.
It needs to follow the before_task_sent_cb_signature.
after_task_sent_cb: If provided, this callback would be called after controller sends the tasks to clients.
It needs to follow the after_task_sent_cb_signature.
result_received_cb: If provided, this callback would be called when controller receives results from clients.
It needs to follow the result_received_cb_signature.
task_done_cb: If provided, this callback would be called when task is done.
It needs to follow the task_done_cb_signature.
operator: task operator that describes the operation of the task
secure: should this task be transmitted in a secure way
"""
if not isinstance(name, str):
raise TypeError("name must be str, but got {}.".format(type(name)))
if not isinstance(data, Shareable):
raise TypeError("data must be an instance of Shareable, but got {}.".format(type(data)))
if operator and not isinstance(operator, dict):
raise TypeError(f"operator must be a dict but got {type(operator)}")
self.name = name # name of the task
self.data = data # task data to be sent to client(s)
self.operator = operator
self.cb_lock = threading.Lock()
self.secure = secure
data.set_header(ReservedHeaderKey.TASK_NAME, name)
if props is None:
self.props = {}
else:
if not isinstance(props, dict):
raise TypeError("props must be None or dict, but got {}.".format(type(props)))
self.props = props
if not isinstance(timeout, int):
raise TypeError("timeout must be an int, but got {}.".format(type(timeout)))
if timeout < 0:
raise ValueError("timeout must be >= 0, but got {}.".format(timeout))
if before_task_sent_cb is not None and not callable(before_task_sent_cb):
raise TypeError(
"before_task_sent must be a callable function, but got {}.".format(type(before_task_sent_cb))
)
if after_task_sent_cb is not None and not callable(after_task_sent_cb):
raise TypeError(
"after_task_sent_cb must be a callable function, but got {}.".format(type(after_task_sent_cb))
)
if result_received_cb is not None and not callable(result_received_cb):
raise TypeError("result_received must be a callable function, but got {}.".format(type(result_received_cb)))
if task_done_cb is not None and not callable(task_done_cb):
raise TypeError("task_done must be a callable function, but got {}.".format(type(task_done_cb)))
self.timeout = timeout
self.before_task_sent_cb = before_task_sent_cb
self.after_task_sent_cb = after_task_sent_cb
self.result_received_cb = result_received_cb
self.task_done_cb = task_done_cb
self.targets = None
self.client_tasks = [] # list of ClientTasks sent
self.last_client_task_map = {} # dict of: client name => last ClientTask of the client
self.completion_status = None # task completion status
self.is_standing = False # whether the task is still standing
self.schedule_time = None # when the task was scheduled
self.create_time = time.time()
def set_prop(self, key, value):
if key.startswith("__"):
raise ValueError("Keys start with __ is reserved. Please use other key instead of {}.".format(key))
self.props[key] = value
def get_prop(self, key):
return self.props.get(key)
class ClientTask(object):
"""ClientTask records the processing information of a task for a client."""
def __init__(self, client: Client, task: Task):
"""Init ClientTask.
Args:
client: the client
task: the processing information of this task will be recorded
"""
self.client = client
self.task = task
self.id = str(uuid.uuid4())
self.task_send_count = 0 # number of times the task is sent to the client
self.task_sent_time = None # last time the task was sent to the client
self.result_received_time = None # time when the result was received from the client
self.result = None # result submitted by the client, or processed result
self.props = {} # callbacks can use this dict to keep additional processing info
class SendOrder(Enum):
ANY = "any"
SEQUENTIAL = "sequential"
def before_task_sent_cb_signature(client_task: ClientTask, fl_ctx: FLContext):
"""Signature of the before_task_sent CB.
Called before sending a task to a client.
Usually used to prepare the FL Context, which is created to process client's task req
You can also use this CB to alter the data of the task to be sent.
Args:
client_task: the client task that is about to be sent
fl_ctx: the FL context that comes with the client's task request.
Public properties you set to this context will be sent to the client!
"""
pass
def after_task_sent_cb_signature(client_task: ClientTask, fl_ctx: FLContext):
"""Signature of the after_task_sent CB.
Called after sending a task to a client.
Usually used to clean up the FL Context or the Task data
Args:
client_task: the client task that has been sent
fl_ctx: the FL context that comes with the client's task request.
"""
pass
def result_received_cb_signature(client_task: ClientTask, fl_ctx: FLContext):
"""Signature of result_received CB.
Called after a result is received from a client
Args:
client_task: the client task that the result is for
fl_ctx: the FL context that comes with the client's result submission
"""
pass
def task_done_cb_signature(task: Task, fl_ctx: FLContext):
"""Signature of task_done CB.
Called when the task is completed.
Args:
task: the task that is completed
fl_ctx: an instance of FL Context used for this call only.
"""
pass
class ControllerSpec(ABC):
@abstractmethod
def start_controller(self, fl_ctx: FLContext):
"""Starts the controller.
This method is called at the beginning of the RUN.
Args:
fl_ctx: the FL context. You can use this context to access services provided by the
framework. For example, you can get Command Register from it and register your
admin command modules.
"""
pass
@abstractmethod
def stop_controller(self, fl_ctx: FLContext):
"""Stops the controller.
This method is called right before the RUN is ended.
Args:
fl_ctx: the FL context. You can use this context to access services provided by the
framework. For example, you can get Command Register from it and unregister your
admin command modules.
"""
pass
@abstractmethod
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
"""Process result when no task is found for it.
This is called when a result submission is received from a client, but no standing
task can be found for it (from the task queue)
This could happen when:
- the client's submission is too late - the task is already completed
- the Controller lost the task, e.g. the Server is restarted
Args:
client: the client that the result comes from
task_name: the name of the task
client_task_id: ID of the task
result: the result from the client
fl_ctx: the FL context that comes with the client's submission
"""
pass
def broadcast(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
min_responses: int = 0,
wait_time_after_min_received: int = 0,
):
"""Schedule to broadcast the task to specified targets.
This is a non-blocking call.
The task is standing until one of the following conditions comes true:
- if timeout is specified (> 0), and the task has been standing for more than the specified time
- the controller has received the specified min_responses results for this task, and all target clients
are done.
- the controller has received the specified min_responses results for this task, and has waited
for wait_time_after_min_received.
While the task is standing:
- Before sending the task to a client, the before_task_sent CB (if specified) is called;
- When a result is received from a client, the result_received CB (if specified) is called;
After the task is done, the task_done CB (if specified) is called:
- If result_received CB is specified, the 'result' in the ClientTask of each
client is produced by the result_received CB;
- Otherwise, the 'result' contains the original result submitted by the clients;
NOTE: if the targets is None, the actual broadcast target clients will be dynamic, because the clients
could join/disconnect at any moment. While the task is standing, any client that joins automatically
becomes a target for this broadcast.
Args:
task: the task to be sent
fl_ctx: the FL context
targets: list of destination clients. None means all clients are determined dynamically;
min_responses: the min number of responses expected. If == 0, must get responses from
all clients that the task has been sent to;
wait_time_after_min_received: how long (secs) to wait after the min_responses is received.
If == 0, end the task immediately after the min responses are received;
"""
pass
def broadcast_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
min_responses: int = 0,
wait_time_after_min_received: int = 0,
abort_signal: Signal = None,
):
"""This is the blocking version of the 'broadcast' method.
First, the task is scheduled for broadcast (see the broadcast method);
It then waits until the task is completed.
Args:
task: the task to be sent
fl_ctx: the FL context
targets: list of destination clients. None means all clients are determined dynamically.
min_responses: the min number of responses expected. If == 0, must get responses from
all clients that the task has been sent to;
wait_time_after_min_received: how long (secs) to wait after the min_responses is received.
If == 0, end the task immediately after the min responses are received;
abort_signal: the abort signal. If triggered, this method stops waiting and returns to the caller.
"""
pass
def broadcast_forever(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
):
"""Schedule a broadcast task that never ends until timeout or explicitly cancelled.
All clients will get the task every time it asks for a new task.
This is a non-blocking call.
NOTE: you can change the content of the task in the before_task_sent function.
Args:
task: the task to be sent
fl_ctx: the FL context
targets: list of destination clients. None means all clients are determined dynamically.
"""
pass
def send(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
):
"""Schedule to send the task to a single target client.
This is a non-blocking call.
In ANY order, the target client is the first target that asks for task.
In SEQUENTIAL order, the controller will try its best to send the task to the first client
in the targets list. If can't, it will try the next target, and so on.
NOTE: if the 'targets' is None, the actual target clients will be dynamic, because the clients
could join/disconnect at any moment. While the task is standing, any client that joins automatically
becomes a target for this task.
If the send_order is SEQUENTIAL, the targets must be a non-empty list of client names.
Args:
task: the task to be sent
fl_ctx: the FL context
targets: list of candidate target clients.
send_order: how to choose the client to send the task.
task_assignment_timeout: in SEQUENTIAL order, this is the wait time for trying a target client, before trying next target.
"""
pass
def send_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
abort_signal: Signal = None,
):
"""This is the blocking version of the 'send' method.
First, the task is scheduled for send (see the 'send' method);
It then waits until the task is completed and returns the task completion status and collected result.
Args:
task: the task to be performed by each client
fl_ctx: the FL context for scheduling the task
targets: list of clients. If None, all clients.
send_order: how to choose the next client
task_assignment_timeout: how long to wait for the expected client to get assigned
before assigning to next client.
abort_signal: the abort signal. If triggered, this method stops waiting and returns to the caller.
"""
pass
def relay(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
task_result_timeout: int = 0,
dynamic_targets: bool = True,
):
"""Schedules a task to be done sequentially by the clients in the targets list. This is a non-blocking call.
Args:
task: the task to be performed by each client
fl_ctx: the FL context for scheduling the task
targets: list of clients. If None, all clients.
send_order: how to choose the next client
task_assignment_timeout: how long to wait for the expected client to get assigned
before assigning to next client.
task_result_timeout: how long to wait for result from the assigned client before giving up.
dynamic_targets: whether to dynamically grow the target list. If True, then the target list is
expanded dynamically when a new client joins.
"""
pass
def relay_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order=SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
task_result_timeout: int = 0,
dynamic_targets: bool = True,
abort_signal: Signal = None,
):
"""This is the blocking version of 'relay'."""
pass
def get_num_standing_tasks(self) -> int:
"""Gets tasks that are currently standing.
Returns: length of the list of standing tasks
"""
pass
def cancel_task(
self,
task: Task,
completion_status: TaskCompletionStatus = TaskCompletionStatus.CANCELLED,
fl_ctx: Optional[FLContext] = None,
):
"""Cancels the specified task.
If the task is standing, the task is cancelled immediately (and removed from job queue) and calls
the task_done CB (if specified);
If the task is not standing, this method has no effect.
Args:
task: the task to be cancelled
completion_status: the TaskCompletionStatus of the task
fl_ctx: the FL context
"""
pass
def cancel_all_tasks(self, completion_status=TaskCompletionStatus.CANCELLED, fl_ctx: Optional[FLContext] = None):
"""Cancels all standing tasks.
Args:
completion_status: the TaskCompletionStatus of the task
fl_ctx: the FL context
"""
pass
| NVFlare-main | nvflare/apis/controller_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nvflare.apis.utils.fl_context_utils import generate_log_message
from nvflare.security.logging import secure_format_traceback
from .analytix import AnalyticsData, AnalyticsDataType
from .event_type import EventType
from .fl_constant import EventScope, FedEventHeader, FLContextKey, LogMessageTag
from .fl_context import FLContext
from .persistable import StatePersistable
from .shareable import Shareable
class FLComponent(StatePersistable):
def __init__(self):
"""Init FLComponent.
The FLComponent is the base class of all FL Components.
(executors, controllers, responders, filters, aggregators, and widgets are all FLComponents)
FLComponents have the capability to handle and fire events and contain various methods for logging.
"""
self._name = self.__class__.__name__
self.logger = logging.getLogger(self._name)
@property
def name(self):
return self._name
def _fire(self, event_type: str, fl_ctx: FLContext):
fl_ctx.set_prop(FLContextKey.EVENT_ORIGIN, self._name, private=True, sticky=False)
engine = fl_ctx.get_engine()
if engine is None:
self.log_error(fl_ctx=fl_ctx, msg="Logic Error: no engine in fl_ctx: {}".format(fl_ctx), fire_event=False)
else:
engine.fire_event(event_type, fl_ctx)
def fire_event(self, event_type: str, fl_ctx: FLContext):
"""Fires an event.
Args:
event_type (str): The type of event.
fl_ctx (FLContext): FLContext information.
"""
if not isinstance(event_type, str):
raise TypeError("expect event_type to be str, but got {}".format(type(event_type)))
if not event_type:
raise ValueError("event_type must be specified")
if not isinstance(fl_ctx, FLContext):
raise TypeError("expect fl_ctx to be FLContext, but got {}".format(type(fl_ctx)))
fl_ctx.set_prop(FLContextKey.EVENT_SCOPE, value=EventScope.LOCAL, private=True, sticky=False)
self._fire(event_type, fl_ctx)
def fire_fed_event(self, event_type: str, event_data: Shareable, fl_ctx: FLContext, targets=None):
"""Fires a federation event.
A federation event means that the event will be sent to different sites.
For example, if fire a federation event on the server side, one can decide what clients to send via the
parameter `targets`.
If fire a federation event on the client side, the event will be sent to the server.
Args:
event_type (str): The type of event.
event_data (Shareable): The data of this fed event.
fl_ctx (FLContext): FLContext information.
targets: The targets to send to. It is only used when fire federation event from server side.
"""
if not isinstance(fl_ctx, FLContext):
raise TypeError("expect fl_ctx to be FLContext, but got {}".format(type(fl_ctx)))
if not isinstance(event_data, Shareable):
raise TypeError("expect event_data to be Shareable, but got {}".format(type(event_data)))
event_data.set_header(key=FedEventHeader.TARGETS, value=targets)
fl_ctx.set_prop(FLContextKey.EVENT_DATA, event_data, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.EVENT_SCOPE, value=EventScope.FEDERATION, private=True, sticky=False)
self._fire(event_type, fl_ctx)
def system_panic(self, reason: str, fl_ctx: FLContext):
"""Signals a fatal condition that could cause the RUN to end.
Args:
reason (str): The reason for panic.
fl_ctx (FLContext): FLContext information.
"""
fl_ctx.set_prop(FLContextKey.EVENT_DATA, reason, private=True, sticky=False)
self.fire_event(EventType.FATAL_SYSTEM_ERROR, fl_ctx)
def handle_event(self, event_type: str, fl_ctx: FLContext):
"""Handles events.
Args:
event_type (str): event type fired by workflow.
fl_ctx (FLContext): FLContext information.
"""
pass
def log_info(self, fl_ctx: FLContext, msg: str, fire_event=False):
"""Logs a message with logger.info.
These log_XXX methods are implemented because we want to have a unified way of logging messages.
For example, in this method, we are using generate_log_message to add the FLContext information
into the message. And we can decide whether to fire a log event afterwards.
Args:
fl_ctx (FLContext): FLContext information.
msg (str): The message to log.
fire_event (bool): Whether to fire a log event.
"""
log_msg = generate_log_message(fl_ctx, msg)
self.logger.info(log_msg)
if fire_event:
self._fire_log_event(
event_type=EventType.INFO_LOG_AVAILABLE, log_tag=LogMessageTag.INFO, log_msg=log_msg, fl_ctx=fl_ctx
)
def log_warning(self, fl_ctx: FLContext, msg: str, fire_event=True):
"""Logs a message with logger.warning.
Args:
fl_ctx (FLContext): FLContext information.
msg (str): The message to log.
fire_event (bool): Whether to fire a log event.
"""
log_msg = generate_log_message(fl_ctx, msg)
self.logger.warning(log_msg)
if fire_event:
self._fire_log_event(
event_type=EventType.WARNING_LOG_AVAILABLE,
log_tag=LogMessageTag.WARNING,
log_msg=log_msg,
fl_ctx=fl_ctx,
)
def log_error(self, fl_ctx: FLContext, msg: str, fire_event=True):
"""Logs a message with logger.error.
Args:
fl_ctx (FLContext): FLContext information.
msg (str): The message to log.
fire_event (bool): Whether to fire a log event.
"""
log_msg = generate_log_message(fl_ctx, msg)
self.logger.error(log_msg)
if fire_event:
self._fire_log_event(
event_type=EventType.ERROR_LOG_AVAILABLE, log_tag=LogMessageTag.ERROR, log_msg=log_msg, fl_ctx=fl_ctx
)
def log_debug(self, fl_ctx: FLContext, msg: str, fire_event=False):
"""Logs a message with logger.debug.
Args:
fl_ctx (FLContext): FLContext information.
msg (str): The message to log.
fire_event (bool): Whether to fire a log event.
"""
log_msg = generate_log_message(fl_ctx, msg)
self.logger.debug(log_msg)
if fire_event:
self._fire_log_event(
event_type=EventType.DEBUG_LOG_AVAILABLE, log_tag=LogMessageTag.DEBUG, log_msg=log_msg, fl_ctx=fl_ctx
)
def log_critical(self, fl_ctx: FLContext, msg: str, fire_event=True):
"""Logs a message with logger.critical.
Args:
fl_ctx (FLContext): FLContext information.
msg (str): The message to log.
fire_event (bool): Whether to fire a log event.
"""
log_msg = generate_log_message(fl_ctx, msg)
self.logger.critical(log_msg)
if fire_event:
self._fire_log_event(
event_type=EventType.CRITICAL_LOG_AVAILABLE,
log_tag=LogMessageTag.CRITICAL,
log_msg=log_msg,
fl_ctx=fl_ctx,
)
def log_exception(self, fl_ctx: FLContext, msg: str, fire_event=False):
"""Logs exception message with logger.error.
Args:
fl_ctx (FLContext): FLContext information.
msg (str): The message to log.
fire_event (bool): Whether to fire a log event. Unused.
"""
log_msg = generate_log_message(fl_ctx, msg)
self.logger.error(log_msg)
ex_text = secure_format_traceback()
self.logger.error(ex_text)
if fire_event:
ex_msg = "{}\n{}".format(log_msg, ex_text)
self._fire_log_event(
event_type=EventType.EXCEPTION_LOG_AVAILABLE,
log_tag=LogMessageTag.EXCEPTION,
log_msg=ex_msg,
fl_ctx=fl_ctx,
)
def _fire_log_event(self, event_type: str, log_tag: str, log_msg: str, fl_ctx: FLContext):
if not fl_ctx:
return
event_data = AnalyticsData(key=log_tag, value=log_msg, data_type=AnalyticsDataType.TEXT, kwargs=None)
dxo = event_data.to_dxo()
fl_ctx.set_prop(key=FLContextKey.EVENT_DATA, value=dxo.to_shareable(), private=True, sticky=False)
self.fire_event(event_type=event_type, fl_ctx=fl_ctx)
| NVFlare-main | nvflare/apis/fl_component.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
class Client:
def __init__(self, name, token) -> None:
"""Init Client.
Represents a client, and is managed by the client manager.
The token is a uuid used for authorization.
Args:
name: client name
token: client token
"""
self.name = name
self.token = token
self.last_connect_time = time.time()
self.props = {}
def set_token(self, token):
self.token = token
def get_token(self):
return self.token
def set_prop(self, name, value):
self.props[name] = value
def get_prop(self, name, default=None):
return self.props.get(name, default)
| NVFlare-main | nvflare/apis/client.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Union
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.apis.job_def import Job, RunStatus
class JobDefManagerSpec(FLComponent, ABC):
"""Job Definition Management API."""
@abstractmethod
def create(self, meta: dict, uploaded_content: bytes, fl_ctx: FLContext) -> Dict[str, Any]:
"""Create a new job permanently.
The caller must have validated the content already and created initial meta. Receives bytes of uploaded folder,
uploading to permanent store, create unique Job ID (jid) and return meta.
Args:
meta: caller-provided meta info
uploaded_content: data of the job definition
fl_ctx (FLContext): FLContext information
Returns:
A dict containing meta info. Additional meta info are added, especially
a unique Job ID (jid) which has been created.
"""
pass
@abstractmethod
def get_job(self, jid: str, fl_ctx: FLContext) -> Job:
"""Gets the Job object through the job ID.
Args:
jid (str): Job ID
fl_ctx (FLContext): FLContext information
Returns:
A Job object
"""
pass
@abstractmethod
def get_app(self, job: Job, app_name: str, fl_ctx: FLContext) -> bytes:
"""Get the contents of the specified app in bytes.
Args:
job: Job object
app_name: name of the app to get
fl_ctx (FLContext): FLContext information
Returns:
Content of the specified app in bytes
"""
pass
@abstractmethod
def get_apps(self, job: Job, fl_ctx: FLContext) -> Dict[str, bytes]:
"""Get the all the apps of a Job.
Args:
job: Job object
fl_ctx (FLContext): FLContext information
Returns:
A dictionary of app names with the content of the corresponding app encoded in bytes
"""
pass
@abstractmethod
def get_content(self, jid: str, fl_ctx: FLContext) -> Optional[bytes]:
"""Gets the entire uploaded content for a Job.
Args:
jid (str): Job ID
fl_ctx (FLContext): FLContext information
Returns:
Uploaded content of the job in bytes
"""
pass
@abstractmethod
def get_job_data(self, jid: str, fl_ctx: FLContext) -> dict:
"""Gets the entire uploaded content and workspace for a job.
Args:
jid (str): Job ID
fl_ctx (FLContext): FLContext information
Returns:
a dict to hold the job data and workspace. With the format: {JobDataKey.JOB_DATA.value: stored_data, JobDataKey.WORKSPACE_DATA: workspace_data}
"""
pass
@abstractmethod
def update_meta(self, jid: str, meta, fl_ctx: FLContext):
"""Update the meta of an existing Job.
Args:
jid (str): Job ID
meta: dictionary of metadata for the job
fl_ctx (FLContext): FLContext information
"""
pass
@abstractmethod
def refresh_meta(self, job: Job, meta_keys: list, fl_ctx: FLContext):
"""Refresh meta of the job as specified in the meta keys
Save the values of the specified keys into job store
Args:
job: job object
meta_keys: meta keys need to updated
fl_ctx: FLContext
"""
pass
@abstractmethod
def set_status(self, jid: str, status: RunStatus, fl_ctx: FLContext):
"""Set status of an existing Job.
Args:
jid (str): Job ID
status (RunStatus): status to set
fl_ctx (FLContext): FLContext information
"""
pass
@abstractmethod
def get_all_jobs(self, fl_ctx: FLContext) -> List[Job]:
"""Gets all Jobs in the system.
Args:
fl_ctx (FLContext): FLContext information
Returns:
A list of all jobs
"""
pass
@abstractmethod
def get_jobs_by_status(self, run_status: RunStatus, fl_ctx: FLContext) -> List[Job]:
"""Gets Jobs of a specified status.
Args:
run_status (RunStatus): status to filter for
fl_ctx (FLContext): FLContext information
Returns:
A list of Jobs of the specified status
"""
pass
@abstractmethod
def get_jobs_waiting_for_review(self, reviewer_name: str, fl_ctx: FLContext) -> List[Job]:
"""Gets Jobs waiting for review for the specified user.
Args:
reviewer_name (str): reviewer name
fl_ctx (FLContext): FLContext information
Returns:
A list of Jobs waiting for review for the specified user.
"""
pass
@abstractmethod
def set_approval(
self, jid: str, reviewer_name: str, approved: bool, note: str, fl_ctx: FLContext
) -> Dict[str, Any]:
"""Sets the approval for the specified user for a certain Job.
Args:
jid (str): job id
reviewer_name (str): reviewer name
approved (bool): whether job is approved
note (str): any note message
fl_ctx (FLContext): FLContext information
Returns:
A dictionary of Job metadata.
"""
pass
@abstractmethod
def delete(self, jid: str, fl_ctx: FLContext):
"""Deletes the specified Job.
Args:
jid (str): Job ID
fl_ctx (FLContext): FLContext information
"""
pass
@abstractmethod
def save_workspace(self, jid: str, data: Union[bytes, str], fl_ctx: FLContext):
"""Save the job workspace to the job storage.
Args:
jid (str): Job ID
data: Job workspace data or name of data file
fl_ctx (FLContext): FLContext information
"""
pass
@abstractmethod
def get_storage_component(self, jid: str, component: str, fl_ctx: FLContext):
"""Get the workspace data from the job storage.
Args:
jid (str): Job ID
component: storage component name
fl_ctx (FLContext): FLContext information
"""
pass
@abstractmethod
def get_storage_for_download(
self, jid: str, download_dir: str, component: str, download_file: str, fl_ctx: FLContext
):
"""Get the workspace data from the job storage.
Args:
jid (str): Job ID
download_dir: download folder
component: storage component name
download_file: download file name
fl_ctx (FLContext): FLContext information
"""
pass
| NVFlare-main | nvflare/apis/job_def_manager_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from ..fuel.utils import fobs
from .fl_constant import ReservedKey, ReturnCode
class ReservedHeaderKey(object):
HEADERS = "__headers__"
TOPIC = "__topic__"
RC = ReservedKey.RC
COOKIE_JAR = ReservedKey.COOKIE_JAR
PEER_PROPS = "__peer_props__"
REPLY_IS_LATE = "__reply_is_late__"
TASK_NAME = ReservedKey.TASK_NAME
TASK_ID = ReservedKey.TASK_ID
WORKFLOW = ReservedKey.WORKFLOW
AUDIT_EVENT_ID = ReservedKey.AUDIT_EVENT_ID
CONTENT_TYPE = "__content_type__"
TASK_OPERATOR = "__task_operator__"
class Shareable(dict):
"""The information communicated between server and client.
Shareable is just a dict that can have any keys and values, defined by developers and users.
It is recommended that keys are strings. Values must be serializable.
"""
def __init__(self):
"""Init the Shareable."""
super().__init__()
self[ReservedHeaderKey.HEADERS] = {}
def set_header(self, key: str, value):
header = self.get(ReservedHeaderKey.HEADERS, None)
if not header:
header = {}
self[ReservedHeaderKey.HEADERS] = header
header[key] = value
def get_header(self, key: str, default=None):
header = self.get(ReservedHeaderKey.HEADERS, None)
if not header:
return default
else:
if not isinstance(header, dict):
raise ValueError("header object must be a dict, but got {}".format(type(header)))
return header.get(key, default)
# some convenience methods
def get_return_code(self, default=ReturnCode.OK):
return self.get_header(ReservedHeaderKey.RC, default)
def set_return_code(self, rc):
self.set_header(ReservedHeaderKey.RC, rc)
def add_cookie(self, name: str, data):
"""Add a cookie that is to be sent to the client and echoed back in response.
This method is intended to be called by the Server side.
Args:
name: the name of the cookie
data: the data of the cookie, which must be serializable
"""
cookie_jar = self.get_cookie_jar()
if not cookie_jar:
cookie_jar = {}
self.set_header(key=ReservedHeaderKey.COOKIE_JAR, value=cookie_jar)
cookie_jar[name] = data
def get_cookie_jar(self):
return self.get_header(key=ReservedHeaderKey.COOKIE_JAR, default=None)
def set_cookie_jar(self, jar):
self.set_header(key=ReservedHeaderKey.COOKIE_JAR, value=jar)
def get_cookie(self, name: str, default=None):
jar = self.get_cookie_jar()
if not jar:
return default
return jar.get(name, default)
def set_peer_props(self, props: dict):
self.set_header(ReservedHeaderKey.PEER_PROPS, props)
def get_peer_props(self):
return self.get_header(ReservedHeaderKey.PEER_PROPS, None)
def get_peer_prop(self, key: str, default):
props = self.get_peer_props()
if not isinstance(props, dict):
return default
return props.get(key, default)
def to_bytes(self) -> bytes:
"""Serialize the Model object into bytes.
Returns:
object serialized in bytes.
"""
return fobs.dumps(self)
@classmethod
def from_bytes(cls, data: bytes):
"""Convert the data bytes into Model object.
Args:
data: a bytes object
Returns:
an object loaded by FOBS from data
"""
return fobs.loads(data)
# some convenience functions
def make_reply(rc, headers=None) -> Shareable:
reply = Shareable()
reply.set_return_code(rc)
if headers and isinstance(headers, dict):
for k, v in headers.items():
reply.set_header(k, v)
return reply
def make_copy(source: Shareable) -> Shareable:
"""
Make a copy from the source.
The content (non-headers) will be kept intact. Headers will be deep-copied into the new instance.
"""
assert isinstance(source, Shareable)
c = copy.copy(source)
headers = source.get(ReservedHeaderKey.HEADERS, None)
if headers:
new_headers = copy.deepcopy(headers)
else:
new_headers = {}
c[ReservedHeaderKey.HEADERS] = new_headers
return c
| NVFlare-main | nvflare/apis/shareable.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/apis/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
from .fl_context import FLContext
from .job_def import Job
from .job_def_manager_spec import JobDefManagerSpec
class DispatchInfo:
"""Information needed for dispatch"""
def __init__(self, app_name: str, resource_requirements: dict, token: Optional[str]):
self.app_name = app_name
self.resource_requirements = resource_requirements
self.token = token
def __eq__(self, other):
return (
self.app_name == other.app_name
and self.resource_requirements == other.resource_requirements
and self.token == other.token
)
def __repr__(self):
return f"{self.__class__.__name__}: app_name: {self.app_name}, resource_requirements: {self.resource_requirements}, token: {self.token}"
class JobSchedulerSpec(ABC):
@abstractmethod
def schedule_job(
self, job_manager: JobDefManagerSpec, job_candidates: List[Job], fl_ctx: FLContext
) -> (Optional[Job], Optional[Dict[str, DispatchInfo]]):
"""Try to schedule a Job.
Args:
job_manager: JobDefManager
job_candidates: The candidate to choose from.
fl_ctx: FLContext.
Returns:
A tuple of (job, sites_dispatch_info, failed_jobs, blocked_jobs):
job is the Job that satisfies the criteria of the scheduler.
sites_dispatch_info is dict of {site name: DispatchInfo} for the job.
"""
pass
| NVFlare-main | nvflare/apis/job_scheduler_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Union
from nvflare.apis.client import Client
from nvflare.apis.controller_spec import ControllerSpec
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
class OperatorSpec(ABC):
@abstractmethod
def operate(
self,
op_description: dict,
controller: ControllerSpec,
task_name: str,
task_data: Shareable,
abort_signal: Signal,
fl_ctx: FLContext,
) -> Union[Shareable, None]:
pass
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
pass
| NVFlare-main | nvflare/apis/operator_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List, Tuple
class EngineSpec(ABC):
@abstractmethod
def validate_targets(self, target_names: List[str]) -> Tuple[List, List[str]]:
"""Validate specified target names.
Args:
target_names: list of names to be validated
Returns: a list of validate targets and a list of invalid target names
"""
pass
| NVFlare-main | nvflare/apis/engine_spec.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from .fl_snapshot import FLSnapshot, RunSnapshot
class StatePersistor(ABC):
@abstractmethod
def save(self, snapshot: RunSnapshot) -> str:
"""Saves the snapshot of the FL state to storage.
Args:
snapshot: RunSnapshot object
Returns:
Storage location.
"""
pass
@abstractmethod
def retrieve(self) -> FLSnapshot:
"""Loads the persisted FL components snapshot from the persisted location.
Returns:
An FLSnapshot
"""
pass
@abstractmethod
def retrieve_run(self, job_id: str) -> RunSnapshot:
"""Loads the persisted RunSnapshot of a job_id from the persisted location.
Args:
job_id: job_id
Returns:
A RunSnapshot of the job_id
"""
pass
@abstractmethod
def delete(self):
"""Deletes the FL component snapshot."""
pass
@abstractmethod
def delete_run(self, job_id: str):
"""Deletes the RunSnapshot of a job_id"""
pass
| NVFlare-main | nvflare/apis/state_persistor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FLCommunicationError(Exception):
"""Base class for fed_learn communication exceptions."""
def __init__(self, message, exception=None):
"""Init the FLCommunicationError.
Args:
exception: grpc.RpcError when trying to register gprc channel
"""
super().__init__()
# Copy all the exception properties into FLCommunicationError instance.
if exception:
self.__dict__.update(exception.__dict__)
self.message = message
class UnsafeJobError(Exception):
"""Raised when a job is detected to be unsafe"""
pass
class NotAuthorized(Exception):
"""Raised when a job is not authorized"""
pass
class UnsafeComponentError(Exception):
"""Raised when a component in the configuration is detected to be unsafe"""
pass
class TaskExecutionError(Exception):
"""Raised when a task execution failed"""
pass
| NVFlare-main | nvflare/apis/fl_exception.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List, Tuple
DATA = "data"
JOB_ZIP = "job.zip"
META = "meta"
META_JSON = "meta.json"
WORKSPACE = "workspace"
WORKSPACE_ZIP = "workspace.zip"
MANIFEST = "manifest.json"
class StorageException(Exception):
"""Base class for Storage exceptions."""
pass
class StorageSpec(ABC):
"""Functional spec of object storage.
An object is identified by a URI (unique resource identifier).
Each object contains:
- content (data)
- meta info that describes the control info of the object.
"""
@abstractmethod
def create_object(self, uri: str, data, meta: dict, overwrite_existing: bool):
"""Creates an object.
Examples of URI:
/state/engine/...
/runs/approved/covid_exam.3
/runs/pending/spleen_seg.1
Args:
uri: URI of the object
data: content of the object
meta: meta info of the object
overwrite_existing: whether to overwrite the object if already exists
Raises StorageException when:
- invalid args
- object already exists and overwrite_existing is False
- error creating the object
"""
pass
@abstractmethod
def update_object(self, uri: str, data, component_name: str):
"""Update the object
Args:
uri: URI of the object
data: content data of the component, or the content file location
component_name: component name
Raises StorageException when the object does not exit.
"""
pass
@abstractmethod
def update_meta(self, uri: str, meta: dict, replace: bool):
"""Updates the meta info of the specified object.
Args:
uri: URI of the object
meta: value of new meta info
replace: whether to replace the current meta completely or partial update
Raises StorageException when:
- invalid args
- no such object
- error updating the object
"""
pass
@abstractmethod
def list_objects(self, path: str) -> List[str]:
"""Lists all objects in the specified path.
Args:
path: the path to the objects
Returns:
list of URIs of objects
"""
pass
@abstractmethod
def get_meta(self, uri: str) -> dict:
"""Gets user defined meta info of the specified object.
Args:
uri: URI of the object
Returns:
meta info of the object.
if object does not exist, return empty dict {}
Raises StorageException when:
- invalid args
"""
pass
@abstractmethod
def get_data(self, uri: str, component_name: str = DATA) -> bytes:
"""Gets data of the specified object.
Args:
uri: URI of the object
component_name: storage component name
Returns:
data of the object.
if object does not exist, return None
Raises StorageException when:
- invalid args
"""
pass
@abstractmethod
def get_data_for_download(self, uri: str, component_name: str = DATA, download_file: str = None):
"""Gets data of the specified object.
Args:
uri: URI of the object
component_name: storage component name
download_file: component file_name for download
Raises StorageException when:
- invalid args
"""
pass
@abstractmethod
def get_detail(self, uri: str) -> Tuple[dict, bytes]:
"""Gets both data and meta of the specified object.
Args:
uri: URI of the object
Returns:
meta info and data of the object.
Raises StorageException when:
- invalid args
- no such object
"""
pass
@abstractmethod
def delete_object(self, uri: str):
"""Deletes specified object.
Args:
uri: URI of the object
"""
pass
@staticmethod
def is_valid_component(component_name):
return component_name in [DATA, META, WORKSPACE, MANIFEST]
| NVFlare-main | nvflare/apis/storage.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from requests import Response
from .fl_context import FLContext
@dataclass
class SP:
name: str = ""
fl_port: str = ""
admin_port: str = ""
service_session_id: str = ""
primary: bool = False
props: dict = field(default_factory=dict)
class OverseerAgent(ABC):
def __init__(self):
self.overseer_info = {}
def initialize(self, fl_ctx: FLContext):
pass
@abstractmethod
def set_secure_context(self, ca_path: str, cert_path: str = "", prv_key_path: str = ""):
pass
@abstractmethod
def start(self, update_callback=None, conditional_cb=False):
pass
@abstractmethod
def pause(self):
pass
@abstractmethod
def resume(self):
pass
@abstractmethod
def end(self):
pass
@abstractmethod
def is_shutdown(self) -> bool:
"""Return whether the agent receives a shutdown request."""
pass
@abstractmethod
def get_primary_sp(self) -> SP:
"""Return current primary service provider.
If primary sp not available, such as not reported by SD, connection to SD not established yet
the name and ports will be empty strings.
"""
pass
@abstractmethod
def promote_sp(self, sp_end_point, headers=None) -> Response:
pass
@abstractmethod
def set_state(self, state) -> Response:
pass
| NVFlare-main | nvflare/apis/overseer_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
class ContentBlockedException(Exception):
"""
A filter should raise this exception when the content is to be blocked
"""
pass
class FilterChainType(object):
TASK_DATA_CHAIN = "task_data"
TASK_RESULT_CHAIN = "task_result"
class FilterSource(object):
JOB = "job"
SITE = "site"
class FilterContextKey(object):
SOURCE = "__source"
CHAIN_TYPE = "__chain_type"
class Filter(FLComponent, ABC):
@abstractmethod
def process(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
"""Filter process applied to the Shareable object.
Args:
shareable: shareable
fl_ctx: FLContext
Returns:
a Shareable object
"""
pass
def set_prop(self, key: str, value):
setattr(self, key, value)
def get_prop(self, key: str, default=None):
try:
return getattr(self, key)
except AttributeError:
return default
| NVFlare-main | nvflare/apis/filter.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
from nvflare.apis.fl_constant import WorkspaceConstants
class Workspace:
def __init__(self, root_dir: str, site_name: str = "", config_folder: str = "config"):
"""Define a workspace.
NOTE::
Example of client workspace folder structure:
Workspace ROOT
local
authorization.json.default
resources.json.default
custom/
custom python code
...
startup (optional)
provisioned content
fed_client.json
run_1
app
config (required)
configurations
custom (optional)
custom python code
other_folder (app defined)
log.txt
job_meta.json
...
Args:
root_dir: root directory of the workspace
site_name: site name of the workspace
config_folder: where to find required config inside an app
"""
self.root_dir = root_dir
self.site_name = site_name
self.config_folder = config_folder
# check to make sure the workspace is valid
if not os.path.isdir(root_dir):
raise RuntimeError(f"invalid workspace {root_dir}: it does not exist or not a valid dir")
startup_dir = self.get_startup_kit_dir()
if not os.path.isdir(startup_dir):
raise RuntimeError(
f"invalid workspace {root_dir}: missing startup folder '{startup_dir}' or not a valid dir"
)
site_dir = self.get_site_config_dir()
if not os.path.isdir(site_dir):
raise RuntimeError(
f"invalid workspace {root_dir}: missing site config folder '{site_dir}' or not a valid dir"
)
def _fallback_path(self, file_names: [str]):
for n in file_names:
f = self.get_file_path_in_site_config(n)
if os.path.exists(f):
return f
return None
def get_authorization_file_path(self):
return self._fallback_path(
[WorkspaceConstants.AUTHORIZATION_CONFIG, WorkspaceConstants.DEFAULT_AUTHORIZATION_CONFIG]
)
def get_resources_file_path(self):
return self._fallback_path([WorkspaceConstants.RESOURCES_CONFIG, WorkspaceConstants.DEFAULT_RESOURCES_CONFIG])
def get_job_resources_file_path(self):
return self.get_file_path_in_site_config(WorkspaceConstants.JOB_RESOURCES_CONFIG)
def get_log_config_file_path(self):
return self._fallback_path([WorkspaceConstants.LOGGING_CONFIG, WorkspaceConstants.DEFAULT_LOGGING_CONFIG])
def get_file_path_in_site_config(self, file_basename: Union[str, List[str]]):
if isinstance(file_basename, str):
return os.path.join(self.get_site_config_dir(), file_basename)
elif isinstance(file_basename, list):
return self._fallback_path(file_basename)
else:
raise ValueError(f"invalid file_basename '{file_basename}': must be str or List[str]")
def get_file_path_in_startup(self, file_basename: str):
return os.path.join(self.get_startup_kit_dir(), file_basename)
def get_file_path_in_root(self, file_basename: str):
return os.path.join(self.root_dir, file_basename)
def get_server_startup_file_path(self):
# this is to get the full path to "fed_server.json"
return self.get_file_path_in_startup(WorkspaceConstants.SERVER_STARTUP_CONFIG)
def get_server_app_config_file_path(self, job_id):
return os.path.join(self.get_app_config_dir(job_id), WorkspaceConstants.SERVER_APP_CONFIG)
def get_client_app_config_file_path(self, job_id):
return os.path.join(self.get_app_config_dir(job_id), WorkspaceConstants.CLIENT_APP_CONFIG)
def get_client_startup_file_path(self):
# this is to get the full path to "fed_client.json"
return self.get_file_path_in_startup(WorkspaceConstants.CLIENT_STARTUP_CONFIG)
def get_admin_startup_file_path(self):
# this is to get the full path to "fed_admin.json"
return self.get_file_path_in_startup(WorkspaceConstants.ADMIN_STARTUP_CONFIG)
def get_site_config_dir(self) -> str:
return os.path.join(self.root_dir, WorkspaceConstants.SITE_FOLDER_NAME)
def get_site_custom_dir(self) -> str:
return os.path.join(self.get_site_config_dir(), WorkspaceConstants.CUSTOM_FOLDER_NAME)
def get_startup_kit_dir(self) -> str:
return os.path.join(self.root_dir, WorkspaceConstants.STARTUP_FOLDER_NAME)
def get_audit_file_path(self) -> str:
return os.path.join(self.root_dir, WorkspaceConstants.AUDIT_LOG)
def get_log_file_path(self) -> str:
return os.path.join(self.root_dir, WorkspaceConstants.LOG_FILE_NAME)
def get_root_dir(self) -> str:
return self.root_dir
def get_run_dir(self, job_id: str) -> str:
return os.path.join(self.root_dir, WorkspaceConstants.WORKSPACE_PREFIX + str(job_id))
def get_app_dir(self, job_id: str) -> str:
return os.path.join(self.get_run_dir(job_id), WorkspaceConstants.APP_PREFIX + self.site_name)
def get_app_log_file_path(self, job_id: str) -> str:
return os.path.join(self.get_run_dir(job_id), WorkspaceConstants.LOG_FILE_NAME)
def get_app_config_dir(self, job_id: str) -> str:
return os.path.join(self.get_app_dir(job_id), self.config_folder)
def get_app_custom_dir(self, job_id: str) -> str:
return os.path.join(self.get_app_dir(job_id), WorkspaceConstants.CUSTOM_FOLDER_NAME)
def get_job_meta_path(self, job_id: str) -> str:
return os.path.join(self.get_run_dir(job_id), WorkspaceConstants.JOB_META_FILE)
def get_site_privacy_file_path(self):
return self.get_file_path_in_site_config(WorkspaceConstants.PRIVACY_CONFIG)
def get_client_custom_dir(self) -> str:
return os.path.join(self.get_site_config_dir(), WorkspaceConstants.CUSTOM_FOLDER_NAME)
def get_stats_pool_summary_path(self, job_id: str, prefix=None) -> str:
file_name = WorkspaceConstants.STATS_POOL_SUMMARY_FILE_NAME
if prefix:
file_name = f"{prefix}.{file_name}"
return os.path.join(self.get_run_dir(job_id), file_name)
def get_stats_pool_records_path(self, job_id: str, prefix=None) -> str:
file_name = WorkspaceConstants.STATS_POOL_RECORDS_FILE_NAME
if prefix:
file_name = f"{prefix}.{file_name}"
return os.path.join(self.get_run_dir(job_id), file_name)
| NVFlare-main | nvflare/apis/workspace.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Tuple
from .fl_context import FLContext
class ResourceConsumerSpec(ABC):
@abstractmethod
def consume(self, resources: dict):
pass
class ResourceManagerSpec(ABC):
@abstractmethod
def check_resources(self, resource_requirement: dict, fl_ctx: FLContext) -> Tuple[bool, str]:
"""Checks whether the specified resource requirement can be satisfied.
Args:
resource_requirement: a dict that specifies resource requirement
fl_ctx: the FLContext
Returns:
A tuple of (is_resource_enough, token).
is_resource_enough is a bool indicates whether there is enough resources;
token is for resource reservation / cancellation for this check request.
"""
pass
@abstractmethod
def cancel_resources(self, resource_requirement: dict, token: str, fl_ctx: FLContext):
"""Cancels reserved resources if any.
Args:
resource_requirement: a dict that specifies resource requirement
token: a resource reservation token returned by check_resources
fl_ctx: the FLContext
Note:
If check_resource didn't return a token, then don't need to call this method
"""
pass
@abstractmethod
def allocate_resources(self, resource_requirement: dict, token: str, fl_ctx: FLContext) -> dict:
"""Allocates resources.
Note:
resource requirements and resources may be different things.
Args:
resource_requirement: a dict that specifies resource requirement
token: a resource reservation token returned by check_resources
fl_ctx: the FLContext
Returns:
A dict of allocated resources
"""
pass
@abstractmethod
def free_resources(self, resources: dict, token: str, fl_ctx: FLContext):
"""Frees resources.
Args:
resources: resources to be freed
token: a resource reservation token returned by check_resources
fl_ctx: the FLContext
"""
pass
@abstractmethod
def report_resources(self, fl_ctx) -> dict:
"""Reports resources."""
pass
| NVFlare-main | nvflare/apis/resource_manager_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Tuple
class JobMetaValidatorSpec(ABC):
@abstractmethod
def validate(self, job_name: str, job_data: bytes) -> Tuple[bool, str, dict]:
"""Validate job
Args:
job_name (str): Job name
job_data (bytes): Job ZIP data
Returns:
Tuple[bool, str, dict]: (is_valid, error_message, meta)
"""
pass
| NVFlare-main | nvflare/apis/job_meta_validator_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from .fl_context import FLContext
from .shareable import Shareable
class MessageSendStatus(enum.Enum):
OK = "ok" # message sent and response received
TIMEOUT = "timeout" # message sent but no response received
FAILURE = "failure" # failed to send message
REPLY_ERROR = "reply_error" # error in reply
def aux_request_handle_func_signature(topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
"""This is the signature of the message_handle_func.
The message_handle_func is a callback function that is registered to handle an aux request of a specific topic.
Any implementation of a message_handle_func must follow this signature.
Example from the client runner:
engine.register_aux_message_handler(topic=ReservedTopic.END_RUN, message_handle_func=self._handle_end_run)
Args:
topic: topic of the message to be handled
request: the message data to be handled
fl_ctx: FL context
Returns: a Shareable response to the requester
"""
pass
| NVFlare-main | nvflare/apis/aux_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class ReturnCode(object):
OK = "OK"
BAD_PEER_CONTEXT = "BAD_PEER_CONTEXT"
BAD_REQUEST_DATA = "BAD_REQUEST_DATA"
BAD_TASK_DATA = "BAD_TASK_DATA"
COMMUNICATION_ERROR = "COMMUNICATION_ERROR"
ERROR = "ERROR"
EXECUTION_EXCEPTION = "EXECUTION_EXCEPTION"
EXECUTION_RESULT_ERROR = "EXECUTION_RESULT_ERROR"
HANDLER_EXCEPTION = "HANDLER_EXCEPTION"
MISSING_PEER_CONTEXT = "MISSING_PEER_CONTEXT"
RUN_MISMATCH = "RUN_MISMATCH"
TASK_ABORTED = "TASK_ABORTED"
TASK_DATA_FILTER_ERROR = "TASK_DATA_FILTER_ERROR"
TASK_RESULT_FILTER_ERROR = "TASK_RESULT_FILTER_ERROR"
TASK_UNKNOWN = "TASK_UNKNOWN"
TASK_UNSUPPORTED = "TASK_UNSUPPORTED"
TOPIC_UNKNOWN = "TOPIC_UNKNOWN"
MODEL_UNRECOGNIZED = "MODEL_UNRECOGNIZED"
VALIDATE_TYPE_UNKNOWN = "VALIDATE_TYPE_UNKNOWN"
EMPTY_RESULT = "EMPTY_RESULT"
UNSAFE_JOB = "UNSAFE_JOB"
SERVER_NOT_READY = "SERVER_NOT_READY"
SERVICE_UNAVAILABLE = "SERVICE_UNAVAILABLE"
class MachineStatus(Enum):
"""Constants for machine status.
Status Lifecycle
STOPPED <-> STARTING -> STARTED -> STOPPING -> STOPPED
"""
STARTING = "starting"
STARTED = "started"
STOPPING = "stopping"
STOPPED = "stopped"
class ReservedKey(object):
MANAGER = "__manager__"
ENGINE = "__engine__"
AUX_RUNNER = "__aux_runner__"
RUN_NUM = "__run_num__"
IDENTITY_NAME = "__identity_name__" # identity of the endpoint (e.g. client name)
PEER_CTX = "__peer_ctx__"
RC = "__rc__"
COOKIE_JAR = "__cookie_jar__"
WORKSPACE_ROOT = "__workspace_root__"
APP_ROOT = "__app_root__"
CLIENT_NAME = "__client_name__"
TASK_NAME = "__task_name__"
TASK_DATA = "__task_data__"
TASK_RESULT = "__task_result__"
TASK_ID = "__task_id__"
EVENT_ID = "__event_id__"
AUDIT_EVENT_ID = "__audit_event_id__"
IS_RESEND = "__is_resend__"
RUNNER = "__runner__"
WORKFLOW = "__workflow__"
REPLY = "__reply__"
EVENT_ORIGIN = "__event_origin__"
EVENT_ORIGIN_SITE = "__event_origin_site__"
EVENT_DATA = "__event_data__"
EVENT_SCOPE = "__event_scope__"
RUN_ABORT_SIGNAL = "__run_abort_signal__"
SHAREABLE = "__shareable__"
SHARED_FL_CONTEXT = "__shared_fl_context__"
ARGS = "__args__"
WORKSPACE_OBJECT = "__workspace_object__"
RANK_NUMBER = "__rank_number__"
NUM_OF_PROCESSES = "__num_of_processes__"
FROM_RANK_NUMBER = "__from_rank_number__"
SECURE_MODE = "__secure_mode__"
SIMULATE_MODE = "__simulate_mode__"
SP_END_POINT = "__sp_end_point__"
JOB_INFO = "__job_info__"
JOB_META = "__job_meta__"
CURRENT_JOB_ID = "__current_job_id__"
JOB_RUN_NUMBER = "__job_run_number__"
JOB_DEPLOY_DETAIL = "__job_deploy_detail__"
FATAL_SYSTEM_ERROR = "__fatal_system_error__"
JOB_IS_UNSAFE = "__job_is_unsafe__"
CUSTOM_PROPS = "__custom_props__"
EXCEPTIONS = "__exceptions__"
class FLContextKey(object):
TASK_NAME = ReservedKey.TASK_NAME
TASK_DATA = ReservedKey.TASK_DATA
TASK_RESULT = ReservedKey.TASK_RESULT
TASK_ID = ReservedKey.TASK_ID
EVENT_ID = ReservedKey.EVENT_ID
EVENT_ORIGIN = ReservedKey.EVENT_ORIGIN
EVENT_ORIGIN_SITE = ReservedKey.EVENT_ORIGIN_SITE
EVENT_DATA = ReservedKey.EVENT_DATA
EVENT_SCOPE = ReservedKey.EVENT_SCOPE
EXCEPTIONS = ReservedKey.EXCEPTIONS
CLIENT_NAME = ReservedKey.CLIENT_NAME
WORKSPACE_ROOT = ReservedKey.WORKSPACE_ROOT
CURRENT_RUN = ReservedKey.RUN_NUM
APP_ROOT = ReservedKey.APP_ROOT
PEER_CONTEXT = ReservedKey.PEER_CTX
IS_CLIENT_TASK_RESEND = ReservedKey.IS_RESEND
RUNNER = ReservedKey.RUNNER
WORKFLOW = ReservedKey.WORKFLOW
SHAREABLE = ReservedKey.SHAREABLE
RUN_ABORT_SIGNAL = ReservedKey.RUN_ABORT_SIGNAL
ARGS = ReservedKey.ARGS
REPLY = ReservedKey.REPLY
WORKSPACE_OBJECT = ReservedKey.WORKSPACE_OBJECT
RANK_NUMBER = ReservedKey.RANK_NUMBER
NUM_OF_PROCESSES = ReservedKey.NUM_OF_PROCESSES
FROM_RANK_NUMBER = ReservedKey.FROM_RANK_NUMBER
SECURE_MODE = ReservedKey.SECURE_MODE
SIMULATE_MODE = ReservedKey.SIMULATE_MODE
SP_END_POINT = ReservedKey.SP_END_POINT
JOB_INFO = ReservedKey.JOB_INFO
JOB_META = ReservedKey.JOB_META
CURRENT_JOB_ID = ReservedKey.CURRENT_JOB_ID
JOB_RUN_NUMBER = ReservedKey.JOB_RUN_NUMBER
JOB_DEPLOY_DETAIL = ReservedKey.JOB_DEPLOY_DETAIL
JOB_SCOPE_NAME = "__job_scope_name__"
EFFECTIVE_JOB_SCOPE_NAME = "__effective_job_scope_name__"
SCOPE_PROPERTIES = "__scope_props__"
SCOPE_OBJECT = "__scope_object__"
FATAL_SYSTEM_ERROR = ReservedKey.FATAL_SYSTEM_ERROR
COMMUNICATION_ERROR = "Flare_communication_error__"
UNAUTHENTICATED = "Flare_unauthenticated__"
CLIENT_RESOURCE_SPECS = "__client_resource_specs"
JOB_PARTICIPANTS = "__job_participants"
JOB_BLOCK_REASON = "__job_block_reason" # why the job should be blocked from scheduling
SSID = "__ssid__"
COMPONENT_BUILD_ERROR = "__component_build_error__"
COMPONENT_CONFIG = "__component_config__"
COMPONENT_NODE = "__component_node__"
CONFIG_CTX = "__config_ctx__"
class ReservedTopic(object):
END_RUN = "__end_run__"
ABORT_ASK = "__abort_task__"
DO_TASK = "__do_task__"
AUX_COMMAND = "__aux_command__"
SYNC_RUNNER = "__sync_runner__"
class AdminCommandNames(object):
SUBMIT_JOB = "submit_job"
LIST_JOBS = "list_jobs"
GET_JOB_META = "get_job_meta"
DOWNLOAD_JOB = "download_job"
DOWNLOAD_JOB_FILE = "download_job_file"
ABORT_JOB = "abort_job"
DELETE_JOB = "delete_job"
CLONE_JOB = "clone_job"
DELETE_WORKSPACE = "delete_workspace"
DEPLOY_APP = "deploy_app"
START_APP = "start_app"
CHECK_STATUS = "check_status"
ADMIN_CHECK_STATUS = "admin_check_status"
ABORT = "abort"
ABORT_TASK = "abort_task"
REMOVE_CLIENT = "remove_client"
SHUTDOWN = "shutdown"
RESTART = "restart"
SET_TIMEOUT = "set_timeout"
SHOW_STATS = "show_stats"
SHOW_ERRORS = "show_errors"
RESET_ERRORS = "reset_errors"
AUX_COMMAND = "aux_command"
SYS_INFO = "sys_info"
REPORT_RESOURCES = "report_resources"
SHOW_SCOPES = "show_scopes"
CALL = "call"
SHELL_PWD = "pwd"
SHELL_LS = "ls"
SHELL_CAT = "cat"
SHELL_HEAD = "head"
SHELL_TAIL = "tail"
SHELL_GREP = "grep"
class ServerCommandNames(object):
GET_RUN_INFO = "get_run_info"
GET_TASK = "get_task"
SUBMIT_UPDATE = "submit_update"
AUX_COMMUNICATE = "aux_communicate"
HEARTBEAT = "heartbeat"
GET_CLIENTS = "get_clients"
AUX_SEND = "aux_send"
SHOW_STATS = "show_stats"
GET_ERRORS = "get_errors"
RESET_ERRORS = "reset_errors"
UPDATE_RUN_STATUS = "update_run_status"
HANDLE_DEAD_JOB = "handle_dead_job"
SERVER_STATE = "server_state"
class ServerCommandKey(object):
COMMAND = "command"
DATA = "data"
FL_CONTEXT = "fl_context"
PEER_FL_CONTEXT = "peer_fl_ctx"
SHAREABLE = "shareable"
TASK_NAME = "task_name"
TASK_ID = "task_id"
FL_CLIENT = "fl_client"
TOPIC = "topic"
AUX_REPLY = "aux_reply"
JOB_ID = "job_id"
CLIENTS = "clients"
COLLECTOR = "collector"
TURN_TO_COLD = "__turn_to_cold__"
class FedEventHeader(object):
TIMESTAMP = "_timestamp"
EVENT_TYPE = "_event_type"
DIRECTION = "_direction"
ORIGIN = "_origin"
TARGETS = "_targets"
class EventScope(object):
FEDERATION = "federation"
LOCAL = "local"
class NonSerializableKeys(object):
KEYS = [
ReservedKey.ENGINE,
ReservedKey.MANAGER,
ReservedKey.RUNNER,
FLContextKey.SCOPE_PROPERTIES,
FLContextKey.SCOPE_OBJECT,
FLContextKey.WORKSPACE_OBJECT,
FLContextKey.TASK_DATA,
FLContextKey.SHAREABLE,
]
class LogMessageTag(object):
DEBUG = "log/debug"
ERROR = "log/error"
EXCEPTION = "log/exception"
INFO = "log/info"
WARNING = "log/warning"
CRITICAL = "log/critical"
LOG_RECORD = "log_record"
class SnapshotKey(object):
FL_CONTEXT = "fl_context"
SERVER_RUNNER = "_Server_Runner"
WORKSPACE = "_workspace"
JOB_INFO = "_job_info"
JOB_ID = "_job_id"
JOB_CLIENTS = "_job_clients"
class RunProcessKey(object):
LISTEN_PORT = "_listen_port"
CONNECTION = "_conn"
CHILD_PROCESS = "_child_process"
STATUS = "_status"
JOB_ID = "_job_id"
PARTICIPANTS = "_participants"
PROCESS_FINISHED = "_process_finished"
PROCESS_EXE_ERROR = "_process_exe_error"
PROCESS_RETURN_CODE = "_process_return_code"
class SystemComponents(object):
JOB_SCHEDULER = "job_scheduler"
JOB_MANAGER = "job_manager"
JOB_RUNNER = "job_runner"
SERVER_RUNNER = "server_runner"
CLIENT_RUNNER = "client_runner"
CHECK_RESOURCE_PROCESSOR = "check_resource_processor"
CANCEL_RESOURCE_PROCESSOR = "cancel_resource_processor"
RESOURCE_MANAGER = "resource_manager"
RESOURCE_CONSUMER = "resource_consumer"
APP_DEPLOYER = "app_deployer"
DEFAULT_APP_DEPLOYER = "default_app_deployer"
JOB_META_VALIDATOR = "job_meta_validator"
class JobConstants:
SERVER_JOB_CONFIG = "config_fed_server.json"
CLIENT_JOB_CONFIG = "config_fed_client.json"
META_FILE = "meta.json"
META = "meta"
class WorkspaceConstants:
"""hard coded file names inside the workspace folder."""
STARTUP_FOLDER_NAME = "startup"
SITE_FOLDER_NAME = "local"
CUSTOM_FOLDER_NAME = "custom"
LOGGING_CONFIG = "log.config"
DEFAULT_LOGGING_CONFIG = LOGGING_CONFIG + ".default"
AUDIT_LOG = "audit.log"
LOG_FILE_NAME = "log.txt"
STATS_POOL_SUMMARY_FILE_NAME = "stats_pool_summary.json"
STATS_POOL_RECORDS_FILE_NAME = "stats_pool_records.csv"
# these two files is used by shell scripts to determine restart / shutdown
RESTART_FILE = "restart.fl"
SHUTDOWN_FILE = "shutdown.fl"
WORKSPACE_PREFIX = ""
APP_PREFIX = "app_"
SERVER_STARTUP_CONFIG = "fed_server.json"
CLIENT_STARTUP_CONFIG = "fed_client.json"
SERVER_APP_CONFIG = JobConstants.SERVER_JOB_CONFIG
CLIENT_APP_CONFIG = JobConstants.CLIENT_JOB_CONFIG
JOB_META_FILE = "meta.json"
AUTHORIZATION_CONFIG = "authorization.json"
DEFAULT_AUTHORIZATION_CONFIG = AUTHORIZATION_CONFIG + ".default"
RESOURCES_CONFIG = "resources.json"
DEFAULT_RESOURCES_CONFIG = RESOURCES_CONFIG + ".default"
PRIVACY_CONFIG = "privacy.json"
SAMPLE_PRIVACY_CONFIG = PRIVACY_CONFIG + ".sample"
JOB_RESOURCES_CONFIG = "job_resources.json"
ADMIN_STARTUP_CONFIG = "fed_admin.json"
class SiteType:
SERVER = "server"
CLIENT = "client"
ALL = "@ALL"
class SystemConfigs:
STARTUP_CONF = "start_config"
RESOURCES_CONF = "resources_config"
APPLICATION_CONF = "application_config"
class SecureTrainConst:
SSL_ROOT_CERT = "ssl_root_cert"
SSL_CERT = "ssl_cert"
PRIVATE_KEY = "ssl_private_key"
class FLMetaKey:
NUM_STEPS_CURRENT_ROUND = "NUM_STEPS_CURRENT_ROUND"
PROCESSED_ALGORITHM = "PROCESSED_ALGORITHM"
PROCESSED_KEYS = "PROCESSED_KEYS"
INITIAL_METRICS = "initial_metrics"
FILTER_HISTORY = "filter_history"
CONFIGS = "configs"
VALIDATE_TYPE = "validate_type"
CURRENT_ROUND = "current_round"
TOTAL_ROUNDS = "total_rounds"
JOB_ID = "job_id"
SITE_NAME = "site_name"
class FilterKey:
IN = "in"
OUT = "out"
INOUT = "inout"
DELIMITER = "/"
class ConfigVarName:
RUNNER_SYNC_TIMEOUT = "runner_sync_timeout"
MAX_RUNNER_SYNC_TRIES = "max_runner_sync_tries"
| NVFlare-main | nvflare/apis/fl_constant.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .fl_context import FLContext
class StatePersistable:
def get_persist_state(self, fl_ctx: FLContext) -> dict:
"""Generate data from state to be persisted.
Args:
fl_ctx: FLContext
Returns:
A dict serializable persist data
"""
return {}
def restore(self, state_data: dict, fl_ctx: FLContext):
"""Restore the state from persisted data.
Args:
state_data: serialized persist data
fl_ctx: FLContext
"""
pass
| NVFlare-main | nvflare/apis/persistable.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Tuple
from nvflare.apis.signal import Signal
from .client import Client
from .fl_component import FLComponent
from .fl_context import FLContext
from .shareable import Shareable
class Responder(FLComponent, ABC):
def __init__(self):
"""Init the Responder.
Base class for responding to clients. Controller is a subclass of Responder.
"""
FLComponent.__init__(self)
@abstractmethod
def process_task_request(self, client: Client, fl_ctx: FLContext) -> Tuple[str, str, Shareable]:
"""Called by the Engine when a task request is received from a client.
Args:
client: the Client that the task request is from
fl_ctx: the FLContext
Returns: task name, task id, and task data
"""
pass
@abstractmethod
def handle_exception(self, task_id: str, fl_ctx: FLContext):
"""Called after process_task_request returns, but exception occurs before task is sent out."""
pass
@abstractmethod
def process_submission(self, client: Client, task_name: str, task_id: str, result: Shareable, fl_ctx: FLContext):
"""Called by the Engine to process the submitted result from a client.
Args:
client: the Client that the submitted result is from
task_name: the name of the task
task_id: the id of the task
result: the Shareable result from the Client
fl_ctx: the FLContext
"""
pass
@abstractmethod
def handle_dead_job(self, client_name: str, fl_ctx: FLContext):
"""Called by the Engine to handle the case that the job on the client is dead.
Args:
client_name: name of the client on which the job is dead
fl_ctx: the FLContext
"""
pass
def initialize_run(self, fl_ctx: FLContext):
"""Called when a new RUN is about to start.
Args:
fl_ctx: FL context. It must contain 'job_id' that is to be initialized
"""
pass
@abstractmethod
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
"""This is the control logic for the RUN.
NOTE: this is running in a separate thread, and its life is the duration of the RUN.
Args:
fl_ctx: the FL context
abort_signal: the abort signal. If triggered, this method stops waiting and returns to the caller.
"""
pass
def finalize_run(self, fl_ctx: FLContext):
"""Called when a new RUN is finished.
Args:
fl_ctx: the FL context
"""
pass
| NVFlare-main | nvflare/apis/responder.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RunSnapshot:
"""RunSnapshot keeps a snapshot of all the FLComponent states.
The format is:
{ component_id: component_state_dict }
"""
def __init__(self, job_id: str):
super().__init__()
self.component_states = {}
self.completed = False
self.job_id = job_id
def get_component_snapshot(self, component_id: str) -> dict:
"""Get a state snapshot of a particular FL component.
Args:
component_id: Component ID
Returns:
A component state dict.
"""
return self.component_states.get(component_id)
def set_component_snapshot(self, component_id: str, component_state: dict):
"""Set the snapshot of a particular FL component.
Args:
component_id: Component ID
component_state: component state dict
"""
self.component_states[component_id] = component_state
def get_snapshot(self) -> dict:
return self.component_states
class FLSnapshot:
"""FLSnapshot keeps a snapshot of all the current running FL application RunSnapshots.
The format is:
{ job_id: RunSnapshot }
"""
def __init__(self):
super().__init__()
self.run_snapshots = {}
def add_snapshot(self, job_id: str, snapshot: RunSnapshot):
"""Add the RunSnapshot for job_id to the FLSnapshot.
Args:
job_id: the job_id
snapshot: snapshot of the Run
Returns:
"""
self.run_snapshots[job_id] = snapshot
def get_snapshot(self, job_id: str) -> RunSnapshot:
"""Get the RunSnapshot for job_id to the FLSnapshot.
Args:
job_id: the job_id
Returns: Snapshot of the Run
"""
return self.run_snapshots.get(job_id)
def remove_snapshot(self, job_id: str):
"""Remove the RunSnapshot of job_id from the FLSnapshot.
Args:
job_id: the job_id
Returns:
"""
if job_id in self.run_snapshots.keys():
self.run_snapshots.pop(job_id)
| NVFlare-main | nvflare/apis/fl_snapshot.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from .fl_context import FLContext
class ClientEngineSpec(ABC):
@abstractmethod
def fire_event(self, event_type: str, fl_ctx: FLContext):
pass
@abstractmethod
def new_context(self) -> FLContext:
# the engine must use FLContextManager to create a new context!
pass
@abstractmethod
def get_component(self, component_id: str) -> object:
pass
| NVFlare-main | nvflare/apis/client_engine_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from nvflare.apis.signal import Signal
from .fl_component import FLComponent
from .fl_context import FLContext
from .shareable import Shareable
class Executor(FLComponent, ABC):
"""Executors run on federated client side.
Each job can contain multiple applications or apps folder.
Each site (server or client) will have 1 app deployed for that job.
The server side app contains a Controller that will schedule `Task`.
The client side app contains an Executor that will execute corresponding logic based on `Task`'s name.
"""
def __init__(self):
FLComponent.__init__(self)
self.unsafe = False
@abstractmethod
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
"""Executes a task.
Args:
task_name (str): task name.
shareable (Shareable): input shareable.
fl_ctx (FLContext): fl context.
abort_signal (Signal): signal to check during execution to determine whether this task is aborted.
Returns:
An output shareable.
"""
pass
| NVFlare-main | nvflare/apis/executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from nvflare.apis.client import Client
from nvflare.apis.controller_spec import ClientTask, ControllerSpec, SendOrder, Task, TaskCompletionStatus
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FilterKey, FLContextKey, ReservedKey, ReservedTopic, ReturnCode, SiteType
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.apis.utils.task_utils import apply_filters
from nvflare.private.fed.utils.fed_utils import get_target_names
from nvflare.private.privacy_manager import Scope
from nvflare.security.logging import secure_format_exception
class TaskController(FLComponent, ControllerSpec):
def __init__(
self,
) -> None:
super().__init__()
self.task_data_filters = {}
self.task_result_filters = {}
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.start_controller(fl_ctx)
elif event_type == EventType.END_RUN:
self.stop_controller(fl_ctx)
def start_controller(self, fl_ctx: FLContext):
client_runner = fl_ctx.get_prop(FLContextKey.RUNNER)
self.task_data_filters = client_runner.task_data_filters
if not self.task_data_filters:
self.task_data_filters = {}
self.task_result_filters = client_runner.task_result_filters
if not self.task_result_filters:
self.task_result_filters = {}
def stop_controller(self, fl_ctx: FLContext):
pass
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
pass
def broadcast(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
min_responses: int = 0,
wait_time_after_min_received: int = 0,
):
return self.broadcast_and_wait(task, fl_ctx, targets, min_responses, wait_time_after_min_received)
def broadcast_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
min_responses: int = 0,
wait_time_after_min_received: int = 0,
abort_signal: Signal = None,
):
engine = fl_ctx.get_engine()
request = task.data
# apply task filters
self.log_debug(fl_ctx, "firing event EventType.BEFORE_TASK_DATA_FILTER")
self.fire_event(EventType.BEFORE_TASK_DATA_FILTER, fl_ctx)
# # first apply privacy-defined filters
try:
filter_name = Scope.TASK_DATA_FILTERS_NAME
task.data = apply_filters(filter_name, request, fl_ctx, self.task_data_filters, task.name, FilterKey.OUT)
except Exception as e:
self.log_exception(
fl_ctx,
"processing error in task data filter {}; "
"asked client to try again later".format(secure_format_exception(e)),
)
replies = self._make_error_reply(ReturnCode.TASK_DATA_FILTER_ERROR, targets)
return replies
target_names = get_target_names(targets)
_, invalid_names = engine.validate_targets(target_names)
if invalid_names:
raise ValueError(f"invalid target(s): {invalid_names}")
# set up ClientTask for each client
for target in targets:
client: Client = self._get_client(target, engine)
client_task = ClientTask(task=task, client=client)
task.client_tasks.append(client_task)
task.last_client_task_map[client_task.id] = client_task
# task_cb_error = self._call_task_cb(task.before_task_sent_cb, client, task, fl_ctx)
# if task_cb_error:
# return self._make_error_reply(ReturnCode.ERROR, targets)
if task.timeout <= 0:
raise ValueError(f"The task timeout must > 0. But got {task.timeout}")
request.set_header(ReservedKey.TASK_NAME, task.name)
replies = engine.send_aux_request(
targets=targets,
topic=ReservedTopic.DO_TASK,
request=request,
timeout=task.timeout,
fl_ctx=fl_ctx,
secure=task.secure,
)
self.log_debug(fl_ctx, "firing event EventType.AFTER_TASK_EXECUTION")
self.fire_event(EventType.AFTER_TASK_EXECUTION, fl_ctx)
self.log_debug(fl_ctx, "firing event EventType.BEFORE_TASK_RESULT_FILTER")
self.fire_event(EventType.BEFORE_TASK_RESULT_FILTER, fl_ctx)
for target, reply in replies.items():
# get the client task for the target
for client_task in task.client_tasks:
if client_task.client.name == target:
rc = reply.get_return_code()
if rc and rc == ReturnCode.OK:
# apply result filters
try:
filter_name = Scope.TASK_RESULT_FILTERS_NAME
reply = apply_filters(
filter_name, reply, fl_ctx, self.task_result_filters, task.name, FilterKey.IN
)
except Exception as e:
self.log_exception(
fl_ctx,
"processing error in task result filter {}; ".format(secure_format_exception(e)),
)
error_reply = make_reply(ReturnCode.TASK_RESULT_FILTER_ERROR)
client_task.result = error_reply
break
# assign replies to client task, prepare for the result_received_cb
client_task.result = reply
client: Client = self._get_client(target, engine)
task_cb_error = self._call_task_cb(task.result_received_cb, client, task, fl_ctx)
if task_cb_error:
client_task.result = make_reply(ReturnCode.ERROR)
break
else:
client_task.result = make_reply(ReturnCode.ERROR)
break
# apply task_done_cb
if task.task_done_cb is not None:
try:
task.task_done_cb(task=task, fl_ctx=fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx, f"processing error in task_done_cb error on task {task.name}: {secure_format_exception(e)}"
),
task.completion_status = TaskCompletionStatus.ERROR
task.exception = e
return self._make_error_reply(ReturnCode.ERROR, targets)
replies = {}
for client_task in task.client_tasks:
replies[client_task.client.name] = client_task.result
return replies
def _make_error_reply(self, error_type, targets):
error_reply = make_reply(error_type)
replies = {}
for target in targets:
replies[target] = error_reply
return replies
def _get_client(self, client, engine) -> Client:
if isinstance(client, Client):
return client
if client == SiteType.SERVER:
return Client(SiteType.SERVER, None)
client_obj = None
for _, c in engine.all_clients.items():
if client == c.name:
client_obj = c
return client_obj
def _call_task_cb(self, task_cb, client, task, fl_ctx):
task_cb_error = False
with task.cb_lock:
client_task = self._get_client_task(client, task)
if task_cb is not None:
try:
task_cb(client_task=client_task, fl_ctx=fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx,
f"processing error in {task_cb} on task {client_task.task.name} "
f"({client_task.id}): {secure_format_exception(e)}",
)
# this task cannot proceed anymore
task.completion_status = TaskCompletionStatus.ERROR
task.exception = e
task_cb_error = True
self.logger.debug(f"{task_cb} done on client_task: {client_task}")
self.logger.debug(f"task completion status is {task.completion_status}")
return task_cb_error
def _get_client_task(self, client, task):
client_task = None
for t in task.client_tasks:
if t.client.name == client.name:
client_task = t
return client_task
def send(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
):
engine = fl_ctx.get_engine()
self._validate_target(engine, targets)
return self.send_and_wait(task, fl_ctx, targets, send_order, task_assignment_timeout)
def send_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
abort_signal: Signal = None,
):
engine = fl_ctx.get_engine()
self._validate_target(engine, targets)
replies = {}
for target in targets:
reply = self.broadcast_and_wait(task, fl_ctx, [target], abort_signal=abort_signal)
replies.update(reply)
return replies
def _validate_target(self, engine, targets):
if len(targets) == 0:
raise ValueError("Must provide a target to send.")
if len(targets) != 1:
raise ValueError("send_and_wait can only send to a single target.")
target_names = get_target_names(targets)
_, invalid_names = engine.validate_targets(target_names)
if invalid_names:
raise ValueError(f"invalid target(s): {invalid_names}")
| NVFlare-main | nvflare/apis/impl/task_controller.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from abc import ABC
from threading import Lock
from typing import List, Optional, Tuple, Union
from nvflare.apis.client import Client
from nvflare.apis.controller_spec import ClientTask, ControllerSpec, SendOrder, Task, TaskCompletionStatus
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.job_def import job_from_meta
from nvflare.apis.responder import Responder
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_copy
from nvflare.apis.signal import Signal
from nvflare.fuel.utils.config_service import ConfigService
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.info_collector import GroupInfoCollector, InfoCollector
from .any_relay_manager import AnyRelayTaskManager
from .bcast_manager import BcastForeverTaskManager, BcastTaskManager
from .send_manager import SendTaskManager
from .seq_relay_manager import SequentialRelayTaskManager
from .task_manager import TaskCheckStatus, TaskManager
_TASK_KEY_ENGINE = "___engine"
_TASK_KEY_MANAGER = "___mgr"
_TASK_KEY_DONE = "___done"
# wait this long since client death report before treating the client as dead
_CONFIG_VAR_DEAD_CLIENT_GRACE_PERIOD = "dead_client_grace_period"
# wait this long since job schedule time before starting to check dead clients
_CONFIG_VAR_DEAD_CLIENT_CHECK_LEAD_TIME = "dead_client_check_lead_time"
def _check_positive_int(name, value):
if not isinstance(value, int):
raise TypeError("{} must be an instance of int, but got {}.".format(name, type(name)))
if value < 0:
raise ValueError("{} must >= 0.".format(name))
def _check_inputs(task: Task, fl_ctx: FLContext, targets: Union[List[Client], List[str], None]):
if not isinstance(task, Task):
raise TypeError("task must be an instance of Task, but got {}".format(type(task)))
if not isinstance(fl_ctx, FLContext):
raise TypeError("fl_ctx must be an instance of FLContext, but got {}".format(type(fl_ctx)))
if targets is not None:
if not isinstance(targets, list):
raise TypeError("targets must be a list of Client or string, but got {}".format(type(targets)))
for t in targets:
if not isinstance(t, (Client, str)):
raise TypeError(
"targets must be a list of Client or string, but got element of type {}".format(type(t))
)
def _get_client_task(target, task: Task):
for ct in task.client_tasks:
if target == ct.client.name:
return ct
return None
class Controller(Responder, ControllerSpec, ABC):
def __init__(self, task_check_period=0.2):
"""Manage life cycles of tasks and their destinations.
Args:
task_check_period (float, optional): interval for checking status of tasks. Defaults to 0.2.
"""
super().__init__()
self._engine = None
self._tasks = [] # list of standing tasks
self._client_task_map = {} # client_task_id => client_task
self._all_done = False
self._task_lock = Lock()
self._task_monitor = threading.Thread(target=self._monitor_tasks, args=())
self._task_check_period = task_check_period
self._dead_client_reports = {} # clients that reported the job is dead on it: name => report time
self._dead_clients_lock = Lock() # need lock since dead_clients can be modified from different threads
# make sure _check_tasks, process_task_request, process_submission does not interfere with each other
self._controller_lock = Lock()
def initialize_run(self, fl_ctx: FLContext):
"""Called by runners to initialize controller with information in fl_ctx.
.. attention::
Controller subclasses must not overwrite this method.
Args:
fl_ctx (FLContext): FLContext information
"""
engine = fl_ctx.get_engine()
if not engine:
self.system_panic(f"Engine not found. {self.__class__.__name__} exiting.", fl_ctx)
return
self._engine = engine
self.start_controller(fl_ctx)
self._task_monitor.start()
def _try_again(self) -> Tuple[str, str, Shareable]:
# TODO: how to tell client no shareable available now?
return "", "", None
def _set_stats(self, fl_ctx: FLContext):
"""Called to set stats into InfoCollector.
Args:
fl_ctx (FLContext): info collector is retrieved from fl_ctx with InfoCollector.CTX_KEY_STATS_COLLECTOR key
"""
collector = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR, None)
if collector:
if not isinstance(collector, GroupInfoCollector):
raise TypeError(
"collector must be an instance of GroupInfoCollector, but got {}".format(type(collector))
)
collector.set_info(
group_name=self._name,
info={
"tasks": {t.name: [ct.client.name for ct in t.client_tasks] for t in self._tasks},
},
)
def handle_event(self, event_type: str, fl_ctx: FLContext):
"""Called when events are fired.
Args:
event_type (str): all event types, including AppEventType and EventType
fl_ctx (FLContext): FLContext information with current event type
"""
if event_type == InfoCollector.EVENT_TYPE_GET_STATS:
self._set_stats(fl_ctx)
def process_task_request(self, client: Client, fl_ctx: FLContext) -> Tuple[str, str, Shareable]:
"""Called by runner when a client asks for a task.
.. note::
This is called in a separate thread.
Args:
client (Client): The record of one client requesting tasks
fl_ctx (FLContext): The FLContext associated with this request
Raises:
TypeError: when client is not an instance of Client
TypeError: when fl_ctx is not an instance of FLContext
TypeError: when any standing task containing an invalid client_task
Returns:
Tuple[str, str, Shareable]: task_name, an id for the client_task, and the data for this request
"""
with self._controller_lock:
return self._do_process_task_request(client, fl_ctx)
def _do_process_task_request(self, client: Client, fl_ctx: FLContext) -> Tuple[str, str, Shareable]:
if not isinstance(client, Client):
raise TypeError("client must be an instance of Client, but got {}".format(type(client)))
with self._dead_clients_lock:
self._dead_client_reports.pop(client.name, None)
if not isinstance(fl_ctx, FLContext):
raise TypeError("fl_ctx must be an instance of FLContext, but got {}".format(type(fl_ctx)))
client_task_to_send = None
with self._task_lock:
self.logger.debug("self._tasks: {}".format(self._tasks))
for task in self._tasks:
if task.completion_status is not None:
# this task is finished (and waiting for the monitor to exit it)
continue
# do we need to send this task to this client?
# note: the task could be sent to a client multiple times (e.g. in relay)
# we only check the last ClientTask sent to the client
client_task_to_check = task.last_client_task_map.get(client.name, None)
self.logger.debug("client_task_to_check: {}".format(client_task_to_check))
resend_task = False
if client_task_to_check is not None:
# this client has been sent the task already
if client_task_to_check.result_received_time is None:
# controller has not received result from client
# something wrong happens when client working on this task, so resend the task
resend_task = True
client_task_to_send = client_task_to_check
fl_ctx.set_prop(FLContextKey.IS_CLIENT_TASK_RESEND, True, sticky=False)
if not resend_task:
# check with the task manager whether to send
manager = task.props[_TASK_KEY_MANAGER]
if client_task_to_check is None:
client_task_to_check = ClientTask(task=task, client=client)
check_status = manager.check_task_send(client_task_to_check, fl_ctx)
self.logger.debug(
"Checking client task: {}, task.client.name: {}".format(
client_task_to_check, client_task_to_check.client.name
)
)
self.logger.debug("Check task send get check_status: {}".format(check_status))
if check_status == TaskCheckStatus.BLOCK:
# do not send this task, and do not check other tasks
return self._try_again()
elif check_status == TaskCheckStatus.NO_BLOCK:
# do not send this task, but continue to check next task
continue
else:
# creates the client_task to be checked for sending
client_task_to_send = ClientTask(client, task)
break
# NOTE: move task sending process outside the task lock
# This is to minimize the locking time and to avoid potential deadlock:
# the CB could schedule another task, which requires lock
self.logger.debug("Determining based on client_task_to_send: {}".format(client_task_to_send))
if client_task_to_send is None:
# no task available for this client
return self._try_again()
# try to send the task
can_send_task = True
task = client_task_to_send.task
with task.cb_lock:
# Note: must guarantee the after_task_sent_cb is always called
# regardless whether the task is sent successfully.
# This is so that the app could clear up things in after_task_sent_cb.
if task.before_task_sent_cb is not None:
try:
task.before_task_sent_cb(client_task=client_task_to_send, fl_ctx=fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx,
"processing error in before_task_sent_cb on task {} ({}): {}".format(
client_task_to_send.task.name, client_task_to_send.id, secure_format_exception(e)
),
)
# this task cannot proceed anymore
task.completion_status = TaskCompletionStatus.ERROR
task.exception = e
self.logger.debug("before_task_sent_cb done on client_task_to_send: {}".format(client_task_to_send))
self.logger.debug(f"task completion status is {task.completion_status}")
if task.completion_status is not None:
can_send_task = False
# remember the task name and data to be sent to the client
# since task.data could be reset by the after_task_sent_cb
task_name = task.name
task_data = task.data
operator = task.operator
if task.after_task_sent_cb is not None:
try:
task.after_task_sent_cb(client_task=client_task_to_send, fl_ctx=fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx,
"processing error in after_task_sent_cb on task {} ({}): {}".format(
client_task_to_send.task.name, client_task_to_send.id, secure_format_exception(e)
),
)
task.completion_status = TaskCompletionStatus.ERROR
task.exception = e
if task.completion_status is not None:
# NOTE: the CB could cancel the task
can_send_task = False
if not can_send_task:
return self._try_again()
self.logger.debug("after_task_sent_cb done on client_task_to_send: {}".format(client_task_to_send))
with self._task_lock:
# sent the ClientTask and remember it
now = time.time()
client_task_to_send.task_sent_time = now
client_task_to_send.task_send_count += 1
# add task operator to task_data shareable
if operator:
task_data.set_header(key=ReservedHeaderKey.TASK_OPERATOR, value=operator)
if not resend_task:
task.last_client_task_map[client.name] = client_task_to_send
task.client_tasks.append(client_task_to_send)
self._client_task_map[client_task_to_send.id] = client_task_to_send
task_data.set_header(ReservedHeaderKey.TASK_ID, client_task_to_send.id)
return task_name, client_task_to_send.id, make_copy(task_data)
def handle_exception(self, task_id: str, fl_ctx: FLContext) -> None:
"""Called to cancel one task as its client_task is causing exception at upper level.
Args:
task_id (str): an id to the failing client_task
fl_ctx (FLContext): FLContext associated with this client_task
"""
with self._task_lock:
# task_id is the uuid associated with the client_task
client_task = self._client_task_map.get(task_id, None)
self.logger.debug("Handle exception on client_task {} with id {}".format(client_task, task_id))
if client_task is None:
# cannot find a standing task on the exception
return
task = client_task.task
self.cancel_task(task=task, fl_ctx=fl_ctx)
self.log_error(fl_ctx, "task {} is cancelled due to exception".format(task.name))
def handle_dead_job(self, client_name: str, fl_ctx: FLContext):
"""Called by the Engine to handle the case that the job on the client is dead.
Args:
client_name: name of the client on which the job is dead
fl_ctx: the FLContext
"""
# record the report and to be used by the task monitor
with self._dead_clients_lock:
self.log_info(fl_ctx, f"received dead job report from client {client_name}")
if not self._dead_client_reports.get(client_name):
self._dead_client_reports[client_name] = time.time()
def process_submission(self, client: Client, task_name: str, task_id: str, result: Shareable, fl_ctx: FLContext):
"""Called to process a submission from one client.
.. note::
This method is called by a separate thread.
Args:
client (Client): the client that submitted this task
task_name (str): the task name associated this submission
task_id (str): the id associated with the client_task
result (Shareable): the actual submitted data from the client
fl_ctx (FLContext): the FLContext associated with this submission
Raises:
TypeError: when client is not an instance of Client
TypeError: when fl_ctx is not an instance of FLContext
TypeError: when result is not an instance of Shareable
ValueError: task_name is not found in the client_task
"""
with self._controller_lock:
self._do_process_submission(client, task_name, task_id, result, fl_ctx)
def _do_process_submission(
self, client: Client, task_name: str, task_id: str, result: Shareable, fl_ctx: FLContext
):
if not isinstance(client, Client):
raise TypeError("client must be an instance of Client, but got {}".format(type(client)))
# reset the dead job report!
# note that due to potential race conditions, a client may fail to include the job id in its
# heartbeat (since the job hasn't started at the time of heartbeat report), but then includes
# the job ID later.
with self._dead_clients_lock:
self._dead_client_reports.pop(client.name, None)
if not isinstance(fl_ctx, FLContext):
raise TypeError("fl_ctx must be an instance of FLContext, but got {}".format(type(fl_ctx)))
if not isinstance(result, Shareable):
raise TypeError("result must be an instance of Shareable, but got {}".format(type(result)))
with self._task_lock:
# task_id is the uuid associated with the client_task
client_task = self._client_task_map.get(task_id, None)
self.log_debug(fl_ctx, "Get submission from client task={} id={}".format(client_task, task_id))
if client_task is None:
# cannot find a standing task for the submission
self.log_debug(fl_ctx, "no standing task found for {}:{}".format(task_name, task_id))
self.process_result_of_unknown_task(client, task_name, task_id, result, fl_ctx)
return
task = client_task.task
with task.cb_lock:
if task.name != task_name:
raise ValueError("client specified task name {} doesn't match {}".format(task_name, task.name))
if task.completion_status is not None:
# the task is already finished - drop the result
self.log_info(fl_ctx, "task is already finished - submission dropped")
return
# do client task CB processing outside the lock
# this is because the CB could schedule another task, which requires the lock
client_task.result = result
manager = task.props[_TASK_KEY_MANAGER]
manager.check_task_result(result, client_task, fl_ctx)
if task.result_received_cb is not None:
try:
self.log_debug(fl_ctx, "invoking result_received_cb ...")
task.result_received_cb(client_task=client_task, fl_ctx=fl_ctx)
except Exception as e:
# this task cannot proceed anymore
self.log_exception(
fl_ctx,
"processing error in result_received_cb on task {}({}): {}".format(
task_name, task_id, secure_format_exception(e)
),
)
task.completion_status = TaskCompletionStatus.ERROR
task.exception = e
else:
self.log_debug(fl_ctx, "no result_received_cb")
client_task.result_received_time = time.time()
def _schedule_task(
self,
task: Task,
fl_ctx: FLContext,
manager: TaskManager,
targets: Union[List[Client], List[str], None],
allow_dup_targets: bool = False,
):
if task.schedule_time is not None:
# this task was scheduled before
# we do not allow a task object to be reused
self.logger.debug("task.schedule_time: {}".format(task.schedule_time))
raise ValueError("Task was already used. Please create a new task object.")
# task.targets = targets
target_names = list()
if targets is None:
for client in self._engine.get_clients():
target_names.append(client.name)
else:
if not isinstance(targets, list):
raise ValueError("task targets must be a list, but got {}".format(type(targets)))
for t in targets:
if isinstance(t, str):
name = t
elif isinstance(t, Client):
name = t.name
else:
raise ValueError("element in targets must be string or Client type, but got {}".format(type(t)))
if allow_dup_targets or (name not in target_names):
target_names.append(name)
task.targets = target_names
task.props[_TASK_KEY_MANAGER] = manager
task.props[_TASK_KEY_ENGINE] = self._engine
task.is_standing = True
task.schedule_time = time.time()
with self._task_lock:
self._tasks.append(task)
self.log_info(fl_ctx, "scheduled task {}".format(task.name))
def broadcast(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
min_responses: int = 1,
wait_time_after_min_received: int = 0,
):
"""Schedule a broadcast task. This is a non-blocking call.
The task is scheduled into a task list. Clients can request tasks and controller will dispatch the task to eligible clients.
Args:
task (Task): the task to be scheduled
fl_ctx (FLContext): FLContext associated with this task
targets (Union[List[Client], List[str], None], optional): the list of eligible clients or client names or None (all clients). Defaults to None.
min_responses (int, optional): the condition to mark this task as completed because enough clients respond with submission. Defaults to 1.
wait_time_after_min_received (int, optional): a grace period for late clients to contribute their submission. 0 means no grace period.
Submission of late clients in the grace period are still collected as valid submission. Defaults to 0.
Raises:
ValueError: min_responses is greater than the length of targets since this condition will make the task, if allowed to be scheduled, never exit.
"""
_check_inputs(task=task, fl_ctx=fl_ctx, targets=targets)
_check_positive_int("min_responses", min_responses)
_check_positive_int("wait_time_after_min_received", wait_time_after_min_received)
if targets and min_responses > len(targets):
raise ValueError(
"min_responses ({}) must be less than length of targets ({}).".format(min_responses, len(targets))
)
manager = BcastTaskManager(
task=task, min_responses=min_responses, wait_time_after_min_received=wait_time_after_min_received
)
self._schedule_task(task=task, fl_ctx=fl_ctx, manager=manager, targets=targets)
def broadcast_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
min_responses: int = 1,
wait_time_after_min_received: int = 0,
abort_signal: Optional[Signal] = None,
):
"""Schedule a broadcast task. This is a blocking call.
The task is scheduled into a task list. Clients can request tasks and controller will dispatch the task to eligible clients.
Args:
task (Task): the task to be scheduled
fl_ctx (FLContext): FLContext associated with this task
targets (Union[List[Client], List[str], None], optional): the list of eligible clients or client names or None (all clients). Defaults to None.
min_responses (int, optional): the condition to mark this task as completed because enough clients respond with submission. Defaults to 1.
wait_time_after_min_received (int, optional): a grace period for late clients to contribute their submission. 0 means no grace period.
Submission of late clients in the grace period are still collected as valid submission. Defaults to 0.
abort_signal (Optional[Signal], optional): as this is a blocking call, this abort_signal informs this method to return. Defaults to None.
"""
self.broadcast(
task=task,
fl_ctx=fl_ctx,
targets=targets,
min_responses=min_responses,
wait_time_after_min_received=wait_time_after_min_received,
)
self.wait_for_task(task, abort_signal)
def broadcast_forever(self, task: Task, fl_ctx: FLContext, targets: Union[List[Client], List[str], None] = None):
"""Schedule a broadcast task. This is a non-blocking call.
The task is scheduled into a task list. Clients can request tasks and controller will dispatch the task to eligible clients.
This broadcast will not end.
Args:
task (Task): the task to be scheduled
fl_ctx (FLContext): FLContext associated with this task
targets (Union[List[Client], List[str], None], optional): the list of eligible clients or client names or None (all clients). Defaults to None.
"""
_check_inputs(task=task, fl_ctx=fl_ctx, targets=targets)
manager = BcastForeverTaskManager()
self._schedule_task(task=task, fl_ctx=fl_ctx, manager=manager, targets=targets)
def send(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
):
"""Schedule a single task to targets. This is a non-blocking call.
The task is scheduled into a task list. Clients can request tasks and controller will dispatch the task to eligible clients based on the send_order.
Args:
task (Task): the task to be scheduled
fl_ctx (FLContext): FLContext associated with this task
targets (Union[List[Client], List[str], None], optional): the list of eligible clients or client names or None (all clients). Defaults to None.
send_order (SendOrder, optional): the order for clients to become eligible. SEQUENTIAL means the order in targets is enforced. ANY means
clients in targets and haven't received task are eligible for task. Defaults to SendOrder.SEQUENTIAL.
task_assignment_timeout (int, optional): how long to wait for one client to pick the task. Defaults to 0.
Raises:
ValueError: when task_assignment_timeout is greater than task's timeout.
TypeError: send_order is not defined in SendOrder
ValueError: targets is None or an empty list
"""
_check_inputs(
task=task,
fl_ctx=fl_ctx,
targets=targets,
)
_check_positive_int("task_assignment_timeout", task_assignment_timeout)
if task.timeout and task_assignment_timeout and task_assignment_timeout > task.timeout:
raise ValueError(
"task_assignment_timeout ({}) needs to be less than or equal to task.timeout ({}).".format(
task_assignment_timeout, task.timeout
)
)
if not isinstance(send_order, SendOrder):
raise TypeError("send_order must be in Enum SendOrder, but got {}".format(type(send_order)))
# targets must be provided
if targets is None or len(targets) == 0:
raise ValueError("Targets must be provided for send.")
manager = SendTaskManager(task, send_order, task_assignment_timeout)
self._schedule_task(
task=task,
fl_ctx=fl_ctx,
manager=manager,
targets=targets,
)
def send_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
abort_signal: Signal = None,
):
"""Schedule a single task to targets. This is a blocking call.
The task is scheduled into a task list. Clients can request tasks and controller will dispatch the task to eligible clients based on the send_order.
Args:
task (Task): the task to be scheduled
fl_ctx (FLContext): FLContext associated with this task
targets (Union[List[Client], List[str], None], optional): the list of eligible clients or client names or None (all clients). Defaults to None.
send_order (SendOrder, optional): the order for clients to become eligible. SEQUENTIAL means the order in targets is enforced. ANY means
clients in targets and haven't received task are eligible for task. Defaults to SendOrder.SEQUENTIAL.
task_assignment_timeout (int, optional): how long to wait for one client to pick the task. Defaults to 0.
abort_signal (Optional[Signal], optional): as this is a blocking call, this abort_signal informs this method to return. Defaults to None.
"""
self.send(
task=task,
fl_ctx=fl_ctx,
targets=targets,
send_order=send_order,
task_assignment_timeout=task_assignment_timeout,
)
self.wait_for_task(task, abort_signal)
def get_num_standing_tasks(self) -> int:
"""Get the number of tasks that are currently standing.
Returns:
int: length of the list of standing tasks
"""
return len(self._tasks)
def cancel_task(
self, task: Task, completion_status=TaskCompletionStatus.CANCELLED, fl_ctx: Optional[FLContext] = None
):
"""Cancel the specified task.
Change the task completion_status, which will inform task monitor to clean up this task
note::
We only mark the task as completed and leave it to the task monitor to clean up. This is to avoid potential deadlock of task_lock.
Args:
task (Task): the task to be cancelled
completion_status (str, optional): the completion status for this cancellation. Defaults to TaskCompletionStatus.CANCELLED.
fl_ctx (Optional[FLContext], optional): FLContext associated with this cancellation. Defaults to None.
"""
task.completion_status = completion_status
def cancel_all_tasks(self, completion_status=TaskCompletionStatus.CANCELLED, fl_ctx: Optional[FLContext] = None):
"""Cancel all standing tasks in this controller.
Args:
completion_status (str, optional): the completion status for this cancellation. Defaults to TaskCompletionStatus.CANCELLED.
fl_ctx (Optional[FLContext], optional): FLContext associated with this cancellation. Defaults to None.
"""
with self._task_lock:
for t in self._tasks:
t.completion_status = completion_status
def finalize_run(self, fl_ctx: FLContext):
"""Do cleanup of the coordinator implementation.
.. attention::
Subclass controllers should not overwrite finalize_run.
Args:
fl_ctx (FLContext): FLContext associated with this action
"""
self.cancel_all_tasks() # unconditionally cancel all tasks
self._all_done = True
try:
if self._task_monitor.is_alive():
self._task_monitor.join()
except RuntimeError:
self.log_debug(fl_ctx, "unable to join monitor thread (not started?)")
self.stop_controller(fl_ctx)
def relay(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order: SendOrder = SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
task_result_timeout: int = 0,
dynamic_targets: bool = True,
):
"""Schedule a single task to targets in one-after-another style. This is a non-blocking call.
The task is scheduled into a task list. Clients can request tasks and controller will dispatch the task to eligible clients based on the send_order.
Args:
task (Task): the task to be scheduled
fl_ctx (FLContext): FLContext associated with this task
targets (Union[List[Client], List[str], None], optional): the list of eligible clients or client names or None (all clients). Defaults to None.
send_order (SendOrder, optional): the order for clients to become eligible.
SEQUENTIAL means the order in targets is enforced.
ANY means any clients that are inside the targets and haven't received the task are eligible. Defaults to SendOrder.SEQUENTIAL.
task_assignment_timeout (int, optional): how long to wait for one client to pick the task. Defaults to 0.
task_result_timeout (int, optional): how long to wait for current working client to reply its result. Defaults to 0.
dynamic_targets (bool, optional): allow clients not in targets to join at the end of targets list. Defaults to True.
Raises:
ValueError: when task_assignment_timeout is greater than task's timeout
ValueError: when task_result_timeout is greater than task's timeout
TypeError: send_order is not defined in SendOrder
TypeError: when dynamic_targets is not a boolean variable
ValueError: targets is None or an empty list but dynamic_targets is False
"""
_check_inputs(
task=task,
fl_ctx=fl_ctx,
targets=targets,
)
_check_positive_int("task_assignment_timeout", task_assignment_timeout)
_check_positive_int("task_result_timeout", task_result_timeout)
if task.timeout and task_assignment_timeout and task_assignment_timeout > task.timeout:
raise ValueError(
"task_assignment_timeout ({}) needs to be less than or equal to task.timeout ({}).".format(
task_assignment_timeout, task.timeout
)
)
if task.timeout and task_result_timeout and task_result_timeout > task.timeout:
raise ValueError(
"task_result_timeout ({}) needs to be less than or equal to task.timeout ({}).".format(
task_result_timeout, task.timeout
)
)
if not isinstance(send_order, SendOrder):
raise TypeError("send_order must be in Enum SendOrder, but got {}".format(type(send_order)))
if not isinstance(dynamic_targets, bool):
raise TypeError("dynamic_targets must be an instance of bool, but got {}".format(type(dynamic_targets)))
if targets is None and dynamic_targets is False:
raise ValueError("Need to provide targets when dynamic_targets is set to False.")
if send_order == SendOrder.SEQUENTIAL:
manager = SequentialRelayTaskManager(
task=task,
task_assignment_timeout=task_assignment_timeout,
task_result_timeout=task_result_timeout,
dynamic_targets=dynamic_targets,
)
else:
manager = AnyRelayTaskManager(
task=task, task_result_timeout=task_result_timeout, dynamic_targets=dynamic_targets
)
self._schedule_task(
task=task,
fl_ctx=fl_ctx,
manager=manager,
targets=targets,
allow_dup_targets=True,
)
def relay_and_wait(
self,
task: Task,
fl_ctx: FLContext,
targets: Union[List[Client], List[str], None] = None,
send_order=SendOrder.SEQUENTIAL,
task_assignment_timeout: int = 0,
task_result_timeout: int = 0,
dynamic_targets: bool = True,
abort_signal: Optional[Signal] = None,
):
"""Schedule a single task to targets in one-after-another style. This is a blocking call.
The task is scheduled into a task list. Clients can request tasks and controller will dispatch the task to eligible clients based on the send_order.
Args:
task (Task): the task to be scheduled
fl_ctx (FLContext): FLContext associated with this task
targets (Union[List[Client], List[str], None], optional): the list of eligible clients or client names or None (all clients). Defaults to None.
send_order (SendOrder, optional): the order for clients to become eligible. SEQUENTIAL means the order in targets is enforced. ANY means
clients in targets and haven't received task are eligible for task. Defaults to SendOrder.SEQUENTIAL.
task_assignment_timeout (int, optional): how long to wait for one client to pick the task. Defaults to 0.
task_result_timeout (int, optional): how long to wait for current working client to reply its result. Defaults to 0.
dynamic_targets (bool, optional): allow clients not in targets to join at the end of targets list. Defaults to True.
abort_signal (Optional[Signal], optional): as this is a blocking call, this abort_signal informs this method to return. Defaults to None.
"""
self.relay(
task=task,
fl_ctx=fl_ctx,
targets=targets,
send_order=send_order,
task_assignment_timeout=task_assignment_timeout,
task_result_timeout=task_result_timeout,
dynamic_targets=dynamic_targets,
)
self.wait_for_task(task, abort_signal)
def _monitor_tasks(self):
while not self._all_done:
should_abort_job = self._job_policy_violated()
if not should_abort_job:
self._check_tasks()
else:
with self._engine.new_context() as fl_ctx:
self.system_panic("Aborting job due to deployment policy violation", fl_ctx)
return
time.sleep(self._task_check_period)
def _check_tasks(self):
with self._controller_lock:
self._do_check_tasks()
def _do_check_tasks(self):
exit_tasks = []
with self._task_lock:
for task in self._tasks:
if task.completion_status is not None:
exit_tasks.append(task)
continue
# check the task-specific exit condition
manager = task.props[_TASK_KEY_MANAGER]
if manager is not None:
if not isinstance(manager, TaskManager):
raise TypeError(
"manager in task must be an instance of TaskManager, but got {}".format(manager)
)
should_exit, exit_status = manager.check_task_exit(task)
self.logger.debug("should_exit: {}, exit_status: {}".format(should_exit, exit_status))
if should_exit:
task.completion_status = exit_status
exit_tasks.append(task)
continue
# check if task timeout
if task.timeout and time.time() - task.schedule_time >= task.timeout:
task.completion_status = TaskCompletionStatus.TIMEOUT
exit_tasks.append(task)
continue
# check whether clients that the task is waiting are all dead
dead_clients = self._get_task_dead_clients(task)
if dead_clients:
self.logger.info(f"client {dead_clients} is dead - set task {task.name} to TIMEOUT")
task.completion_status = TaskCompletionStatus.CLIENT_DEAD
exit_tasks.append(task)
continue
for exit_task in exit_tasks:
exit_task.is_standing = False
self.logger.debug(
"Removing task={}, completion_status={}".format(exit_task, exit_task.completion_status)
)
self._tasks.remove(exit_task)
for client_task in exit_task.client_tasks:
self.logger.debug("Removing client_task with id={}".format(client_task.id))
self._client_task_map.pop(client_task.id)
# do the task exit processing outside the lock to minimize the locking time
# and to avoid potential deadlock since the CB could schedule another task
if len(exit_tasks) <= 0:
return
with self._engine.new_context() as fl_ctx:
for exit_task in exit_tasks:
with exit_task.cb_lock:
self.log_info(
fl_ctx, "task {} exit with status {}".format(exit_task.name, exit_task.completion_status)
)
if exit_task.task_done_cb is not None:
try:
exit_task.task_done_cb(task=exit_task, fl_ctx=fl_ctx)
except Exception as e:
self.log_exception(
fl_ctx,
"processing error in task_done_cb error on task {}: {}".format(
exit_task.name, secure_format_exception(e)
),
)
exit_task.completion_status = TaskCompletionStatus.ERROR
exit_task.exception = e
def _get_task_dead_clients(self, task: Task):
"""
See whether the task is only waiting for response from a dead client
"""
now = time.time()
lead_time = ConfigService.get_float_var(name=_CONFIG_VAR_DEAD_CLIENT_CHECK_LEAD_TIME, default=30.0)
if now - task.schedule_time < lead_time:
# due to potential race conditions, we'll wait for at least 1 minute after the task
# is started before checking dead clients.
return None
dead_clients = []
with self._dead_clients_lock:
for target in task.targets:
ct = _get_client_task(target, task)
if ct is not None and ct.result_received_time:
# response has been received from this client
continue
# either we have not sent the task to this client or we have not received response
# is the client already dead?
if self._client_still_alive(target):
# this client is still alive
# we let the task continue its course since we still have live clients
return None
else:
# this client is dead - remember it
dead_clients.append(target)
return dead_clients
@staticmethod
def _process_finished_task(task, func):
def wrap(*args, **kwargs):
if func:
func(*args, **kwargs)
task.props[_TASK_KEY_DONE] = True
return wrap
def wait_for_task(self, task: Task, abort_signal: Signal):
task.props[_TASK_KEY_DONE] = False
task.task_done_cb = self._process_finished_task(task=task, func=task.task_done_cb)
while True:
if task.completion_status is not None:
break
if abort_signal and abort_signal.triggered:
self.cancel_task(task, fl_ctx=None, completion_status=TaskCompletionStatus.ABORTED)
break
task_done = task.props[_TASK_KEY_DONE]
if task_done:
break
time.sleep(self._task_check_period)
def _job_policy_violated(self):
if not self._engine:
return False
with self._engine.new_context() as fl_ctx:
clients = self._engine.get_clients()
with self._dead_clients_lock:
alive_clients = []
dead_clients = []
for client in clients:
if self._client_still_alive(client.name):
alive_clients.append(client.name)
else:
dead_clients.append(client.name)
if not dead_clients:
return False
if not alive_clients:
self.log_error(fl_ctx, f"All clients are dead: {dead_clients}")
return True
job_meta = fl_ctx.get_prop(FLContextKey.JOB_META)
job = job_from_meta(job_meta)
if len(alive_clients) < job.min_sites:
self.log_error(fl_ctx, f"Alive clients {len(alive_clients)} < required min {job.min_sites}")
return True
# check required clients:
if dead_clients and job.required_sites:
dead_required_clients = [c for c in dead_clients if c in job.required_sites]
if dead_required_clients:
self.log_error(fl_ctx, f"Required client(s) dead: {dead_required_clients}")
return True
return False
def _client_still_alive(self, client_name):
now = time.time()
report_time = self._dead_client_reports.get(client_name, None)
grace_period = ConfigService.get_float_var(name=_CONFIG_VAR_DEAD_CLIENT_GRACE_PERIOD, default=30.0)
if not report_time:
# this client is still alive
return True
elif now - report_time < grace_period:
# this report is still fresh - consider the client to be still alive
return True
return False
| NVFlare-main | nvflare/apis/impl/controller.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/apis/impl/__init__.py |
Subsets and Splits