# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from nvflare.apis.controller_spec import ClientTask
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import Controller
class ErrorHandlingController(Controller, ABC):
def __init__(self):
super().__init__()
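        # Per-return-code policy: True means the whole job should be aborted when a client
        # reports that return code; False means the error can be tolerated and the result ignored.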
self.abort_job_in_error = {
ReturnCode.EXECUTION_EXCEPTION: True,
ReturnCode.TASK_UNKNOWN: True,
ReturnCode.EXECUTION_RESULT_ERROR: False,
ReturnCode.TASK_DATA_FILTER_ERROR: True,
ReturnCode.TASK_RESULT_FILTER_ERROR: True,
}
def handle_client_errors(self, rc: str, client_task: ClientTask, fl_ctx: FLContext):
client_name = client_task.client.name
task_name = client_task.task.name
abort = self.abort_job_in_error[rc]
self.log_error(fl_ctx, f"error code = {rc}")
if abort:
            self.system_panic(
                f"Failed in client-site for {client_name} during task {task_name}. Controller is exiting.",
                fl_ctx=fl_ctx,
            )
self.log_error(fl_ctx, f"Execution failed for {client_name}")
else:
raise ValueError(f"Execution result is not received for {client_name}")
# Source: NVFlare-main / nvflare/app_common/workflows/error_handling_controller.py
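# A minimal usage sketch (not part of the NVFlare source): a concrete workflow built on
# ErrorHandlingController would typically check each task result's return code and delegate
# to handle_client_errors only for codes listed in abort_job_in_error. The MyWorkflow name
# and callback wiring below are hypothetical; the abstract Controller hooks are omitted.
from nvflare.apis.controller_spec import ClientTask
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.workflows.error_handling_controller import ErrorHandlingController

class MyWorkflow(ErrorHandlingController):
    def results_cb(self, client_task: ClientTask, fl_ctx: FLContext):
        # Dispatch to the shared error handler only for recognized error codes.
        rc = client_task.result.get_return_code()
        if rc != ReturnCode.OK and rc in self.abort_job_in_error:
            self.handle_client_errors(rc, client_task, fl_ctx)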
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.response_processors.global_weights_initializer import GlobalWeightsInitializer, WeightMethod
from .broadcast_and_process import BroadcastAndProcess
class InitializeGlobalWeights(BroadcastAndProcess):
def __init__(
self,
task_name: str = AppConstants.TASK_GET_WEIGHTS,
min_responses_required: int = 0,
wait_time_after_min_received: int = 0,
task_timeout: int = 0,
weights_prop_name=AppConstants.GLOBAL_MODEL,
weight_method: str = WeightMethod.FIRST,
weights_client_name: Union[str, List[str], None] = None,
):
"""A controller for initializing global model weights based on reported weights from clients.
Args:
task_name: name of the task to be sent to clients to collect their model weights
min_responses_required: min number of responses required. 0 means all clients.
wait_time_after_min_received: how long (secs) to wait after min responses are received
task_timeout: max amount of time to wait for the task to end. 0 means never time out.
weights_prop_name: name of the FL Context property to store the global weights
weight_method: method for determining global model weights. Defaults to `WeightMethod.FIRST`.
            weights_client_name: name of the client if the method is "client". Defaults to None.
                If `None`, the task will be sent to all clients (to be used with `weight_method=WeightMethod.FIRST`).
                If a list of client names is given, the task will only be sent to the listed clients.
"""
if isinstance(weights_client_name, str):
clients = [weights_client_name]
elif isinstance(weights_client_name, list):
clients = weights_client_name
else:
clients = None
BroadcastAndProcess.__init__(
self,
processor=GlobalWeightsInitializer(
weights_prop_name=weights_prop_name, weight_method=weight_method, client_name=weights_client_name
),
task_name=task_name,
min_responses_required=min_responses_required,
wait_time_after_min_received=wait_time_after_min_received,
timeout=task_timeout,
clients=clients,
)
# Source: NVFlare-main / nvflare/app_common/workflows/initialize_global_weights.py
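# A hypothetical instantiation sketch (values are illustrative; in practice this controller
# is usually configured via the job's workflow configuration):
from nvflare.app_common.response_processors.global_weights_initializer import WeightMethod
from nvflare.app_common.workflows.initialize_global_weights import InitializeGlobalWeights

# Initialize the global model from one specific client's reported weights.
init_weights = InitializeGlobalWeights(
    weight_method=WeightMethod.CLIENT,
    weights_client_name="site-1",  # hypothetical client name
)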
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.client import Client
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Controller, Task
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learnable_persistor import LearnablePersistor
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.info_collector import GroupInfoCollector, InfoCollector
class SplitNNDataKind(object):
ACTIVATIONS = "_splitnn_activations_"
GRADIENT = "_splitnn_gradient_"
class SplitNNConstants(object):
BATCH_INDICES = "_splitnn_batch_indices_"
DATA = "_splitnn_data_"
BATCH_SIZE = "_splitnn_batch_size_"
TARGET_NAMES = "_splitnn_target_names_"
TASK_INIT_MODEL = "_splitnn_task_init_model_"
TASK_TRAIN_LABEL_STEP = "_splitnn_task_train_label_step_"
TASK_VALID_LABEL_STEP = "_splitnn_task_valid_label_step_"
TASK_TRAIN = "_splitnn_task_train_"
TASK_RESULT = "_splitnn_task_result_"
TIMEOUT = 60.0 # timeout for waiting for reply from aux message request
class SplitNNController(Controller):
def __init__(
self,
num_rounds: int = 5000,
start_round: int = 0,
persistor_id=AppConstants.DEFAULT_PERSISTOR_ID, # used to init the models on both clients
shareable_generator_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
init_model_task_name=SplitNNConstants.TASK_INIT_MODEL,
train_task_name=SplitNNConstants.TASK_TRAIN,
task_timeout: int = 10,
ignore_result_error: bool = True,
batch_size: int = 256,
):
"""The controller for Split Learning Workflow.
        The SplitNNController workflow defines federated split learning on two clients.
        The model persistor (persistor_id) is used to load the initial models, which are sent to the clients.
        The shareable generator (shareable_generator_id) is used to convert the models to shareables, and
        shareables back to model weights. The model persistor also saves the models after training.
Args:
            num_rounds (int, optional): The total number of training rounds. Defaults to 5000.
start_round (int, optional): Start round for training. Defaults to 0.
persistor_id (str, optional): ID of the persistor component. Defaults to "persistor".
shareable_generator_id (str, optional): ID of the shareable generator. Defaults to "shareable_generator".
init_model_task_name: Task name used to initialize the local models.
train_task_name: Task name used for split learning.
task_timeout (int, optional): timeout (in sec) to determine if one client fails
to request the task which it is assigned to. Defaults to 10.
ignore_result_error (bool, optional): whether this controller can proceed if result has errors. Defaults to True.
Raises:
TypeError: when any of input arguments does not have correct type
ValueError: when any of input arguments is out of range
"""
Controller.__init__(self)
# Check arguments
if not isinstance(num_rounds, int):
raise TypeError("`num_rounds` must be int but got {}".format(type(num_rounds)))
if not isinstance(start_round, int):
raise TypeError("`start_round` must be int but got {}".format(type(start_round)))
if not isinstance(task_timeout, int):
            raise TypeError("`task_timeout` must be int but got {}".format(type(task_timeout)))
if not isinstance(persistor_id, str):
raise TypeError("`persistor_id` must be a string but got {}".format(type(persistor_id)))
if not isinstance(shareable_generator_id, str):
raise TypeError("`shareable_generator_id` must be a string but got {}".format(type(shareable_generator_id)))
if not isinstance(init_model_task_name, str):
raise TypeError("`init_model_task_name` must be a string but got {}".format(type(init_model_task_name)))
if not isinstance(train_task_name, str):
raise TypeError("`train_task_name` must be a string but got {}".format(type(train_task_name)))
if num_rounds < 0:
raise ValueError("num_rounds must be greater than or equal to 0.")
if start_round < 0:
raise ValueError("start_round must be greater than or equal to 0.")
self.persistor_id = persistor_id
self.shareable_generator_id = shareable_generator_id
self.persistor = None
self.shareable_generator = None
# config data
self._num_rounds = num_rounds
self._start_round = start_round
self._task_timeout = task_timeout
self.ignore_result_error = ignore_result_error
# workflow phases: init, train, validate
self._phase = AppConstants.PHASE_INIT
self._global_weights = None
self._current_round = None
# task names
self.init_model_task_name = init_model_task_name
self.train_task_name = train_task_name
self.targets_names = ["site-1", "site-2"]
self.nr_supported_clients = 2
self.batch_size = batch_size
def start_controller(self, fl_ctx: FLContext):
self.log_debug(fl_ctx, "starting controller")
self.persistor = fl_ctx.get_engine().get_component(self.persistor_id)
self.shareable_generator = fl_ctx.get_engine().get_component(self.shareable_generator_id)
if not isinstance(self.persistor, LearnablePersistor):
            self.system_panic(
                f"Persistor {self.persistor_id} must be a LearnablePersistor instance, but got {type(self.persistor)}", fl_ctx
            )
if not isinstance(self.shareable_generator, ShareableGenerator):
self.system_panic(
f"Shareable generator {self.shareable_generator_id} must be a Shareable Generator instance, "
f"but got {type(self.shareable_generator)}",
fl_ctx,
)
# initialize global model
fl_ctx.set_prop(AppConstants.START_ROUND, self._start_round, private=True, sticky=True)
fl_ctx.set_prop(AppConstants.NUM_ROUNDS, self._num_rounds, private=True, sticky=False)
self._global_weights = self.persistor.load(fl_ctx)
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, self._global_weights, private=True, sticky=True)
self.fire_event(AppEventType.INITIAL_MODEL_LOADED, fl_ctx)
def _process_result(self, client_task: ClientTask, fl_ctx: FLContext) -> bool:
# submitted shareable is stored in client_task.result
# we need to update task.data with that shareable so the next target
# will get the updated shareable
task = client_task.task
result = client_task.result
rc = result.get_return_code()
if rc and rc != ReturnCode.OK:
if self.ignore_result_error:
self.log_error(fl_ctx, f"Ignore the task {task} result. Train result error code: {rc}")
return False
else:
if rc in [ReturnCode.MISSING_PEER_CONTEXT, ReturnCode.BAD_PEER_CONTEXT]:
self.system_panic(
f"Peer context for task {task} is bad or missing. SplitNNController exiting.", fl_ctx=fl_ctx
)
return False
elif rc in [ReturnCode.EXECUTION_EXCEPTION, ReturnCode.TASK_UNKNOWN]:
self.system_panic(
f"Execution Exception in client task {task}. SplitNNController exiting.", fl_ctx=fl_ctx
)
return False
elif rc in [
ReturnCode.EXECUTION_RESULT_ERROR,
ReturnCode.TASK_DATA_FILTER_ERROR,
ReturnCode.TASK_RESULT_FILTER_ERROR,
]:
self.system_panic(
f"Execution result for task {task} is not a shareable. SplitNNController exiting.",
fl_ctx=fl_ctx,
)
return False
# assign result to current task
if result:
task.set_prop(SplitNNConstants.TASK_RESULT, result)
return True
def _check_targets(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
targets = engine.get_clients()
for t in targets:
if t.name not in self.targets_names:
self.system_panic(f"Client {t.name} not in expected target names: {self.targets_names}", fl_ctx)
def _init_models(self, abort_signal: Signal, fl_ctx: FLContext):
self._check_targets(fl_ctx)
self.log_debug(fl_ctx, f"SplitNN initializing model {self.targets_names}.")
# Create init_model_task_name
data_shareable: Shareable = self.shareable_generator.learnable_to_shareable(self._global_weights, fl_ctx)
task = Task(
name=self.init_model_task_name,
data=data_shareable,
result_received_cb=self._process_result,
)
self.broadcast_and_wait(
task=task,
min_responses=self.nr_supported_clients,
wait_time_after_min_received=0,
fl_ctx=fl_ctx,
abort_signal=abort_signal,
)
def _train(self, abort_signal: Signal, fl_ctx: FLContext):
self._check_targets(fl_ctx)
self.log_debug(fl_ctx, f"SplitNN training starting with {self.targets_names}.")
# Create train_task
data_shareable: Shareable = Shareable()
data_shareable.set_header(AppConstants.NUM_ROUNDS, self._num_rounds)
data_shareable.set_header(SplitNNConstants.BATCH_SIZE, self.batch_size)
data_shareable.set_header(SplitNNConstants.TARGET_NAMES, self.targets_names)
task = Task(
name=self.train_task_name,
data=data_shareable,
result_received_cb=self._process_result,
)
self.broadcast_and_wait(
task=task,
min_responses=self.nr_supported_clients,
wait_time_after_min_received=0,
fl_ctx=fl_ctx,
abort_signal=abort_signal,
)
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
try:
self._check_targets(fl_ctx)
            self.log_debug(fl_ctx, f"Training on {self.targets_names}")
# 1. initialize models on clients
self._init_models(abort_signal=abort_signal, fl_ctx=fl_ctx)
# 2. Start split learning
self._phase = AppConstants.PHASE_TRAIN
self._train(abort_signal=abort_signal, fl_ctx=fl_ctx)
self._phase = AppConstants.PHASE_FINISHED
self.log_debug(fl_ctx, "SplitNN training ended.")
except Exception as e:
error_msg = f"SplitNN control_flow exception {secure_format_exception(e)}"
self.log_error(fl_ctx, error_msg)
self.system_panic(error_msg, fl_ctx)
def stop_controller(self, fl_ctx: FLContext):
self._phase = AppConstants.PHASE_FINISHED
self.log_debug(fl_ctx, "controller stopped")
def process_result_of_unknown_task(
self,
client: Client,
task_name: str,
client_task_id: str,
result: Shareable,
fl_ctx: FLContext,
):
self.log_warning(fl_ctx, f"Dropped result of unknown task: {task_name} from client {client.name}.")
def handle_event(self, event_type: str, fl_ctx: FLContext):
super().handle_event(event_type, fl_ctx)
if event_type == InfoCollector.EVENT_TYPE_GET_STATS:
collector = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR, None)
if collector:
if not isinstance(collector, GroupInfoCollector):
raise TypeError("collector must be GroupInfoCollector but got {}".format(type(collector)))
collector.add_info(
group_name=self._name,
info={"phase": self._phase, "current_round": self._current_round, "num_rounds": self._num_rounds},
)
# Source: NVFlare-main / nvflare/app_common/workflows/splitnn_workflow.py
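# A hypothetical configuration sketch. Note that SplitNNController above hard-codes its two
# expected clients as "site-1" and "site-2" (self.targets_names), so the job must run with
# exactly those sites; the component IDs below are illustrative.
from nvflare.app_common.workflows.splitnn_workflow import SplitNNController

split_controller = SplitNNController(
    num_rounds=1000,  # total split-learning rounds
    persistor_id="persistor",  # must resolve to a LearnablePersistor component
    shareable_generator_id="shareable_generator",
    batch_size=64,
)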
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from typing import Dict, List, Optional, Union
from nvflare.apis.client import Client
from nvflare.apis.dxo import DXO, from_shareable
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Task
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.workflows.error_handling_controller import ErrorHandlingController
class BroadcastAndWait(FLComponent):
def __init__(self, fl_ctx: FLContext, controller: ErrorHandlingController):
super().__init__()
self.lock = threading.Lock()
self.fl_ctx = fl_ctx
self.controller = controller
self.task = None
        # results keyed by target (client) name: {client_name: DXO}
self.results: Dict[str, DXO] = {}
def broadcast_and_wait(
self,
task_name: str,
task_input: Shareable,
targets: Union[List[Client], List[str], None] = None,
task_props: Optional[Dict] = None,
min_responses: int = 1,
abort_signal: Signal = None,
) -> Dict[str, DXO]:
task = Task(name=task_name, data=task_input, result_received_cb=self.results_cb, props=task_props)
self.controller.broadcast_and_wait(task, self.fl_ctx, targets, min_responses, 0, abort_signal)
return self.results
def multicasts_and_wait(
self,
task_name: str,
task_inputs: Dict[str, Shareable],
abort_signal: Signal = None,
) -> Dict[str, DXO]:
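        # Send each client its own task (with client-specific input), then wait for every
        # task to complete before returning the combined results.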
tasks: Dict[str, Task] = self.get_tasks(task_name, task_inputs)
for client_name in tasks:
self.controller.broadcast(task=tasks[client_name], fl_ctx=self.fl_ctx, targets=[client_name])
for client_name in tasks:
self.log_info(self.fl_ctx, f"wait for client {client_name} task")
self.controller.wait_for_task(tasks[client_name], abort_signal)
return self.results
def get_tasks(self, task_name: str, task_inputs: Dict[str, Shareable]) -> Dict[str, Task]:
tasks = {}
for client_name in task_inputs:
task = Task(name=task_name, data=task_inputs[client_name], result_received_cb=self.results_cb)
tasks[client_name] = task
return tasks
    def update_result(self, client_name: str, dxo: DXO):
        # use a context manager so the lock is always released, even on error
        with self.lock:
            self.log_debug(self.fl_ctx, "Acquired a lock")
            self.results.update({client_name: dxo})
        self.log_debug(self.fl_ctx, "Released a lock")
def results_cb(self, client_task: ClientTask, fl_ctx: FLContext):
client_name = client_task.client.name
task_name = client_task.task.name
        self.log_info(fl_ctx, f"Processing {task_name} result from client {client_name}")
result = client_task.result
rc = result.get_return_code()
if rc == ReturnCode.OK:
self.log_info(fl_ctx, f"Received result from client:{client_name} for task {task_name} ")
dxo = from_shareable(result)
self.update_result(client_name, dxo)
else:
if rc in self.controller.abort_job_in_error.keys():
self.controller.handle_client_errors(rc, client_task, fl_ctx)
# Cleanup task result
client_task.result = None
# Source: NVFlare-main / nvflare/app_common/workflows/broadcast_operator.py
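# A usage sketch, assuming it runs inside an ErrorHandlingController subclass's control_flow
# (the task name and response count below are hypothetical): broadcast_and_wait returns the
# collected results as a {client_name: DXO} dict.
from nvflare.apis.shareable import Shareable
from nvflare.app_common.workflows.broadcast_operator import BroadcastAndWait

def run_round(self, abort_signal, fl_ctx):
    op = BroadcastAndWait(fl_ctx, self)  # self: an ErrorHandlingController
    results = op.broadcast_and_wait(
        task_name="my_task",  # hypothetical task name
        task_input=Shareable(),
        min_responses=2,
        abort_signal=abort_signal,
    )
    for client_name, dxo in results.items():
        self.log_info(fl_ctx, f"{client_name} returned a DXO of kind {dxo.data_kind}")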
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Callable, Dict, List, Optional
from nvflare.apis.client import Client
from nvflare.apis.dxo import from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Controller, Task
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.statistics_spec import Bin, Histogram, StatisticConfig
from nvflare.app_common.abstract.statistics_writer import StatisticsWriter
from nvflare.app_common.app_constant import StatisticsConstants as StC
from nvflare.app_common.statistics.numeric_stats import get_global_stats
from nvflare.app_common.statistics.statisitcs_objects_decomposer import fobs_registration
from nvflare.fuel.utils import fobs
class StatisticsController(Controller):
def __init__(
self,
statistic_configs: Dict[str, dict],
writer_id: str,
wait_time_after_min_received: int = 1,
result_wait_timeout: int = 10,
precision=4,
min_clients: Optional[int] = None,
enable_pre_run_task: bool = True,
):
"""Controller for Statistics.
Args:
statistic_configs: defines the input statistic to be computed and each statistic's configuration, see below for details.
writer_id: ID for StatisticsWriter. The StatisticWriter will save the result to output specified by the
StatisticsWriter
            wait_time_after_min_received: number of seconds to wait after the specified minimum number of
                clients have responded.
            result_wait_timeout: number of seconds to wait until all results are received.
                Note this applies after the min_clients responses have arrived; we then wait for the
                result-processing callbacks, which becomes important if the data size to be processed is large.
            precision: number of precision digits
            min_clients: if specified, the minimum number of clients we have to wait for before processing.
        For statistic_configs, the key is one of the statistic names sum, count, mean, stddev, histogram, and
        the value is the arguments needed. All statistics except histogram require no arguments.
.. code-block:: text
"statistic_configs": {
"count": {},
"mean": {},
"sum": {},
"stddev": {},
"histogram": {
"*": {"bins": 20},
"Age": {"bins": 10, "range": [0, 120]}
}
},
        Histogram requires the following arguments:
            1) number of bins or buckets of the histogram
            2) the histogram range values [min, max]
        These arguments can be different for each feature. Here are a few examples:
.. code-block:: text
"histogram": {
"*": {"bins": 20 },
"Age": {"bins": 10, "range":[0,120]}
}
        The configuration specifies that the
        feature 'Age' will have 10 bins covering the range [0, 120).
        For all other features, the default ("*") configuration is used, with bins = 20.
        The histogram range is not specified, which requires the Statistics controller
        to dynamically estimate the histogram range for each feature. The estimated global
        range (est. global min, est. global max) is then used as the histogram range.
        To dynamically estimate such a histogram range, we need each client to provide its local
        min and max values so that the global min and max can be calculated. To protect
        data privacy and avoid data leakage, a noise level is added to the local min/max
        values before they are sent to the controller. The controller therefore only gets 'estimated'
        values, and the global min/max are estimates or, more accurately, noised global min/max
        values.
Here is another example:
.. code-block:: text
"histogram": {
"density": {"bins": 10, "range":[0,120]}
}
In this example, there is no default histogram configuration for other features.
This will work correctly if there is only one feature called "density"
but will fail if there are other features in the dataset.
In the following configuration:
.. code-block:: text
"statistic_configs": {
"count": {},
"mean": {},
"stddev": {}
}
Only count, mean and stddev statistics are specified, so the statistics_controller
will only set tasks to calculate these three statistics.
"""
super().__init__()
self.statistic_configs: Dict[str, dict] = statistic_configs
self.writer_id = writer_id
self.task_name = StC.FED_STATS_TASK
self.client_statistics = {}
self.global_statistics = {}
self.client_features = {}
self.result_wait_timeout = result_wait_timeout
self.wait_time_after_min_received = wait_time_after_min_received
self.precision = precision
self.min_clients = min_clients
self.result_cb_status = {}
self.client_handshake_ok = {}
self.enable_pre_run_task = enable_pre_run_task
self.result_callback_fns: Dict[str, Callable] = {
StC.STATS_1st_STATISTICS: self.results_cb,
StC.STATS_2nd_STATISTICS: self.results_cb,
}
fobs_registration()
self.fl_ctx = None
self.abort_job_in_error = {
ReturnCode.EXECUTION_EXCEPTION: True,
ReturnCode.TASK_UNKNOWN: True,
ReturnCode.EXECUTION_RESULT_ERROR: False,
ReturnCode.TASK_DATA_FILTER_ERROR: True,
ReturnCode.TASK_RESULT_FILTER_ERROR: True,
}
def start_controller(self, fl_ctx: FLContext):
if self.statistic_configs is None or len(self.statistic_configs) == 0:
self.system_panic(
"At least one statistic_config must be configured for task StatisticsController", fl_ctx=fl_ctx
)
self.fl_ctx = fl_ctx
clients = fl_ctx.get_engine().get_clients()
if not self.min_clients:
self.min_clients = len(clients)
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
self.log_info(fl_ctx, f"{self.task_name} control flow started.")
if abort_signal.triggered:
return False
if self.enable_pre_run_task:
self.pre_run_task_flow(abort_signal, fl_ctx)
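        # Two-pass design: the 1st statistics task collects local counts/sums/means (and, for
        # histograms, locally estimated min/max); the 2nd task uses the resulting global values
        # (e.g. global count/mean for variance, global min/max for histogram bins).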
self.statistics_task_flow(abort_signal, fl_ctx, StC.STATS_1st_STATISTICS)
self.statistics_task_flow(abort_signal, fl_ctx, StC.STATS_2nd_STATISTICS)
if not StatisticsController._wait_for_all_results(
self.logger, self.result_wait_timeout, self.min_clients, self.client_statistics, 1.0, abort_signal
):
self.log_info(fl_ctx, f"task {self.task_name} timeout on wait for all results.")
return False
self.log_info(fl_ctx, "start post processing")
self.post_fn(self.task_name, fl_ctx)
self.log_info(fl_ctx, f"task {self.task_name} control flow end.")
def stop_controller(self, fl_ctx: FLContext):
pass
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
pass
def _get_all_statistic_configs(self) -> List[StatisticConfig]:
all_statistics = {
StC.STATS_COUNT: StatisticConfig(StC.STATS_COUNT, {}),
StC.STATS_FAILURE_COUNT: StatisticConfig(StC.STATS_FAILURE_COUNT, {}),
StC.STATS_SUM: StatisticConfig(StC.STATS_SUM, {}),
StC.STATS_MEAN: StatisticConfig(StC.STATS_MEAN, {}),
StC.STATS_VAR: StatisticConfig(StC.STATS_VAR, {}),
StC.STATS_STDDEV: StatisticConfig(StC.STATS_STDDEV, {}),
}
if StC.STATS_HISTOGRAM in self.statistic_configs:
hist_config = self.statistic_configs[StC.STATS_HISTOGRAM]
all_statistics[StC.STATS_MIN] = StatisticConfig(StC.STATS_MIN, hist_config)
all_statistics[StC.STATS_MAX] = StatisticConfig(StC.STATS_MAX, hist_config)
all_statistics[StC.STATS_HISTOGRAM] = StatisticConfig(StC.STATS_HISTOGRAM, hist_config)
return [all_statistics[k] for k in all_statistics if k in self.statistic_configs]
def pre_run_task_flow(self, abort_signal: Signal, fl_ctx: FLContext):
client_name = fl_ctx.get_identity_name()
self.log_info(fl_ctx, f"start pre_run task for client {client_name}")
inputs = Shareable()
target_statistics: List[StatisticConfig] = self._get_all_statistic_configs()
inputs[StC.STATS_TARGET_STATISTICS] = fobs.dumps(target_statistics)
results_cb_fn = self.results_pre_run_cb
if abort_signal.triggered:
return False
task = Task(name=StC.FED_STATS_PRE_RUN, data=inputs, result_received_cb=results_cb_fn)
self.broadcast_and_wait(
task=task,
targets=None,
min_responses=self.min_clients,
fl_ctx=fl_ctx,
wait_time_after_min_received=self.wait_time_after_min_received,
abort_signal=abort_signal,
)
self.log_info(fl_ctx, f" client {client_name} pre_run task flow end.")
def statistics_task_flow(self, abort_signal: Signal, fl_ctx: FLContext, statistic_task: str):
self.log_info(fl_ctx, f"start prepare inputs for task {statistic_task}")
inputs = self._prepare_inputs(statistic_task)
results_cb_fn = self._get_result_cb(statistic_task)
self.log_info(fl_ctx, f"task: {self.task_name} statistics_flow for {statistic_task} started.")
if abort_signal.triggered:
return False
task_props = {StC.STATISTICS_TASK_KEY: statistic_task}
task = Task(name=self.task_name, data=inputs, result_received_cb=results_cb_fn, props=task_props)
self.broadcast_and_wait(
task=task,
targets=None,
min_responses=self.min_clients,
fl_ctx=fl_ctx,
wait_time_after_min_received=self.wait_time_after_min_received,
abort_signal=abort_signal,
)
self.global_statistics = get_global_stats(self.global_statistics, self.client_statistics, statistic_task)
self.log_info(fl_ctx, f"task {self.task_name} statistics_flow for {statistic_task} flow end.")
def handle_client_errors(self, rc: str, client_task: ClientTask, fl_ctx: FLContext):
client_name = client_task.client.name
task_name = client_task.task.name
abort = self.abort_job_in_error[rc]
if abort:
            self.system_panic(
                f"Failed in client-site statistics_executor for {client_name} during task {task_name}. "
                f"Statistics controller is exiting.",
                fl_ctx=fl_ctx,
            )
self.log_info(fl_ctx, f"Execution failed for {client_name}")
else:
self.log_info(fl_ctx, f"Execution result is not received for {client_name}")
def results_pre_run_cb(self, client_task: ClientTask, fl_ctx: FLContext):
client_name = client_task.client.name
task_name = client_task.task.name
self.log_info(fl_ctx, f"Processing {task_name} pre_run from client {client_name}")
result = client_task.result
rc = result.get_return_code()
if rc == ReturnCode.OK:
self.log_info(fl_ctx, f"Received pre-run handshake result from client:{client_name} for task {task_name}")
            self.client_handshake_ok[client_name] = True
fl_ctx.set_prop(StC.PRE_RUN_RESULT, {client_name: from_shareable(result)})
self.fire_event(EventType.PRE_RUN_RESULT_AVAILABLE, fl_ctx)
else:
if rc in self.abort_job_in_error.keys():
self.handle_client_errors(rc, client_task, fl_ctx)
            self.client_handshake_ok[client_name] = False
# Cleanup task result
client_task.result = None
def results_cb(self, client_task: ClientTask, fl_ctx: FLContext):
client_name = client_task.client.name
task_name = client_task.task.name
self.log_info(fl_ctx, f"Processing {task_name} result from client {client_name}")
result = client_task.result
rc = result.get_return_code()
if rc == ReturnCode.OK:
self.log_info(fl_ctx, f"Received result entries from client:{client_name}, " f"for task {task_name}")
dxo = from_shareable(result)
client_result = dxo.data
statistics_task = client_result[StC.STATISTICS_TASK_KEY]
self.log_info(fl_ctx, f"handle client {client_name} results for statistics task: {statistics_task}")
statistics = fobs.loads(client_result[statistics_task])
for statistic in statistics:
if statistic not in self.client_statistics:
self.client_statistics[statistic] = {client_name: statistics[statistic]}
else:
self.client_statistics[statistic].update({client_name: statistics[statistic]})
            ds_features = client_result.get(StC.STATS_FEATURES, None)
            if ds_features:
                self.client_features.update({client_name: fobs.loads(ds_features)})
            self.result_cb_status[client_name] = {client_task.task.props[StC.STATISTICS_TASK_KEY]: True}
        elif rc in self.abort_job_in_error.keys():
            self.handle_client_errors(rc, client_task, fl_ctx)
            self.result_cb_status[client_name] = {client_task.task.props[StC.STATISTICS_TASK_KEY]: False}
        else:
            self.result_cb_status[client_name] = {client_task.task.props[StC.STATISTICS_TASK_KEY]: True}
# Cleanup task result
client_task.result = None
def _validate_min_clients(self, min_clients: int, client_statistics: dict) -> bool:
self.logger.info("check if min_client result received for all features")
resulting_clients = {}
for statistic in client_statistics:
clients = client_statistics[statistic].keys()
if len(clients) < min_clients:
return False
for client in clients:
ds_feature_statistics = client_statistics[statistic][client]
for ds_name in ds_feature_statistics:
if ds_name not in resulting_clients:
resulting_clients[ds_name] = set()
if ds_feature_statistics[ds_name]:
resulting_clients[ds_name].update([client])
for ds in resulting_clients:
if len(resulting_clients[ds]) < min_clients:
return False
return True
def post_fn(self, task_name: str, fl_ctx: FLContext):
ok_to_proceed = self._validate_min_clients(self.min_clients, self.client_statistics)
if not ok_to_proceed:
            self.system_panic(f"not all required {self.min_clients} client results received, aborting the job.", fl_ctx)
else:
self.log_info(fl_ctx, "combine all clients' statistics")
ds_stats = self._combine_all_statistics()
self.log_info(fl_ctx, "save statistics result to persistence store")
writer: StatisticsWriter = fl_ctx.get_engine().get_component(self.writer_id)
writer.save(ds_stats, overwrite_existing=True, fl_ctx=fl_ctx)
def _combine_all_statistics(self):
result = {}
filtered_client_statistics = [
statistic for statistic in self.client_statistics if statistic in self.statistic_configs
]
filtered_global_statistics = [
statistic for statistic in self.global_statistics if statistic in self.statistic_configs
]
for statistic in filtered_client_statistics:
for client in self.client_statistics[statistic]:
for ds in self.client_statistics[statistic][client]:
client_dataset = f"{client}-{ds}"
for feature_name in self.client_statistics[statistic][client][ds]:
if feature_name not in result:
result[feature_name] = {}
if statistic not in result[feature_name]:
result[feature_name][statistic] = {}
if statistic == StC.STATS_HISTOGRAM:
hist: Histogram = self.client_statistics[statistic][client][ds][feature_name]
buckets = StatisticsController._apply_histogram_precision(hist.bins, self.precision)
result[feature_name][statistic][client_dataset] = buckets
else:
result[feature_name][statistic][client_dataset] = round(
self.client_statistics[statistic][client][ds][feature_name], self.precision
)
precision = self.precision
for statistic in filtered_global_statistics:
for ds in self.global_statistics[statistic]:
global_dataset = f"{StC.GLOBAL}-{ds}"
for feature_name in self.global_statistics[statistic][ds]:
if statistic == StC.STATS_HISTOGRAM:
hist: Histogram = self.global_statistics[statistic][ds][feature_name]
buckets = StatisticsController._apply_histogram_precision(hist.bins, self.precision)
result[feature_name][statistic][global_dataset] = buckets
else:
result[feature_name][statistic].update(
{global_dataset: round(self.global_statistics[statistic][ds][feature_name], precision)}
)
return result
@staticmethod
def _apply_histogram_precision(bins: List[Bin], precision) -> List[Bin]:
buckets = []
for bucket in bins:
buckets.append(
Bin(
round(bucket.low_value, precision),
round(bucket.high_value, precision),
bucket.sample_count,
)
)
return buckets
@staticmethod
def _get_target_statistics(statistic_configs: dict, ordered_statistics: list) -> List[StatisticConfig]:
# make sure the execution order of the statistics calculation
targets = []
if statistic_configs:
for statistic in statistic_configs:
            # If the target statistics include histogram, and we are not in the 2nd statistics task,
            # we may need to estimate the global min/max:
            # if the user provided the global min/max (range) for a feature, we do nothing;
            # otherwise we ask each client to provide a locally estimated min/max for that feature,
            # and use those local estimates to estimate the global min/max.
            # To do that, the local min/max are calculated in the 1st statistics task.
            # In all cases, we still send the STATS_MIN/MAX tasks, but the client executor may or may not
            # delegate to the stats generator to calculate the local min/max, depending on whether the
            # global bin ranges are specified. For this reason, we send the histogram configuration along
            # when calculating the local min/max.
if statistic == StC.STATS_HISTOGRAM and statistic not in ordered_statistics:
targets.append(StatisticConfig(StC.STATS_MIN, statistic_configs[StC.STATS_HISTOGRAM]))
targets.append(StatisticConfig(StC.STATS_MAX, statistic_configs[StC.STATS_HISTOGRAM]))
if statistic == StC.STATS_STDDEV and statistic in ordered_statistics:
targets.append(StatisticConfig(StC.STATS_VAR, {}))
for rm in ordered_statistics:
if rm == statistic:
targets.append(StatisticConfig(statistic, statistic_configs[statistic]))
return targets
def _prepare_inputs(self, statistic_task: str) -> Shareable:
inputs = Shareable()
target_statistics: List[StatisticConfig] = StatisticsController._get_target_statistics(
self.statistic_configs, StC.ordered_statistics[statistic_task]
)
for tm in target_statistics:
if tm.name == StC.STATS_HISTOGRAM:
if StC.STATS_MIN in self.global_statistics:
inputs[StC.STATS_MIN] = self.global_statistics[StC.STATS_MIN]
if StC.STATS_MAX in self.global_statistics:
inputs[StC.STATS_MAX] = self.global_statistics[StC.STATS_MAX]
if tm.name == StC.STATS_VAR:
if StC.STATS_COUNT in self.global_statistics:
inputs[StC.STATS_GLOBAL_COUNT] = self.global_statistics[StC.STATS_COUNT]
if StC.STATS_MEAN in self.global_statistics:
inputs[StC.STATS_GLOBAL_MEAN] = self.global_statistics[StC.STATS_MEAN]
inputs[StC.STATISTICS_TASK_KEY] = statistic_task
inputs[StC.STATS_TARGET_STATISTICS] = fobs.dumps(target_statistics)
return inputs
@staticmethod
def _wait_for_all_results(
logger,
result_wait_timeout: float,
requested_client_size: int,
client_statistics: dict,
sleep_time: float = 1,
abort_signal=None,
) -> bool:
"""Waits for all results.
        For each statistic, we check whether the number of requested clients (min_clients or all clients)
        have responded; if not, we wait up to result_wait_timeout.
        result_wait_timeout is reset for the next statistic; it is a per-statistic timeout, not an overall
        timeout for all results.
Args:
result_wait_timeout: timeout we have to wait for each statistic. reset for each statistic
requested_client_size: requested client size, usually min_clients or all clients
client_statistics: client specific statistics received so far
abort_signal: abort signal
Returns: False, when job is aborted else True
"""
# record of each statistic, number of clients processed
statistics_client_received = {}
# current statistics obtained so far (across all clients)
statistic_names = client_statistics.keys()
for m in statistic_names:
statistics_client_received[m] = len(client_statistics[m].keys())
timeout = result_wait_timeout
for m in statistics_client_received:
if requested_client_size > statistics_client_received[m]:
t = 0
while t < timeout and requested_client_size > statistics_client_received[m]:
if abort_signal and abort_signal.triggered:
return False
                    msg = (
                        f"not all clients have reported statistic '{m}', waiting for {sleep_time} seconds. "
                        f"currently available clients are '{client_statistics[m].keys()}'."
                    )
logger.info(msg)
time.sleep(sleep_time)
t += sleep_time
# check and update number of client processed for statistics again
statistics_client_received[m] = len(client_statistics[m].keys())
return True
def _get_result_cb(self, statistics_task: str):
return self.result_callback_fns[statistics_task]
# Source: NVFlare-main / nvflare/app_common/workflows/statistics_controller.py
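# A configuration sketch mirroring the docstring above; writer_id must match a StatisticsWriter
# component configured in the same job (the component ID and client count below are assumed):
from nvflare.app_common.workflows.statistics_controller import StatisticsController

statistic_configs = {
    "count": {},
    "mean": {},
    "sum": {},
    "stddev": {},
    "histogram": {
        "*": {"bins": 20},  # default for all features
        "Age": {"bins": 10, "range": [0, 120]},
    },
}
stats_controller = StatisticsController(
    statistic_configs=statistic_configs,
    writer_id="stats_writer",  # hypothetical StatisticsWriter component ID
    min_clients=2,
)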
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
from typing import Union
from nvflare.apis.client import Client
from nvflare.apis.controller_spec import ClientTask, Task
from nvflare.apis.dxo import DXO, from_bytes, from_shareable, get_leaf_dxos
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import Controller
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.apis.workspace import Workspace
from nvflare.app_common.abstract.formatter import Formatter
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_common.app_constant import AppConstants, ModelName
from nvflare.app_common.app_event_type import AppEventType
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.info_collector import GroupInfoCollector, InfoCollector
class CrossSiteModelEval(Controller):
def __init__(
self,
task_check_period=0.5,
cross_val_dir=AppConstants.CROSS_VAL_DIR,
submit_model_timeout=600,
validation_timeout: int = 6000,
model_locator_id="",
formatter_id="",
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
validation_task_name=AppConstants.TASK_VALIDATION,
cleanup_models=False,
participating_clients=None,
wait_for_clients_timeout=300,
):
"""Cross Site Model Validation workflow.
Args:
task_check_period (float, optional): How often to check for new tasks or tasks being finished.
Defaults to 0.5.
cross_val_dir (str, optional): Path to cross site validation directory relative to run directory.
Defaults to "cross_site_val".
submit_model_timeout (int, optional): Timeout of submit_model_task. Defaults to 600 secs.
validation_timeout (int, optional): Timeout for validate_model task. Defaults to 6000 secs.
model_locator_id (str, optional): ID for model_locator component. Defaults to "".
formatter_id (str, optional): ID for formatter component. Defaults to "".
            submit_model_task_name (str, optional): Name of submit_model task. Defaults to AppConstants.TASK_SUBMIT_MODEL.
            validation_task_name (str, optional): Name of validate_model task. Defaults to AppConstants.TASK_VALIDATION.
cleanup_models (bool, optional): Whether or not models should be deleted after run. Defaults to False.
participating_clients (list, optional): List of participating client names. If not provided, defaults
to all clients connected at start of controller.
wait_for_clients_timeout (int, optional): Timeout for clients to appear. Defaults to 300 secs
"""
super().__init__(task_check_period=task_check_period)
if not isinstance(task_check_period, float):
raise TypeError("task_check_period must be float but got {}".format(type(task_check_period)))
if not isinstance(cross_val_dir, str):
raise TypeError("cross_val_dir must be a string but got {}".format(type(cross_val_dir)))
if not isinstance(submit_model_timeout, int):
raise TypeError("submit_model_timeout must be int but got {}".format(type(submit_model_timeout)))
if not isinstance(validation_timeout, int):
raise TypeError("validation_timeout must be int but got {}".format(type(validation_timeout)))
if not isinstance(model_locator_id, str):
raise TypeError("model_locator_id must be a string but got {}".format(type(model_locator_id)))
if not isinstance(formatter_id, str):
raise TypeError("formatter_id must be a string but got {}".format(type(formatter_id)))
if not isinstance(submit_model_task_name, str):
raise TypeError("submit_model_task_name must be a string but got {}".format(type(submit_model_task_name)))
if not isinstance(validation_task_name, str):
raise TypeError("validation_task_name must be a string but got {}".format(type(validation_task_name)))
if not isinstance(cleanup_models, bool):
raise TypeError("cleanup_models must be bool but got {}".format(type(cleanup_models)))
if participating_clients:
if not isinstance(participating_clients, list):
raise TypeError("participating_clients must be a list but got {}".format(type(participating_clients)))
if not all(isinstance(x, str) for x in participating_clients):
raise TypeError("participating_clients must be strings")
if submit_model_timeout < 0:
raise ValueError("submit_model_timeout must be greater than or equal to 0.")
if validation_timeout < 0:
            raise ValueError("validation_timeout must be greater than or equal to 0.")
if wait_for_clients_timeout < 0:
raise ValueError("wait_for_clients_timeout must be greater than or equal to 0.")
self._cross_val_dir = cross_val_dir
self._model_locator_id = model_locator_id
self._formatter_id = formatter_id
self._submit_model_task_name = submit_model_task_name
self._validation_task_name = validation_task_name
self._submit_model_timeout = submit_model_timeout
self._validation_timeout = validation_timeout
self._wait_for_clients_timeout = wait_for_clients_timeout
self._cleanup_models = cleanup_models
self._participating_clients = participating_clients
self._val_results = {}
self._server_models = {}
self._client_models = {}
self._formatter = None
self._cross_val_models_dir = None
self._cross_val_results_dir = None
self._model_locator = None
def start_controller(self, fl_ctx: FLContext):
# If the list of participating clients is not provided, include all clients currently available.
if not self._participating_clients:
clients = self._engine.get_clients()
self._participating_clients = [c.name for c in clients]
# Create shareable dirs for models and results
workspace: Workspace = self._engine.get_workspace()
run_dir = workspace.get_run_dir(fl_ctx.get_job_id())
cross_val_path = os.path.join(run_dir, self._cross_val_dir)
self._cross_val_models_dir = os.path.join(cross_val_path, AppConstants.CROSS_VAL_MODEL_DIR_NAME)
self._cross_val_results_dir = os.path.join(cross_val_path, AppConstants.CROSS_VAL_RESULTS_DIR_NAME)
# Fire the init event.
fl_ctx.set_prop(AppConstants.CROSS_VAL_MODEL_PATH, self._cross_val_models_dir)
fl_ctx.set_prop(AppConstants.CROSS_VAL_RESULTS_PATH, self._cross_val_results_dir)
self.fire_event(AppEventType.CROSS_VAL_INIT, fl_ctx)
# Cleanup/create the cross val models and results directories
if os.path.exists(self._cross_val_models_dir):
shutil.rmtree(self._cross_val_models_dir)
if os.path.exists(self._cross_val_results_dir):
shutil.rmtree(self._cross_val_results_dir)
# Recreate new directories.
os.makedirs(self._cross_val_models_dir)
os.makedirs(self._cross_val_results_dir)
# Get components
if self._model_locator_id:
self._model_locator = self._engine.get_component(self._model_locator_id)
if not isinstance(self._model_locator, ModelLocator):
self.system_panic(
reason="bad model locator {}: expect ModelLocator but got {}".format(
self._model_locator_id, type(self._model_locator)
),
fl_ctx=fl_ctx,
)
return
if self._formatter_id:
self._formatter = self._engine.get_component(self._formatter_id)
if not isinstance(self._formatter, Formatter):
self.system_panic(
reason=f"formatter {self._formatter_id} is not an instance of Formatter.", fl_ctx=fl_ctx
)
return
if not self._formatter:
self.log_info(fl_ctx, "Formatter not found. Stats will not be printed.")
for c_name in self._participating_clients:
self._client_models[c_name] = None
self._val_results[c_name] = {}
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
try:
# wait until there are some clients
start_time = time.time()
while not self._participating_clients:
self._participating_clients = [c.name for c in self._engine.get_clients()]
if time.time() - start_time > self._wait_for_clients_timeout:
self.log_info(fl_ctx, "No clients available - quit model validation.")
return
self.log_info(fl_ctx, "No clients available - waiting ...")
time.sleep(2.0)
if abort_signal.triggered:
self.log_info(fl_ctx, "Abort signal triggered. Finishing model validation.")
return
self.log_info(fl_ctx, f"Beginning model validation with clients: {self._participating_clients}.")
if self._submit_model_task_name:
shareable = Shareable()
shareable.set_header(AppConstants.SUBMIT_MODEL_NAME, ModelName.BEST_MODEL)
submit_model_task = Task(
name=self._submit_model_task_name,
data=shareable,
result_received_cb=self._receive_local_model_cb,
timeout=self._submit_model_timeout,
)
self.broadcast(
task=submit_model_task,
targets=self._participating_clients,
fl_ctx=fl_ctx,
min_responses=len(self._participating_clients),
)
if abort_signal.triggered:
self.log_info(fl_ctx, "Abort signal triggered. Finishing model validation.")
return
# Load server models and assign those tasks
if self._model_locator:
success = self._locate_server_models(fl_ctx)
if not success:
return
for server_model in self._server_models:
self._send_validation_task(server_model, fl_ctx)
else:
self.log_info(fl_ctx, "ModelLocator not present. No server models will be included.")
while self.get_num_standing_tasks():
if abort_signal.triggered:
self.log_info(fl_ctx, "Abort signal triggered. Finishing cross site validation.")
return
self.log_debug(fl_ctx, "Checking standing tasks to see if cross site validation finished.")
time.sleep(self._task_check_period)
except Exception as e:
error_msg = f"Exception in cross site validator control_flow: {secure_format_exception(e)}"
self.log_exception(fl_ctx, error_msg)
self.system_panic(error_msg, fl_ctx)
def stop_controller(self, fl_ctx: FLContext):
self.cancel_all_tasks(fl_ctx=fl_ctx)
if self._cleanup_models:
self.log_info(fl_ctx, "Removing local models kept for validation.")
for model_name, model_path in self._server_models.items():
if model_path and os.path.isfile(model_path):
os.remove(model_path)
self.log_debug(fl_ctx, f"Removing server model {model_name} at {model_path}.")
for model_name, model_path in self._client_models.items():
if model_path and os.path.isfile(model_path):
os.remove(model_path)
self.log_debug(fl_ctx, f"Removing client {model_name}'s model at {model_path}.")
def _receive_local_model_cb(self, client_task: ClientTask, fl_ctx: FLContext):
client_name = client_task.client.name
result: Shareable = client_task.result
self._accept_local_model(client_name=client_name, result=result, fl_ctx=fl_ctx)
# Cleanup task result
client_task.result = None
def _before_send_validate_task_cb(self, client_task: ClientTask, fl_ctx: FLContext):
model_name = client_task.task.props[AppConstants.MODEL_OWNER]
try:
model_dxo: DXO = self._load_validation_content(model_name, self._cross_val_models_dir, fl_ctx)
except ValueError as e:
reason = f"Error in loading model shareable for {model_name}: {secure_format_exception(e)}. CrossSiteModelEval exiting."
self.log_error(fl_ctx, reason)
self.system_panic(reason, fl_ctx)
return
if not model_dxo:
self.system_panic(
f"Model contents for {model_name} not found in {self._cross_val_models_dir}. "
"CrossSiteModelEval exiting",
fl_ctx=fl_ctx,
)
return
model_shareable = model_dxo.to_shareable()
model_shareable.set_header(AppConstants.MODEL_OWNER, model_name)
model_shareable.add_cookie(AppConstants.MODEL_OWNER, model_name)
client_task.task.data = model_shareable
fl_ctx.set_prop(AppConstants.DATA_CLIENT, client_task.client, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.MODEL_OWNER, model_name, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.MODEL_TO_VALIDATE, model_shareable, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.PARTICIPATING_CLIENTS, self._participating_clients, private=True, sticky=False)
self.fire_event(AppEventType.SEND_MODEL_FOR_VALIDATION, fl_ctx)
def _after_send_validate_task_cb(self, client_task: ClientTask, fl_ctx: FLContext):
# Once task is sent clear data to restore memory
client_task.task.data = None
def _receive_val_result_cb(self, client_task: ClientTask, fl_ctx: FLContext):
# Find name of the client sending this
result = client_task.result
client_name = client_task.client.name
self._accept_val_result(client_name=client_name, result=result, fl_ctx=fl_ctx)
client_task.result = None
def _locate_server_models(self, fl_ctx: FLContext) -> bool:
# Load models from model_locator
self.log_info(fl_ctx, "Locating server models.")
server_model_names = self._model_locator.get_model_names(fl_ctx)
unique_names = []
for name in server_model_names:
# Get the model
dxo = self._model_locator.locate_model(name, fl_ctx)
if not isinstance(dxo, DXO):
self.system_panic(f"ModelLocator produced invalid data: expect DXO but got {type(dxo)}.", fl_ctx)
return False
# Save to workspace
unique_name = "SRV_" + name
unique_names.append(unique_name)
try:
save_path = self._save_dxo_content(unique_name, self._cross_val_models_dir, dxo, fl_ctx)
except:
self.log_exception(fl_ctx, f"Unable to save shareable contents of server model {unique_name}")
self.system_panic(f"Unable to save shareable contents of server model {unique_name}", fl_ctx)
return False
self._server_models[unique_name] = save_path
self._val_results[unique_name] = {}
if unique_names:
self.log_info(fl_ctx, f"Server models loaded: {unique_names}.")
else:
self.log_info(fl_ctx, "no server models to validate!")
return True
def _accept_local_model(self, client_name: str, result: Shareable, fl_ctx: FLContext):
fl_ctx.set_prop(AppConstants.RECEIVED_MODEL, result, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.RECEIVED_MODEL_OWNER, client_name, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.CROSS_VAL_DIR, self._cross_val_dir, private=True, sticky=False)
self.fire_event(AppEventType.RECEIVE_BEST_MODEL, fl_ctx)
# get return code
rc = result.get_return_code()
if rc and rc != ReturnCode.OK:
# Raise errors if bad peer context or execution exception.
if rc in [ReturnCode.MISSING_PEER_CONTEXT, ReturnCode.BAD_PEER_CONTEXT]:
self.log_error(fl_ctx, "Peer context is bad or missing. No model submitted for this client.")
elif rc in [ReturnCode.EXECUTION_EXCEPTION, ReturnCode.TASK_UNKNOWN]:
self.log_error(
fl_ctx, "Execution Exception on client during model submission. No model submitted for this client."
)
# Ignore contribution if result invalid.
            elif rc in [
                ReturnCode.EXECUTION_RESULT_ERROR,
                ReturnCode.TASK_DATA_FILTER_ERROR,
                ReturnCode.TASK_RESULT_FILTER_ERROR,
            ]:
self.log_error(fl_ctx, "Execution result is not a shareable. Model submission will be ignored.")
else:
self.log_error(fl_ctx, "Return code set. Model submission from client will be ignored.")
else:
# Save shareable in models directory.
try:
self.log_debug(fl_ctx, "Extracting DXO from shareable.")
dxo = from_shareable(result)
except ValueError as e:
self.log_error(
fl_ctx,
f"Ignored bad result from {client_name}: {secure_format_exception(e)}",
)
return
# The DXO could contain multiple sub-DXOs (e.g. received from a T2 system)
leaf_dxos, errors = get_leaf_dxos(dxo, client_name)
if errors:
for err in errors:
self.log_error(fl_ctx, f"Bad result from {client_name}: {err}")
for k, v in leaf_dxos.items():
self._save_client_model(k, v, fl_ctx)
def _save_client_model(self, model_name: str, dxo: DXO, fl_ctx: FLContext):
save_path = self._save_dxo_content(model_name, self._cross_val_models_dir, dxo, fl_ctx)
self.log_info(fl_ctx, f"Saved client model {model_name} to {save_path}")
self._client_models[model_name] = save_path
# Send a model to this client to validate
self._send_validation_task(model_name, fl_ctx)
def _send_validation_task(self, model_name: str, fl_ctx: FLContext):
self.log_info(fl_ctx, f"Sending {model_name} model to all participating clients for validation.")
# Create validation task and broadcast to all participating clients.
task = Task(
name=self._validation_task_name,
data=Shareable(),
before_task_sent_cb=self._before_send_validate_task_cb,
after_task_sent_cb=self._after_send_validate_task_cb,
result_received_cb=self._receive_val_result_cb,
timeout=self._validation_timeout,
props={AppConstants.MODEL_OWNER: model_name},
)
self.broadcast(
task=task,
fl_ctx=fl_ctx,
targets=self._participating_clients,
min_responses=len(self._participating_clients),
wait_time_after_min_received=0,
)
def _accept_val_result(self, client_name: str, result: Shareable, fl_ctx: FLContext):
model_owner = result.get_cookie(AppConstants.MODEL_OWNER, "")
# Fire event. This needs to be a new local context per each client
fl_ctx.set_prop(AppConstants.MODEL_OWNER, model_owner, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.DATA_CLIENT, client_name, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.VALIDATION_RESULT, result, private=True, sticky=False)
self.fire_event(AppEventType.VALIDATION_RESULT_RECEIVED, fl_ctx)
rc = result.get_return_code()
if rc and rc != ReturnCode.OK:
# Raise errors if bad peer context or execution exception.
if rc in [ReturnCode.MISSING_PEER_CONTEXT, ReturnCode.BAD_PEER_CONTEXT]:
self.log_error(fl_ctx, "Peer context is bad or missing.")
elif rc in [ReturnCode.EXECUTION_EXCEPTION, ReturnCode.TASK_UNKNOWN]:
self.log_error(fl_ctx, "Execution Exception in model validation.")
elif rc in [
ReturnCode.EXECUTION_RESULT_ERROR,
ReturnCode.TASK_DATA_FILTER_ERROR,
ReturnCode.TASK_RESULT_FILTER_ERROR,
]:
self.log_error(fl_ctx, "Execution result is not a shareable. Validation results will be ignored.")
else:
self.log_error(
fl_ctx,
f"Client {client_name} sent results for validating {model_owner} model with return code set."
" Logging empty results.",
)
if client_name not in self._val_results:
self._val_results[client_name] = {}
self._val_results[client_name][model_owner] = {}
else:
try:
dxo = from_shareable(result)
except ValueError as e:
reason = (
f"Bad validation result from {client_name} on model {model_owner}. "
f"Exception: {secure_format_exception(e)}"
)
self.log_exception(fl_ctx, reason)
return
# The DXO could contain multiple sub-DXOs (e.g. received from a T2 system)
leaf_dxos, errors = get_leaf_dxos(dxo, client_name)
if errors:
for err in errors:
self.log_error(fl_ctx, f"Bad result from {client_name}: {err}")
for k, v in leaf_dxos.items():
self._save_validation_result(k, model_owner, v, fl_ctx)
def _save_validation_result(self, client_name: str, model_name: str, dxo, fl_ctx):
file_name = client_name + "_" + model_name
file_path = self._save_dxo_content(file_name, self._cross_val_results_dir, dxo, fl_ctx)
client_results = self._val_results.get(client_name, None)
if not client_results:
client_results = {}
self._val_results[client_name] = client_results
client_results[model_name] = file_path
self.log_info(
fl_ctx, f"Saved validation result from client '{client_name}' on model '{model_name}' in {file_path}"
)
def _save_dxo_content(self, name: str, save_dir: str, dxo: DXO, fl_ctx: FLContext) -> str:
"""Saves shareable to given directory within the app_dir.
Args:
name (str): Name of shareable
save_dir (str): Relative path to directory in which to save
dxo (DXO): DXO object
fl_ctx (FLContext): FLContext object
Returns:
str: Path to the file saved.
"""
# Save the model with name as the filename to shareable directory
data_filename = os.path.join(save_dir, name)
try:
bytes_to_save = dxo.to_bytes()
except Exception as e:
raise ValueError(f"Unable to extract shareable contents. Exception: {(secure_format_exception(e))}")
# Save contents to path
try:
with open(data_filename, "wb") as f:
f.write(bytes_to_save)
except Exception as e:
raise ValueError(f"Unable to save DXO content: {secure_format_exception(e)}")
return data_filename
def _load_validation_content(self, name: str, load_dir: str, fl_ctx: FLContext) -> Union[DXO, None]:
# Load shareable from disk
shareable_filename = os.path.join(load_dir, name)
# load shareable
try:
with open(shareable_filename, "rb") as f:
data = f.read()
dxo: DXO = from_bytes(data)
self.log_debug(fl_ctx, f"Loading cross validation shareable content with name: {name}.")
except Exception as e:
raise ValueError(f"Exception in loading shareable content for {name}: {secure_format_exception(e)}")
return dxo
def handle_event(self, event_type: str, fl_ctx: FLContext):
super().handle_event(event_type=event_type, fl_ctx=fl_ctx)
if event_type == InfoCollector.EVENT_TYPE_GET_STATS:
if self._formatter:
collector = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR, None)
if collector:
if not isinstance(collector, GroupInfoCollector):
raise TypeError("collector must be GroupInfoCollector but got {}".format(type(collector)))
fl_ctx.set_prop(AppConstants.VALIDATION_RESULT, self._val_results, private=True, sticky=False)
val_info = self._formatter.format(fl_ctx)
collector.add_info(
group_name=self._name,
info={"val_results": val_info},
)
else:
self.log_warning(fl_ctx, "No formatter provided. Validation results can't be printed.")
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
if task_name == self._submit_model_task_name:
self._accept_local_model(client_name=client.name, result=result, fl_ctx=fl_ctx)
elif task_name == self._validation_task_name:
self._accept_val_result(client_name=client.name, result=result, fl_ctx=fl_ctx)
else:
self.log_error(fl_ctx, "Ignoring result from unknown task.")
| NVFlare-main | nvflare/app_common/workflows/cross_site_model_eval.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.app_common.app_constant import AppConstants
from .cross_site_model_eval import CrossSiteModelEval
class GlobalModelEval(CrossSiteModelEval):
def __init__(
self,
task_check_period=0.5,
cross_val_dir=AppConstants.CROSS_VAL_DIR,
validation_timeout: int = 6000,
model_locator_id="",
formatter_id="",
validation_task_name=AppConstants.TASK_VALIDATION,
cleanup_models=False,
participating_clients=None,
wait_for_clients_timeout=300,
):
"""Cross Site Model Validation workflow.
Args:
task_check_period (float, optional): How often to check for new tasks or tasks being finished.
Defaults to 0.5.
cross_val_dir (str, optional): Path to cross site validation directory relative to run directory.
Defaults to "cross_site_val".
validation_timeout (int, optional): Timeout for validate_model task. Defaults to 6000.
            model_locator_id (str): ID of the model_locator component. Required.
            formatter_id (str, optional): ID of the formatter component. Defaults to "".
validation_task_name (str, optional): Name of validate_model task. Defaults to "validate".
cleanup_models (bool, optional): Whether models should be deleted after run. Defaults to False.
participating_clients (list, optional): List of participating client names. If not provided, defaults
to all clients connected at start of controller.
wait_for_clients_timeout (int, optional): Timeout for clients to appear. Defaults to 300 secs
"""
if not model_locator_id:
raise ValueError("missing required model_locator_id")
CrossSiteModelEval.__init__(
self,
task_check_period=task_check_period,
cross_val_dir=cross_val_dir,
validation_timeout=validation_timeout,
model_locator_id=model_locator_id,
formatter_id=formatter_id,
validation_task_name=validation_task_name,
submit_model_task_name="",
cleanup_models=cleanup_models,
participating_clients=participating_clients,
wait_for_clients_timeout=wait_for_clients_timeout,
)
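# --- Hypothetical configuration sketch; not part of the original module ---
# A typical server-side workflow entry wiring this controller to a model
# locator component (the component ids below are placeholders):
#
#     {
#         "id": "cross_site_validate",
#         "path": "nvflare.app_common.workflows.global_model_eval.GlobalModelEval",
#         "args": {"model_locator_id": "model_locator"}
#     }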
| NVFlare-main | nvflare/app_common/workflows/global_model_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/job_schedulers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import threading
import time
from typing import Dict, List, Optional, Tuple
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.job_def import ALL_SITES, SERVER_SITE_NAME, Job, JobMetaKey, RunStatus
from nvflare.apis.job_def_manager_spec import JobDefManagerSpec
from nvflare.apis.job_scheduler_spec import DispatchInfo, JobSchedulerSpec
from nvflare.apis.server_engine_spec import ServerEngineSpec
SCHEDULE_RESULT_OK = 0 # the job is scheduled
SCHEDULE_RESULT_NO_RESOURCE = 1 # job is not scheduled due to lack of resources
SCHEDULE_RESULT_BLOCK = 2 # job is to be blocked from scheduled again due to fatal error
class DefaultJobScheduler(JobSchedulerSpec, FLComponent):
def __init__(
self,
max_jobs: int = 1,
max_schedule_count: int = 10,
min_schedule_interval: float = 10.0,
max_schedule_interval: float = 600.0,
):
"""
Create a DefaultJobScheduler
Args:
max_jobs: max number of concurrent jobs allowed
max_schedule_count: max number of times to try to schedule a job
min_schedule_interval: min interval between two schedules
max_schedule_interval: max interval between two schedules
"""
super().__init__()
self.max_jobs = max_jobs
self.max_schedule_count = max_schedule_count
self.min_schedule_interval = min_schedule_interval
self.max_schedule_interval = max_schedule_interval
self.scheduled_jobs = []
self.lock = threading.Lock()
def _check_client_resources(
self, job_id: str, resource_reqs: Dict[str, dict], fl_ctx: FLContext
) -> Dict[str, Tuple[bool, str]]:
"""Checks resources on each site.
Args:
resource_reqs (dict): {client_name: resource_requirements}
Returns:
A dict of {client_name: client_check_result}.
client_check_result is a tuple of (is_resource_enough, token);
is_resource_enough is a bool indicates whether there is enough resources;
token is for resource reservation / cancellation for this check request.
"""
engine = fl_ctx.get_engine()
if not isinstance(engine, ServerEngineSpec):
raise RuntimeError(f"engine inside fl_ctx should be of type ServerEngineSpec, but got {type(engine)}.")
result = engine.check_client_resources(job_id, resource_reqs)
self.log_debug(fl_ctx, f"check client resources result: {result}")
return result
def _cancel_resources(
self, resource_reqs: Dict[str, dict], resource_check_results: Dict[str, Tuple[bool, str]], fl_ctx: FLContext
):
"""Cancels any reserved resources based on resource check results.
Args:
resource_reqs (dict): {client_name: resource_requirements}
resource_check_results: A dict of {client_name: client_check_result}
where client_check_result is a tuple of {is_resource_enough, resource reserve token if any}
fl_ctx: FL context
"""
engine = fl_ctx.get_engine()
if not isinstance(engine, ServerEngineSpec):
raise RuntimeError(f"engine inside fl_ctx should be of type ServerEngineSpec, but got {type(engine)}.")
engine.cancel_client_resources(resource_check_results, resource_reqs)
self.log_debug(fl_ctx, f"cancel client resources using check results: {resource_check_results}")
return False, None
def _try_job(self, job: Job, fl_ctx: FLContext) -> (int, Optional[Dict[str, DispatchInfo]], str):
engine = fl_ctx.get_engine()
online_clients = engine.get_clients()
online_site_names = [x.name for x in online_clients]
if not job.deploy_map:
self.log_error(fl_ctx, f"Job '{job.job_id}' does not have deploy_map, can't be scheduled.")
return SCHEDULE_RESULT_BLOCK, None, "no deploy map"
applicable_sites = []
sites_to_app = {}
for app_name in job.deploy_map:
for site_name in job.deploy_map[app_name]:
if site_name.upper() == ALL_SITES:
# deploy_map: {"app_name": ["ALL_SITES"]} will be treated as deploying to all online clients
applicable_sites = online_site_names
sites_to_app = {x: app_name for x in online_site_names}
sites_to_app[SERVER_SITE_NAME] = app_name
elif site_name in online_site_names:
applicable_sites.append(site_name)
sites_to_app[site_name] = app_name
elif site_name == SERVER_SITE_NAME:
sites_to_app[SERVER_SITE_NAME] = app_name
self.log_debug(fl_ctx, f"Job {job.job_id} is checking against applicable sites: {applicable_sites}")
required_sites = job.required_sites if job.required_sites else []
if required_sites:
for s in required_sites:
if s not in applicable_sites:
self.log_debug(fl_ctx, f"Job {job.job_id} can't be scheduled: required site {s} is not connected.")
return SCHEDULE_RESULT_NO_RESOURCE, None, f"missing required site {s}"
if job.min_sites and len(applicable_sites) < job.min_sites:
self.log_debug(
fl_ctx,
f"Job {job.job_id} can't be scheduled: connected sites ({len(applicable_sites)}) "
f"are less than min_sites ({job.min_sites}).",
)
return (
SCHEDULE_RESULT_NO_RESOURCE,
None,
f"connected sites ({len(applicable_sites)}) < min_sites ({job.min_sites})",
)
# we are assuming server resource is sufficient
resource_reqs = {}
for site_name in applicable_sites:
if site_name in job.resource_spec:
resource_reqs[site_name] = job.resource_spec[site_name]
else:
resource_reqs[site_name] = {}
job_participants = [fl_ctx.get_identity_name(default=SERVER_SITE_NAME)]
job_participants.extend(applicable_sites)
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, job.job_id, private=True)
fl_ctx.set_prop(FLContextKey.CLIENT_RESOURCE_SPECS, resource_reqs, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.JOB_PARTICIPANTS, job_participants, private=True, sticky=False)
self.fire_event(EventType.BEFORE_CHECK_CLIENT_RESOURCES, fl_ctx)
block_reason = fl_ctx.get_prop(FLContextKey.JOB_BLOCK_REASON)
if block_reason:
# cannot schedule this job
self.log_info(fl_ctx, f"Job {job.job_id} can't be scheduled: {block_reason}")
return SCHEDULE_RESULT_NO_RESOURCE, None, block_reason
resource_check_results = self._check_client_resources(
job_id=job.job_id, resource_reqs=resource_reqs, fl_ctx=fl_ctx
)
if not resource_check_results:
self.log_debug(fl_ctx, f"Job {job.job_id} can't be scheduled: resource check results is None or empty.")
return SCHEDULE_RESULT_NO_RESOURCE, None, "error checking resources"
required_sites_not_enough_resource = list(required_sites)
num_sites_ok = 0
sites_dispatch_info = {}
for site_name, check_result in resource_check_results.items():
is_resource_enough, token = check_result
if is_resource_enough:
sites_dispatch_info[site_name] = DispatchInfo(
app_name=sites_to_app[site_name],
resource_requirements=resource_reqs[site_name],
token=token,
)
num_sites_ok += 1
if site_name in required_sites:
required_sites_not_enough_resource.remove(site_name)
if num_sites_ok < job.min_sites:
self.log_debug(fl_ctx, f"Job {job.job_id} can't be scheduled: not enough sites have enough resources.")
self._cancel_resources(
resource_reqs=job.resource_spec, resource_check_results=resource_check_results, fl_ctx=fl_ctx
)
return (
SCHEDULE_RESULT_NO_RESOURCE,
None,
f"not enough sites have enough resources (ok sites {num_sites_ok} < min sites {job.min_sites}",
)
if required_sites_not_enough_resource:
self.log_debug(
fl_ctx,
f"Job {job.job_id} can't be scheduled: required sites: {required_sites_not_enough_resource}"
f" don't have enough resources.",
)
self._cancel_resources(
resource_reqs=job.resource_spec, resource_check_results=resource_check_results, fl_ctx=fl_ctx
)
return (
SCHEDULE_RESULT_NO_RESOURCE,
None,
f"required sites: {required_sites_not_enough_resource} don't have enough resources",
)
# add server dispatch info
sites_dispatch_info[SERVER_SITE_NAME] = DispatchInfo(
app_name=sites_to_app[SERVER_SITE_NAME], resource_requirements={}, token=None
)
return SCHEDULE_RESULT_OK, sites_dispatch_info, ""
def _exceed_max_jobs(self, fl_ctx: FLContext) -> bool:
exceed_limit = False
with self.lock:
if len(self.scheduled_jobs) >= self.max_jobs:
self.log_debug(
fl_ctx,
f"Skipping schedule job because scheduled_jobs ({len(self.scheduled_jobs)}) "
f"is greater than max_jobs ({self.max_jobs})",
)
exceed_limit = True
return exceed_limit
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.JOB_STARTED:
with self.lock:
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_JOB_ID)
if job_id not in self.scheduled_jobs:
self.scheduled_jobs.append(job_id)
elif event_type == EventType.JOB_COMPLETED or event_type == EventType.JOB_ABORTED:
with self.lock:
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_JOB_ID)
if job_id in self.scheduled_jobs:
self.scheduled_jobs.remove(job_id)
def schedule_job(
self, job_manager: JobDefManagerSpec, job_candidates: List[Job], fl_ctx: FLContext
) -> (Optional[Job], Optional[Dict[str, DispatchInfo]]):
failed_jobs = []
blocked_jobs = []
try:
ready_job, dispatch_info = self._do_schedule_job(job_candidates, fl_ctx, failed_jobs, blocked_jobs)
except:
self.log_exception(fl_ctx, "error scheduling job")
ready_job, dispatch_info = None, None
# process failed and blocked jobs
try:
if failed_jobs:
# set the try count
for job in failed_jobs:
job_manager.refresh_meta(job, self._get_update_meta_keys(), fl_ctx)
if blocked_jobs:
for job in blocked_jobs:
job_manager.refresh_meta(job, self._get_update_meta_keys(), fl_ctx)
job_manager.set_status(job.job_id, RunStatus.FINISHED_CANT_SCHEDULE, fl_ctx)
except:
self.log_exception(fl_ctx, "error updating scheduling info in job store")
return ready_job, dispatch_info
def _get_update_meta_keys(self):
return [
JobMetaKey.SCHEDULE_COUNT.value,
JobMetaKey.LAST_SCHEDULE_TIME.value,
JobMetaKey.SCHEDULE_HISTORY.value,
]
def _update_schedule_history(self, job: Job, result: str, fl_ctx: FLContext):
history = job.meta.get(JobMetaKey.SCHEDULE_HISTORY.value, None)
if not history:
history = []
job.meta[JobMetaKey.SCHEDULE_HISTORY.value] = history
now = datetime.datetime.now()
cur_time = now.strftime("%Y-%m-%d %H:%M:%S")
history.append(f"{cur_time}: {result}")
self.log_info(fl_ctx, f"Try to schedule job {job.job_id}, get result: ({result}).")
schedule_count = job.meta.get(JobMetaKey.SCHEDULE_COUNT.value, 0)
schedule_count += 1
job.meta[JobMetaKey.SCHEDULE_COUNT.value] = schedule_count
job.meta[JobMetaKey.LAST_SCHEDULE_TIME.value] = time.time()
def _do_schedule_job(
self, job_candidates: List[Job], fl_ctx: FLContext, failed_jobs: list, blocked_jobs: list
) -> (Optional[Job], Optional[Dict[str, DispatchInfo]]):
self.log_debug(fl_ctx, f"Current scheduled_jobs is {self.scheduled_jobs}")
if self._exceed_max_jobs(fl_ctx=fl_ctx):
self.log_debug(fl_ctx, f"skipped scheduling since there are {self.max_jobs} concurrent job(s) already")
return None, None
# sort by submitted time
job_candidates.sort(key=lambda j: j.meta.get(JobMetaKey.SUBMIT_TIME.value, 0.0))
engine = fl_ctx.get_engine()
for job in job_candidates:
schedule_count = job.meta.get(JobMetaKey.SCHEDULE_COUNT.value, 0)
if schedule_count >= self.max_schedule_count:
self.log_info(
fl_ctx, f"skipped job {job.job_id} since it exceeded max schedule count {self.max_schedule_count}"
)
blocked_jobs.append(job)
self._update_schedule_history(job, f"exceeded max schedule count {self.max_schedule_count}", fl_ctx)
continue
last_schedule_time = job.meta.get(JobMetaKey.LAST_SCHEDULE_TIME.value, 0.0)
time_since_last_schedule = time.time() - last_schedule_time
n = 0 if schedule_count == 0 else schedule_count - 1
required_interval = min(self.max_schedule_interval, (2**n) * self.min_schedule_interval)
if time_since_last_schedule < required_interval:
# do not schedule again too soon
continue
with engine.new_context() as ctx:
rc, sites_dispatch_info, result = self._try_job(job, ctx)
self.log_debug(ctx, f"Try to schedule job {job.job_id}, get result: {rc}, {sites_dispatch_info}.")
if not result:
result = "scheduled"
self._update_schedule_history(job, result, ctx)
if rc == SCHEDULE_RESULT_OK:
return job, sites_dispatch_info
elif rc == SCHEDULE_RESULT_NO_RESOURCE:
failed_jobs.append(job)
else:
blocked_jobs.append(job)
self.log_debug(fl_ctx, "No job is scheduled.")
return None, None
def restore_scheduled_job(self, job_id: str):
with self.lock:
if job_id not in self.scheduled_jobs:
self.scheduled_jobs.append(job_id)
def remove_scheduled_job(self, job_id: str):
with self.lock:
if job_id in self.scheduled_jobs:
self.scheduled_jobs.remove(job_id)
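# --- Hypothetical illustration; not part of the original module ---
# How the retry backoff in _do_schedule_job grows: the required interval
# between attempts is min(max_schedule_interval, 2**n * min_schedule_interval),
# where n = max(schedule_count - 1, 0).
if __name__ == "__main__":
    min_interval, max_interval = 10.0, 600.0
    for schedule_count in range(8):
        n = 0 if schedule_count == 0 else schedule_count - 1
        print(schedule_count, min(max_interval, (2**n) * min_interval))
    # prints 10, 10, 20, 40, 80, 160, 320, 600 (capped) seconds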
| NVFlare-main | nvflare/app_common/job_schedulers/job_scheduler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.fl_model import FLModel, FLModelConst, MetaKey, ParamsType
from nvflare.app_common.app_constant import AppConstants
from nvflare.fuel.utils.validation_utils import check_object_type
MODEL_ATTRS = [
FLModelConst.PARAMS_TYPE,
FLModelConst.PARAMS,
FLModelConst.METRICS,
FLModelConst.OPTIMIZER_PARAMS,
FLModelConst.CURRENT_ROUND,
FLModelConst.TOTAL_ROUNDS,
FLModelConst.META,
]
params_type_to_data_kind = {
ParamsType.FULL.value: DataKind.WEIGHTS,
ParamsType.DIFF.value: DataKind.WEIGHT_DIFF,
}
data_kind_to_params_type = {v: k for k, v in params_type_to_data_kind.items()}
class ParamsConverter(ABC):
"""This class converts params from one format to the other."""
@abstractmethod
def convert(self, params: Dict) -> Dict:
pass
class FLModelUtils:
@staticmethod
def to_shareable(fl_model: FLModel, params_converter: Optional[ParamsConverter] = None) -> Shareable:
"""From FLModel to NVFlare side shareable.
        This is a temporary solution that converts an FLModel to the existing-style Shareable,
        so that we can reuse the existing components we have.
        In the future, we should use to_dxo and from_dxo directly,
        and all the components should be changed to accept the standard DXO.
"""
if fl_model.params is None and fl_model.metrics is None:
raise ValueError("FLModel without params and metrics is NOT supported.")
elif fl_model.params is not None:
if fl_model.params_type is None:
raise ValueError(f"Invalid ParamsType: ({fl_model.params_type}).")
data_kind = params_type_to_data_kind.get(fl_model.params_type)
if data_kind is None:
raise ValueError(f"Invalid ParamsType: ({fl_model.params_type}).")
if params_converter is not None:
fl_model.params = params_converter.convert(fl_model.params)
if fl_model.metrics is None:
dxo = DXO(data_kind, data=fl_model.params, meta={})
else:
# if both params and metrics are presented, will be treated as initial evaluation on the global model
dxo = DXO(data_kind, data=fl_model.params, meta={MetaKey.INITIAL_METRICS: fl_model.metrics})
else:
dxo = DXO(DataKind.METRICS, data=fl_model.metrics, meta={})
meta = fl_model.meta if fl_model.meta is not None else {}
dxo.meta.update(meta)
shareable = dxo.to_shareable()
if fl_model.current_round is not None:
shareable.set_header(AppConstants.CURRENT_ROUND, fl_model.current_round)
if fl_model.total_rounds is not None:
shareable.set_header(AppConstants.NUM_ROUNDS, fl_model.total_rounds)
if MetaKey.VALIDATE_TYPE in meta:
shareable.set_header(AppConstants.VALIDATE_TYPE, meta[MetaKey.VALIDATE_TYPE])
return shareable
@staticmethod
def from_shareable(
shareable: Shareable, params_converter: Optional[ParamsConverter] = None, fl_ctx: Optional[FLContext] = None
) -> FLModel:
"""From NVFlare side shareable to FLModel.
        This is a temporary solution that converts an existing-style Shareable to an FLModel,
        so that we can reuse the existing components we have.
        In the future, we should use to_dxo and from_dxo directly,
        and all the components should be changed to accept the standard DXO.
"""
dxo = from_shareable(shareable)
metrics = None
params_type = None
params = None
if dxo.data_kind == DataKind.METRICS:
metrics = dxo.data
else:
params_type = data_kind_to_params_type.get(dxo.data_kind)
if params_type is None:
raise ValueError(f"Invalid shareable with dxo that has data kind: {dxo.data_kind}")
params_type = ParamsType(params_type)
if params_converter:
dxo.data = params_converter.convert(dxo.data)
params = dxo.data
current_round = shareable.get_header(AppConstants.CURRENT_ROUND, None)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS, None)
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE, None)
meta = dict(dxo.meta)
if validate_type is not None:
meta[MetaKey.VALIDATE_TYPE] = validate_type
if fl_ctx is not None:
meta[MetaKey.JOB_ID] = fl_ctx.get_job_id()
meta[MetaKey.SITE_NAME] = fl_ctx.get_identity_name()
result = FLModel(
params_type=params_type,
params=params,
metrics=metrics,
current_round=current_round,
total_rounds=total_rounds,
meta=meta,
)
return result
@staticmethod
def to_dxo(fl_model: FLModel) -> DXO:
"""Converts FLModel to a DXO."""
attr_dict = {}
for attr in MODEL_ATTRS:
value = getattr(fl_model, attr, None)
if value is not None:
attr_dict[attr] = value
result = DXO(data_kind=DataKind.FL_MODEL, data=attr_dict)
return result
@staticmethod
def from_dxo(dxo: DXO) -> FLModel:
"""Converts DXO to FLModel."""
if dxo.data_kind != DataKind.FL_MODEL:
raise ValueError(f"Invalid dxo with data_kind: {dxo.data_kind}")
if not isinstance(dxo.data, dict):
raise ValueError(f"Invalid dxo with data of type: {type(dxo.data)}")
params = dxo.data.get(FLModelConst.PARAMS, None)
params_type = dxo.data.get(FLModelConst.PARAMS_TYPE, None)
metrics = dxo.data.get(FLModelConst.METRICS, None)
optimizer_params = dxo.data.get(FLModelConst.OPTIMIZER_PARAMS, None)
current_round = dxo.data.get(FLModelConst.CURRENT_ROUND, None)
total_rounds = dxo.data.get(FLModelConst.TOTAL_ROUNDS, None)
meta = dxo.data.get(FLModelConst.META, None)
return FLModel(
params=params,
params_type=params_type,
metrics=metrics,
optimizer_params=optimizer_params,
current_round=current_round,
total_rounds=total_rounds,
meta=meta,
)
@staticmethod
def get_meta_prop(model: FLModel, key: str, default=None):
check_object_type("model", model, FLModel)
if not model.meta:
return default
else:
return model.meta.get(key, default)
@staticmethod
def set_meta_prop(model: FLModel, key: str, value: Any):
check_object_type("model", model, FLModel)
model.meta[key] = value
@staticmethod
def get_configs(model: FLModel) -> Optional[dict]:
return FLModelUtils.get_meta_prop(model, MetaKey.CONFIGS)
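# --- Hypothetical usage sketch; not part of the original module ---
# Round-tripping an FLModel through the DXO representation (demo values only):
if __name__ == "__main__":
    demo_model = FLModel(
        params={"w": [1.0, 2.0]},
        params_type=ParamsType.FULL,
        current_round=0,
        total_rounds=5,
    )
    dxo = FLModelUtils.to_dxo(demo_model)
    restored = FLModelUtils.from_dxo(dxo)
    assert restored.params == {"w": [1.0, 2.0]}
    assert restored.total_rounds == 5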
| NVFlare-main | nvflare/app_common/utils/fl_model_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def check_component_type(comp, t):
if not isinstance(comp, t):
raise TypeError(f"{type(comp).__name__} must implement `{t}` type. Got: {type(comp)}")
| NVFlare-main | nvflare/app_common/utils/component_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Optional
def get_ext_format(ext: str) -> str:
if ext is None or ext == "" or ext.isspace():
return "csv"
elif ext.startswith("."):
return ext[1:]
else:
return ext
def get_file_format(input_path: str) -> str:
ext = get_file_ext(input_path)
return get_ext_format(ext)
def get_file_ext(input_path: str) -> Optional[str]:
ext = pathlib.Path(input_path).suffix
if ext.startswith("."):
return ext[1:]
else:
return ext
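# --- Hypothetical usage sketch; not part of the original module ---
# A leading dot is stripped from extensions, and a missing extension falls
# back to "csv":
if __name__ == "__main__":
    assert get_ext_format(".parquet") == "parquet"
    assert get_file_format("data/train.json") == "json"
    assert get_file_format("data/train") == "csv"  # no extension -> default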
| NVFlare-main | nvflare/app_common/utils/file_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import numpy as np
class ObjectEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, "to_json"):
return self.default(obj.to_json())
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
elif hasattr(obj, "__dict__"):
d = dict(
(key, value)
for key, value in inspect.getmembers(obj)
if not key.startswith("__")
and not inspect.isabstract(value)
and not inspect.isbuiltin(value)
and not inspect.isfunction(value)
and not inspect.isgenerator(value)
and not inspect.isgeneratorfunction(value)
and not inspect.ismethod(value)
and not inspect.ismethoddescriptor(value)
and not inspect.isroutine(value)
)
            # return the attribute dict itself; nested numpy values are
            # handled by further default() calls during encoding
            return d
return super(ObjectEncoder, self).default(obj)
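# --- Hypothetical usage sketch; not part of the original module ---
# ObjectEncoder lets json.dumps handle numpy scalars and arrays directly:
if __name__ == "__main__":
    payload = {"count": np.int64(3), "loss": np.float32(0.25), "weights": np.array([1, 2, 3])}
    print(json.dumps(payload, cls=ObjectEncoder))
    # -> {"count": 3, "loss": 0.25, "weights": [1, 2, 3]}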
| NVFlare-main | nvflare/app_common/utils/json_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class ModelExchangeFormat(str, Enum):
RAW = "raw"
PYTORCH = "pytorch"
NUMPY = "numpy"
| NVFlare-main | nvflare/app_common/model_exchange/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/model_exchange/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
from nvflare.apis.utils.decomposers import flare_decomposers
from nvflare.app_common.decomposers import common_decomposers as app_common_decomposers
from nvflare.app_common.model_exchange.model_exchanger import ModelExchanger
from nvflare.fuel.utils.constants import Mode
from nvflare.fuel.utils.pipe.file_accessor import FileAccessor
from nvflare.fuel.utils.pipe.file_pipe import FilePipe
class FilePipeModelExchanger(ModelExchanger):
def __init__(
self,
data_exchange_path: str,
file_accessor: Optional[FileAccessor] = None,
pipe_name: str = "pipe",
topic: str = "data",
get_poll_interval: float = 0.5,
read_interval: float = 0.1,
heartbeat_interval: float = 5.0,
heartbeat_timeout: float = 30.0,
):
"""Initializes the FilePipeModelExchanger.
Args:
data_exchange_path (str): The path for data exchange. This is the location where the data
will be read from or written to.
file_accessor (Optional[FileAccessor]): The file accessor for reading and writing files.
If not provided, the default file accessor (FobsFileAccessor) will be used.
Please refer to the docstring of the FileAccessor class for more information
on implementing a custom file accessor. Defaults to None.
pipe_name (str): The name of the pipe to be used for communication. This pipe will be used
for transmitting data between the sender and receiver. Defaults to "pipe".
topic (str): The topic for data exchange. This allows the sender and receiver to identify
the purpose or content of the data being exchanged. Defaults to "data".
get_poll_interval (float): The interval (in seconds) for checking if the other side has sent data.
This determines how often the receiver checks for incoming data. Defaults to 0.5.
read_interval (float): The interval (in seconds) for reading from the pipe. This determines
how often the receiver reads data from the pipe. Defaults to 0.1.
heartbeat_interval (float): The interval (in seconds) for sending heartbeat signals to the peer.
Heartbeat signals are used to indicate that the sender or receiver is still active. Defaults to 5.0.
heartbeat_timeout (float): The timeout (in seconds) for waiting for a heartbeat signal from the peer.
If a heartbeat is not received within this timeout period, the connection may be considered lost.
Defaults to 30.0.
"""
flare_decomposers.register()
app_common_decomposers.register()
data_exchange_path = os.path.abspath(data_exchange_path)
file_pipe = FilePipe(Mode.PASSIVE, data_exchange_path)
if file_accessor is not None:
file_pipe.set_file_accessor(file_accessor)
super().__init__(
pipe=file_pipe,
pipe_name=pipe_name,
topic=topic,
get_poll_interval=get_poll_interval,
read_interval=read_interval,
heartbeat_interval=heartbeat_interval,
heartbeat_timeout=heartbeat_timeout,
)
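# --- Hypothetical usage sketch; not part of the original module ---
# The external training process opens the same exchange directory that the
# NVFlare side writes to. receive_model blocks until the peer sends data, so
# this is left as a commented sketch rather than an executable demo:
#
#     exchanger = FilePipeModelExchanger(data_exchange_path="/tmp/nvflare_exchange")
#     model = exchanger.receive_model(timeout=60.0)
#     # ... train locally, producing an updated model ...
#     exchanger.submit_model(model)
#     exchanger.finalize()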
| NVFlare-main | nvflare/app_common/model_exchange/file_pipe_model_exchanger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import Any, Optional, Tuple
from nvflare.fuel.utils.pipe.pipe import Message, Pipe
from nvflare.fuel.utils.pipe.pipe_handler import PipeHandler, Topic
class DataExchangeException(Exception):
pass
class ExchangeTimeoutException(DataExchangeException):
pass
class ExchangeAbortException(DataExchangeException):
pass
class ExchangeEndException(DataExchangeException):
pass
class ExchangePeerGoneException(DataExchangeException):
pass
class ModelExchanger:
def __init__(
self,
pipe: Pipe,
pipe_name: str = "pipe",
topic: str = "data",
get_poll_interval: float = 0.5,
read_interval: float = 0.1,
heartbeat_interval: float = 5.0,
heartbeat_timeout: float = 30.0,
):
"""Initializes the ModelExchanger.
Args:
pipe (Pipe): The pipe used for data exchange.
pipe_name (str): Name of the pipe. Defaults to "pipe".
topic (str): Topic for data exchange. Defaults to "data".
get_poll_interval (float): Interval for checking if the other side has sent data. Defaults to 0.5.
read_interval (float): Interval for reading from the pipe. Defaults to 0.1.
heartbeat_interval (float): Interval for sending heartbeat to the peer. Defaults to 5.0.
heartbeat_timeout (float): Timeout for waiting for a heartbeat from the peer. Defaults to 30.0.
"""
self.logger = logging.getLogger(self.__class__.__name__)
self._req_id: Optional[str] = None
self._topic = topic
pipe.open(pipe_name)
self.pipe_handler = PipeHandler(
pipe,
read_interval=read_interval,
heartbeat_interval=heartbeat_interval,
heartbeat_timeout=heartbeat_timeout,
)
self.pipe_handler.start()
self._get_poll_interval = get_poll_interval
def submit_model(self, model: Any) -> None:
"""Submits a model for exchange.
Args:
model (Any): The model to be submitted.
Raises:
            DataExchangeException: If there is no request ID available (a model must be pulled from the server first).
"""
if self._req_id is None:
raise DataExchangeException("need to pull a model first.")
self._send_reply(data=model, req_id=self._req_id)
def receive_model(self, timeout: Optional[float] = None) -> Any:
"""Receives a model.
Args:
timeout (Optional[float]): Timeout for waiting to receive a model. Defaults to None.
Returns:
Any: The received model.
Raises:
ExchangeTimeoutException: If the data cannot be received within the specified timeout.
ExchangeAbortException: If the other endpoint of the pipe requests to abort.
ExchangeEndException: If the other endpoint has ended.
ExchangePeerGoneException: If the other endpoint is gone.
"""
model, req_id = self._receive_request(timeout)
self._req_id = req_id
return model
def finalize(self, close_pipe: bool = True) -> None:
if self.pipe_handler is None:
raise RuntimeError("PipeMonitor is not initialized.")
self.pipe_handler.stop(close_pipe=close_pipe)
def _receive_request(self, timeout: Optional[float] = None) -> Tuple[Any, str]:
"""Receives a request.
Args:
timeout: how long to wait for the request to come.
Returns:
A tuple of (data, request id).
Raises:
ExchangeTimeoutException: If can't receive data within timeout seconds.
            ExchangeAbortException: If the other endpoint of the pipe asks to abort.
ExchangeEndException: If the other endpoint has ended.
ExchangePeerGoneException: If the other endpoint is gone.
"""
if self.pipe_handler is None:
raise RuntimeError("PipeMonitor is not initialized.")
start = time.time()
while True:
msg: Optional[Message] = self.pipe_handler.get_next()
if not msg:
if timeout and time.time() - start > timeout:
self.pipe_handler.notify_abort(msg)
raise ExchangeTimeoutException(f"get data timeout after {timeout} secs")
elif msg.topic == Topic.ABORT:
raise ExchangeAbortException("the other end is aborted")
elif msg.topic == Topic.END:
raise ExchangeEndException(
f"received {msg.topic}: {msg.data} while waiting for result for {self._topic}"
)
elif msg.topic == Topic.PEER_GONE:
raise ExchangePeerGoneException(
f"received {msg.topic}: {msg.data} while waiting for result for {self._topic}"
)
elif msg.topic == self._topic:
return msg.data, msg.msg_id
time.sleep(self._get_poll_interval)
def _send_reply(self, data: Any, req_id: str, timeout: Optional[float] = None) -> bool:
"""Sends a reply.
Args:
data: The data exchange object to be sent.
req_id: request ID.
timeout: how long to wait for the peer to read the data.
                If not specified, returns False immediately.
Returns:
A bool indicates whether the peer has read the data.
"""
if self.pipe_handler is None:
raise RuntimeError("PipeMonitor is not initialized.")
msg = Message.new_reply(topic=self._topic, data=data, req_msg_id=req_id)
has_been_read = self.pipe_handler.send_to_peer(msg, timeout)
return has_been_read
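# --- Hypothetical usage sketch; not part of the original module ---
# Typical receive/train/submit loop on the trainer side. make_pipe() and
# train() are placeholders, and receive_model blocks on a live peer, so this
# is left as a commented sketch:
#
#     exchanger = ModelExchanger(pipe=make_pipe(), topic="data")
#     try:
#         while True:
#             model = exchanger.receive_model(timeout=60.0)
#             exchanger.submit_model(train(model))
#     except ExchangeEndException:
#         pass  # the peer signaled a normal end of exchange
#     finally:
#         exchanger.finalize()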
| NVFlare-main | nvflare/app_common/model_exchange/model_exchanger.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/state_persistors/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvflare.apis.fl_snapshot import FLSnapshot, RunSnapshot
from nvflare.apis.state_persistor import StatePersistor
from nvflare.apis.storage import StorageSpec
from nvflare.fuel.utils import fobs
class StorageStatePersistor(StatePersistor):
def __init__(self, storage: StorageSpec, uri_root: str):
"""Creates a StorageStatePersistor.
Args:
storage: StorageSpec object
uri_root: where to store the states.
"""
self.storage = storage
self.uri_root = uri_root
def save(self, snapshot: RunSnapshot) -> str:
"""Call to save the snapshot of the FL state to storage.
Args:
snapshot: RunSnapshot object
Returns:
storage location
"""
path = os.path.join(self.uri_root, snapshot.job_id)
if snapshot.completed:
full_uri = self.storage.delete_object(path)
else:
full_uri = self.storage.create_object(uri=path, data=fobs.dumps(snapshot), meta={}, overwrite_existing=True)
return full_uri
def retrieve(self) -> FLSnapshot:
"""Call to load the persisted FL components snapshot from the persisted location.
Returns:
retrieved Snapshot
"""
all_items = self.storage.list_objects(self.uri_root)
fl_snapshot = FLSnapshot()
for item in all_items:
snapshot = fobs.loads(self.storage.get_data(item))
fl_snapshot.add_snapshot(snapshot.job_id, snapshot)
return fl_snapshot
def retrieve_run(self, job_id: str) -> RunSnapshot:
"""Call to load the persisted RunSnapshot of a job from the persisted location.
Args:
job_id: job_id
Returns:
RunSnapshot of the job_id
"""
path = os.path.join(self.uri_root, job_id)
snapshot = fobs.loads(self.storage.get_data(uri=path))
return snapshot
def delete(self):
"""Deletes the FL snapshot."""
all_items = self.storage.list_objects(self.uri_root)
for item in all_items:
self.storage.delete_object(item)
def delete_run(self, job_id: str):
"""Deletes the RunSnapshot of a job.
Args:
job_id: job_id
"""
path = os.path.join(self.uri_root, job_id)
self.storage.delete_object(path)
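# --- Hypothetical usage sketch; not part of the original module ---
# Any StorageSpec implementation can back the persistor; `storage` and
# `snapshot` below are placeholders:
#
#     persistor = StorageStatePersistor(storage=storage, uri_root="snapshots")
#     location = persistor.save(snapshot)        # deletes the entry when snapshot.completed
#     one_run = persistor.retrieve_run(job_id)   # restore a single job's RunSnapshot
#     fl_snapshot = persistor.retrieve()         # restore all persisted jobs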
| NVFlare-main | nvflare/app_common/state_persistors/storage_state_persistor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.file_model_locator",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.pt.file_model_locator import PTFileModelLocator
| NVFlare-main | nvflare/app_common/pt/pt_file_model_locator.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.model_reader_writer",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.pt.model_reader_writer import PTModelReaderWriter
| NVFlare-main | nvflare/app_common/pt/pt_model_reader_writer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.fedopt", category=FutureWarning, stacklevel=2
)
# flake8: noqa: F401
from nvflare.app_opt.pt.fedopt import PTFedOptModelShareableGenerator
| NVFlare-main | nvflare/app_common/pt/pt_fedopt.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.", category=FutureWarning, stacklevel=2
)
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
# flake8: noqa: F401
from nvflare.app_opt.pt.utils import feed_vars
| NVFlare-main | nvflare/app_common/pt/pt_fed_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.multi_process_executor",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.pt.multi_process_executor import PTMultiProcessExecutor
| NVFlare-main | nvflare/app_common/pt/pt_multi_process_executor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.fedproxloss",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
| NVFlare-main | nvflare/app_common/pt/pt_fedproxloss.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.file_model_persistor",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.pt.file_model_persistor import PTFileModelPersistor
| NVFlare-main | nvflare/app_common/pt/pt_file_model_persistor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/pt/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.ditto", category=FutureWarning, stacklevel=2
)
# flake8: noqa: F401
from nvflare.app_opt.pt.ditto import PTDittoHelper
| NVFlare-main | nvflare/app_common/pt/pt_ditto.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.tracking.tb_receiver.",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.tracking.tb.tb_receiver import TBAnalyticsReceiver
| NVFlare-main | nvflare/app_common/pt/tb_receiver.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.pt.scaffold",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.pt.scaffold import PTScaffoldHelper, get_lr_values
| NVFlare-main | nvflare/app_common/pt/pt_scaffold.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from typing import Dict, List
from nvflare.app_common.resource_managers.auto_clean_resource_manager import AutoCleanResourceManager
class ListResourceManager(AutoCleanResourceManager):
"""Manage a list of resource units.
For example:
    - require 2, current resources are [0, 1, 2, 3, 4, 5] => return [0, 1];
      after allocation the current resources become [2, 3, 4, 5]
    - require 3, current resources are [2, 3, 4, 5] => return [2, 3, 4]
"""
def __init__(self, resources: Dict[str, List], expiration_period: int = 30):
"""Constructor
Args:
            resources (dict): Dict mapping each resource name to its list of resource units.
            expiration_period (int): Number of seconds to hold the reserved resources.
                If allocate_resources is not called within "expiration_period" seconds
                after check_resources, the reserved resources will be released.
"""
if not isinstance(resources, dict):
raise TypeError(f"resources should be of type dict, but got {type(resources)}.")
resource_queue = {}
for k in resources:
if not isinstance(resources[k], list):
raise TypeError(f"item in resources should be of type list, but got {type(resources[k])}.")
resource_queue[k] = deque(resources[k])
super().__init__(resources=resource_queue, expiration_period=expiration_period)
def _deallocate(self, resources: dict):
for k, v in resources.items():
for i in v:
self.resources[k].appendleft(i)
def _check_required_resource_available(self, resource_requirement: dict) -> bool:
is_resource_enough = True
for k in resource_requirement:
if k in self.resources:
if len(self.resources[k]) < resource_requirement[k]:
is_resource_enough = False
break
else:
is_resource_enough = False
break
return is_resource_enough
def _reserve_resource(self, resource_requirement: dict) -> dict:
reserved_resources = {}
for k in resource_requirement:
reserved_resource_units = []
for i in range(resource_requirement[k]):
reserved_resource_units.append(self.resources[k].popleft())
reserved_resources[k] = reserved_resource_units
return reserved_resources
def _resource_to_dict(self) -> dict:
return {
"resources": {k: list(self.resources[k]) for k in self.resources},
"reserved_resources": self.reserved_resources,
}
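# --- Hypothetical usage sketch; not part of the original module ---
# Units are handed out from the head of each deque and returned to it on
# deallocation, matching the docstring example above. Method names follow
# ResourceManagerSpec, and fl_ctx is a placeholder:
#
#     manager = ListResourceManager(resources={"gpu_index": [0, 1, 2, 3]})
#     ok, token = manager.check_resources({"gpu_index": 2}, fl_ctx)
#     if ok:
#         reserved = manager.allocate_resources({"gpu_index": 2}, token, fl_ctx)
#         # reserved == {"gpu_index": [0, 1]}; units 2 and 3 remain available
#         manager.free_resources(reserved, token, fl_ctx)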
| NVFlare-main | nvflare/app_common/resource_managers/list_resource_manager.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from nvflare.app_common.resource_managers.auto_clean_resource_manager import AutoCleanResourceManager
from nvflare.fuel.utils.gpu_utils import get_host_gpu_ids, get_host_gpu_memory_total
class GPUResource:
def __init__(self, gpu_id: int, gpu_memory: Union[int, float]):
self.id = gpu_id
self.memory = gpu_memory
def to_dict(self):
return {"gpu_id": self.id, "memory": self.memory}
class GPUResourceManager(AutoCleanResourceManager):
def __init__(
self,
num_of_gpus: int,
mem_per_gpu_in_GiB: Union[int, float],
num_gpu_key: str = "num_of_gpus",
gpu_mem_key: str = "mem_per_gpu_in_GiB",
expiration_period: Union[int, float] = 30,
):
"""Resource manager for GPUs.
Args:
num_of_gpus: Number of GPUs.
mem_per_gpu_in_GiB: Memory for each GPU.
num_gpu_key: The key in resource requirements that specify the number of GPUs.
gpu_mem_key: The key in resource requirements that specify the memory per GPU.
expiration_period: Number of seconds to hold the resources reserved.
If check_resources is called but after "expiration_period" no allocate resource is called,
then the reserved resources will be released.
"""
if not isinstance(num_of_gpus, int):
raise ValueError(f"num_of_gpus should be of type int, but got {type(num_of_gpus)}.")
if num_of_gpus < 0:
raise ValueError("num_of_gpus should be greater than or equal to 0.")
if not isinstance(mem_per_gpu_in_GiB, (float, int)):
raise ValueError(f"mem_per_gpu_in_GiB should be of type int or float, but got {type(mem_per_gpu_in_GiB)}.")
if mem_per_gpu_in_GiB < 0:
raise ValueError("mem_per_gpu_in_GiB should be greater than or equal to 0.")
if not isinstance(expiration_period, (float, int)):
raise ValueError(f"expiration_period should be of type int or float, but got {type(expiration_period)}.")
if expiration_period < 0:
raise ValueError("expiration_period should be greater than or equal to 0.")
if num_of_gpus > 0:
num_host_gpus = len(get_host_gpu_ids())
if num_of_gpus > num_host_gpus:
raise ValueError(f"num_of_gpus specified ({num_of_gpus}) exceeds available GPUs: {num_host_gpus}.")
host_gpu_mem = get_host_gpu_memory_total()
for i in host_gpu_mem:
if mem_per_gpu_in_GiB * 1024 > i:
raise ValueError(
f"Memory per GPU specified ({mem_per_gpu_in_GiB * 1024}) exceeds available GPU memory: {i}."
)
self.num_gpu_key = num_gpu_key
self.gpu_mem_key = gpu_mem_key
resources = {i: GPUResource(gpu_id=i, gpu_memory=mem_per_gpu_in_GiB) for i in range(num_of_gpus)}
super().__init__(resources=resources, expiration_period=expiration_period)
def _deallocate(self, resources: dict):
for k, v in resources.items():
self.resources[k].memory += v
def _check_required_resource_available(self, resource_requirement: dict) -> bool:
if not resource_requirement:
return True
if self.num_gpu_key not in resource_requirement:
raise ValueError(f"resource_requirement is missing num_gpu_key {self.num_gpu_key}.")
is_resource_enough = False
num_gpu = resource_requirement[self.num_gpu_key]
gpu_mem = resource_requirement.get(self.gpu_mem_key, 0)
satisfied = 0
for k in self.resources:
r: GPUResource = self.resources[k]
if r.memory >= gpu_mem:
satisfied += 1
if satisfied >= num_gpu:
is_resource_enough = True
break
return is_resource_enough
def _reserve_resource(self, resource_requirement: dict) -> dict:
if not resource_requirement:
return {}
if self.num_gpu_key not in resource_requirement:
raise ValueError(f"resource_requirement is missing num_gpu_key {self.num_gpu_key}.")
reserved_resources = {}
num_gpu = resource_requirement[self.num_gpu_key]
gpu_mem = resource_requirement.get(self.gpu_mem_key, 0)
reserved = 0
for k in self.resources:
r: GPUResource = self.resources[k]
if r.memory >= gpu_mem:
r.memory -= gpu_mem
reserved_resources[k] = gpu_mem
reserved += 1
if reserved == num_gpu:
break
return reserved_resources
def _resource_to_dict(self) -> dict:
return {
"resources": [self.resources[k].to_dict() for k in self.resources],
"reserved_resources": self.reserved_resources,
}
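# --- Illustrative usage sketch (added for exposition; not part of the original file) ---
# Shows how a requirement dict keyed by num_gpu_key / gpu_mem_key drives reservation.
# Requires a host with visible GPUs; the numbers below are demonstration-only assumptions.
if __name__ == "__main__":
    from nvflare.apis.fl_context import FLContext
    manager = GPUResourceManager(num_of_gpus=2, mem_per_gpu_in_GiB=8)
    fl_ctx = FLContext()
    requirement = {"num_of_gpus": 1, "mem_per_gpu_in_GiB": 4}
    enough, token = manager.check_resources(requirement, fl_ctx)
    if enough:
        allocated = manager.allocate_resources(requirement, token, fl_ctx)  # e.g. {0: 4}
        manager.free_resources(allocated, token, fl_ctx)  # memory is added back to GPU 0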
| NVFlare-main | nvflare/app_common/resource_managers/gpu_resource_manager.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/resource_managers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
from abc import ABC, abstractmethod
from threading import Event, Lock, Thread
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.apis.resource_manager_spec import ResourceManagerSpec
class AutoCleanResourceManager(ResourceManagerSpec, FLComponent, ABC):
def __init__(self, resources: dict, expiration_period: int = 30, check_period: float = 1.0):
"""AutoCleanResourceManager implementation.
It will automatically clean up reserved resources.
Args:
resources (dict): Specify the list of resources unit
expiration_period (int): Number of seconds to hold the resources reserved. default to 30.
If check_resources is called but after "expiration_period" no allocate resource is called,
then the reserved resources will be released.
check_period (float): Number of seconds to check for expired resources. default to 1.0.
"""
super().__init__()
if not isinstance(resources, dict):
raise TypeError(f"resources should be of type dict, but got {type(resources)}.")
if not isinstance(expiration_period, int):
raise TypeError(f"expiration_period should be of type int, but got {type(expiration_period)}.")
if expiration_period <= 0:
raise ValueError("expiration_period should be greater than 0.")
self.resources = resources
self.expiration_period = expiration_period
self.reserved_resources = {}
self._lock = Lock()
self._stop_event = Event()
self._cleanup_thread = Thread(target=self._check_expired)
self._check_period = check_period
@abstractmethod
def _deallocate(self, resources: dict):
"""Deallocates the resources.
Args:
resources (dict): the resources to be freed.
"""
raise NotImplementedError
@abstractmethod
def _check_required_resource_available(self, resource_requirement: dict) -> bool:
"""Checks if resources are available.
Args:
resource_requirement (dict): the resource requested.
Return:
A boolean to indicate whether the current resources are enough for the required resources.
"""
raise NotImplementedError
@abstractmethod
def _reserve_resource(self, resource_requirement: dict) -> dict:
"""Reserves resources given the requirements.
Args:
resource_requirement (dict): the resource requested.
Return:
A dict of reserved resources associated with the requested resource.
"""
raise NotImplementedError
@abstractmethod
def _resource_to_dict(self) -> dict:
raise NotImplementedError
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.SYSTEM_START:
self._cleanup_thread.start()
elif event_type == EventType.SYSTEM_END:
self._stop_event.set()
if self._cleanup_thread:
self._cleanup_thread.join()
self._cleanup_thread = None
def _check_expired(self):
while not self._stop_event.is_set():
time.sleep(self._check_period)
with self._lock:
tokens_to_remove = []
for k in self.reserved_resources:
                    r, t = self.reserved_resources[k]
                    # countdown: t decreases by one per check; with the default check_period
                    # of 1.0s this approximates expiration_period in seconds
                    t -= 1
                    if t == 0:
tokens_to_remove.append(k)
else:
self.reserved_resources[k] = r, t
for token in tokens_to_remove:
reserved_resources, _ = self.reserved_resources.pop(token)
self._deallocate(resources=reserved_resources)
self.logger.debug(f"current resources: {self.resources}, reserved_resources {self.reserved_resources}.")
def check_resources(self, resource_requirement: dict, fl_ctx: FLContext):
if not isinstance(resource_requirement, dict):
raise TypeError(f"resource_requirement should be of type dict, but got {type(resource_requirement)}.")
with self._lock:
is_resource_enough = self._check_required_resource_available(resource_requirement)
token = ""
            # reserve resources only when there are enough available
if is_resource_enough:
token = str(uuid.uuid4())
reserved_resources = self._reserve_resource(resource_requirement)
self.reserved_resources[token] = (reserved_resources, self.expiration_period)
self.log_debug(
fl_ctx, f"reserving resources: {reserved_resources} for requirements {resource_requirement}."
)
self.log_debug(
fl_ctx, f"current resources: {self.resources}, reserved_resources {self.reserved_resources}."
)
return is_resource_enough, token
def cancel_resources(self, resource_requirement: dict, token: str, fl_ctx: FLContext):
with self._lock:
if token and token in self.reserved_resources:
reserved_resources, _ = self.reserved_resources.pop(token)
self._deallocate(resources=reserved_resources)
self.log_debug(fl_ctx, f"cancelling resources: {reserved_resources}.")
self.log_debug(
fl_ctx, f"current resources: {self.resources}, reserved_resources {self.reserved_resources}."
)
else:
self.log_debug(fl_ctx, f"Token {token} is not related to any reserved resources.")
return None
def allocate_resources(self, resource_requirement: dict, token: str, fl_ctx: FLContext) -> dict:
result = {}
with self._lock:
if token and token in self.reserved_resources:
result, _ = self.reserved_resources.pop(token)
self.log_debug(fl_ctx, f"allocating resources: {result} for requirements: {resource_requirement}.")
self.log_debug(
fl_ctx, f"current resources: {self.resources}, reserved_resources {self.reserved_resources}."
)
else:
raise RuntimeError(f"allocate_resources: No reserved resources for token {token}.")
return result
def free_resources(self, resources: dict, token: str, fl_ctx: FLContext):
with self._lock:
self.log_debug(fl_ctx, f"freeing resources: {resources}.")
self.log_debug(
fl_ctx, f"current resources: {self.resources}, reserved_resources {self.reserved_resources}."
)
self._deallocate(resources=resources)
def report_resources(self, fl_ctx):
with self._lock:
return self._resource_to_dict()
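# --- Illustrative subclass sketch (added for exposition; not part of the original file) ---
# A minimal counter-based manager showing the four abstract hooks; the single resource
# key "slots" is a demonstration-only assumption.
class _CountingResourceManager(AutoCleanResourceManager):
    def __init__(self, num_slots: int = 4):
        super().__init__(resources={"slots": num_slots})
    def _deallocate(self, resources: dict):
        self.resources["slots"] += resources.get("slots", 0)
    def _check_required_resource_available(self, resource_requirement: dict) -> bool:
        return self.resources["slots"] >= resource_requirement.get("slots", 0)
    def _reserve_resource(self, resource_requirement: dict) -> dict:
        n = resource_requirement.get("slots", 0)
        self.resources["slots"] -= n
        return {"slots": n}
    def _resource_to_dict(self) -> dict:
        return {"resources": dict(self.resources), "reserved_resources": self.reserved_resources}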
| NVFlare-main | nvflare/app_common/resource_managers/auto_clean_resource_manager.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from nvflare.apis.dxo import MetaKey
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.fl_model import FLModel
from nvflare.app_common.abstract.model_learner import ModelLearner
from nvflare.app_common.app_constant import AppConstants, ValidateType
from nvflare.app_common.utils.fl_model_utils import FLModelUtils
from nvflare.fuel.utils.validation_utils import check_object_type
from nvflare.security.logging import secure_format_exception
class ModelLearnerExecutor(Executor):
def __init__(
self,
learner_id,
train_task=AppConstants.TASK_TRAIN,
submit_model_task=AppConstants.TASK_SUBMIT_MODEL,
validate_task=AppConstants.TASK_VALIDATION,
configure_task=AppConstants.TASK_CONFIGURE,
):
"""Key component to run learner on clients.
Args:
learner_id (str): id of the learner object
train_task (str, optional): task name for train. Defaults to AppConstants.TASK_TRAIN.
submit_model_task (str, optional): task name for submit model. Defaults to AppConstants.TASK_SUBMIT_MODEL.
validate_task (str, optional): task name for validation. Defaults to AppConstants.TASK_VALIDATION.
configure_task (str, optional): task name for configure. Defaults to AppConstants.TASK_CONFIGURE.
"""
super().__init__()
self.learner_id = learner_id
self.learner = None
self.learner_name = ""
self.is_initialized = False
        self.learner_exe_lock = threading.Lock()  # used to ensure only one execution at a time
self.task_funcs = {
train_task: self.train,
submit_model_task: self.submit_model,
validate_task: self.validate,
configure_task: self.configure,
}
def _abort(self, fl_ctx: FLContext):
self.learner.fl_ctx = fl_ctx
self.learner.abort()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._create_learner(fl_ctx)
elif event_type == EventType.ABORT_TASK:
try:
if self.learner:
if not self.unsafe:
self._abort(fl_ctx)
else:
self.log_warning(fl_ctx, f"skipped abort of unsafe learner {self.learner_name}")
except Exception as e:
self.log_exception(fl_ctx, f"learner abort exception: {secure_format_exception(e)}")
elif event_type == EventType.END_RUN:
if not self.unsafe:
self.finalize(fl_ctx)
elif self.learner:
self.log_warning(fl_ctx, f"skipped finalize of unsafe learner {self.learner_name}")
def _create_learner(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
self.learner = engine.get_component(self.learner_id)
if self.learner:
self.learner_name = self.learner.__class__.__name__
check_object_type("learner", self.learner, ModelLearner)
self.log_info(fl_ctx, f"Got learner: {self.learner_name}")
def initialize(self, fl_ctx: FLContext):
try:
engine = fl_ctx.get_engine()
self.learner.fl_ctx = fl_ctx
self.learner.engine = engine
self.learner.initialize()
except Exception as e:
self.log_exception(fl_ctx, f"initialize error from {self.learner_name}: {secure_format_exception(e)}")
raise e
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# Do one task at a time since the shareable and fl_ctx are kept in "self".
with self.learner_exe_lock:
return self._do_execute(task_name, shareable, fl_ctx, abort_signal)
def _do_execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
self._setup_learner(self.learner, shareable, fl_ctx, abort_signal)
if not self.is_initialized:
self.is_initialized = True
self.initialize(fl_ctx)
task_func = self.task_funcs.get(task_name)
if task_func is not None:
return task_func(shareable, fl_ctx)
else:
self.log_error(fl_ctx, f"Unknown task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
@staticmethod
def _setup_learner(learner: ModelLearner, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal):
learner.shareable = shareable
learner.fl_ctx = fl_ctx
learner.abort_signal = abort_signal
if not learner.args:
learner.args = fl_ctx.get_prop(FLContextKey.ARGS)
if not learner.site_name:
learner.site_name = fl_ctx.get_identity_name()
if not learner.job_id:
learner.job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
if not learner.engine:
learner.engine = fl_ctx.get_engine()
learner.workspace = learner.engine.get_workspace()
learner.workspace_root = learner.workspace.get_root_dir()
learner.job_root = learner.workspace.get_run_dir(learner.job_id)
learner.app_root = learner.workspace.get_app_dir(learner.job_id)
if shareable:
learner.current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
learner.total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
def train(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
try:
shareable.set_header(AppConstants.VALIDATE_TYPE, ValidateType.BEFORE_TRAIN_VALIDATE)
model = FLModelUtils.from_shareable(shareable)
except ValueError:
self.log_error(fl_ctx, "request does not contain DXO")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
try:
val_result = self.learner.validate(model)
except Exception as e:
self.log_exception(
fl_ctx, f"Learner {self.learner_name} failed to pretrain validate: {secure_format_exception(e)}"
)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
if isinstance(val_result, str):
# this is an error code!
self.log_warning(fl_ctx, f"Learner {self.learner_name}: pretrain validate failed: {val_result}")
val_result = None
if val_result:
if not isinstance(val_result, FLModel):
self.log_warning(
fl_ctx,
f"Learner {self.learner_name}: pretrain validate: expect FLModel but got {type(val_result)}",
)
val_result = None
elif not val_result.metrics:
self.log_warning(
fl_ctx,
f"Learner {self.learner_name}: pretrain validate: no metrics",
)
val_result = None
try:
train_result = self.learner.train(model)
except Exception as e:
self.log_exception(fl_ctx, f"Learner {self.learner_name} failed to train: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
if isinstance(train_result, str):
# this is an error code!
return make_reply(train_result)
if not isinstance(train_result, FLModel):
self.log_error(
fl_ctx,
f"Learner {self.learner_name}: bad result from train: expect FLModel but got {type(train_result)}",
)
return make_reply(ReturnCode.EMPTY_RESULT)
# if the learner returned the valid BEFORE_TRAIN_VALIDATE result, set the INITIAL_METRICS in
# the train result, which can be used for best model selection.
if val_result:
FLModelUtils.set_meta_prop(
model=train_result,
key=MetaKey.INITIAL_METRICS,
value=val_result.metrics,
)
return FLModelUtils.to_shareable(train_result)
def submit_model(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
model_name = shareable.get_header(AppConstants.SUBMIT_MODEL_NAME)
try:
result = self.learner.get_model(model_name)
except Exception as e:
self.log_exception(fl_ctx, f"Learner {self.learner_name} failed to get_model: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
if isinstance(result, str):
self.log_error(fl_ctx, f"Learner {self.learner_name} failed to get_model: {result}")
return make_reply(result)
if isinstance(result, FLModel):
return FLModelUtils.to_shareable(result)
else:
self.log_error(
fl_ctx,
f"Learner {self.learner_name} bad result from get_model: expect DXO but got {type(result)}",
)
return make_reply(ReturnCode.EMPTY_RESULT)
def validate(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
try:
model = FLModelUtils.from_shareable(shareable)
except ValueError:
self.log_error(fl_ctx, "request does not contain valid model")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
try:
result = self.learner.validate(model)
except Exception as e:
self.log_exception(fl_ctx, f"Learner {self.learner_name} failed to validate: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
if isinstance(result, str):
self.log_error(fl_ctx, f"Learner {self.learner_name} failed to validate: {result}")
return make_reply(result)
if isinstance(result, FLModel):
return FLModelUtils.to_shareable(result)
else:
self.log_error(
fl_ctx, f"Learner {self.learner_name}: bad result from validate: expect FLModel but got {type(result)}"
)
return make_reply(ReturnCode.EMPTY_RESULT)
def configure(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
try:
model = FLModelUtils.from_shareable(shareable)
except ValueError:
self.log_error(fl_ctx, "request does not contain valid model data")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
rc = ReturnCode.OK
try:
self.learner.configure(model)
except Exception as e:
self.log_exception(fl_ctx, f"Learner {self.learner_name} failed to configure: {secure_format_exception(e)}")
rc = ReturnCode.EXECUTION_EXCEPTION
return make_reply(rc)
def finalize(self, fl_ctx: FLContext):
try:
self.learner.fl_ctx = fl_ctx
self.learner.finalize()
except Exception as e:
self.log_exception(fl_ctx, f"learner finalize exception: {secure_format_exception(e)}")
| NVFlare-main | nvflare/app_common/executors/model_learner_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.workflows.splitnn_workflow import SplitNNConstants
from nvflare.security.logging import secure_format_exception
class SplitNNLearnerExecutor(Executor):
def __init__(
self,
learner_id,
init_model_task_name=SplitNNConstants.TASK_INIT_MODEL,
train_task_name=SplitNNConstants.TASK_TRAIN,
):
"""Key component to run learner on clients.
Args:
learner_id (str): id pointing to the learner object
train_task_name (str, optional): label to dispatch train task. Defaults to AppConstants.TASK_TRAIN.
submit_model_task_name (str, optional): label to dispatch submit model task. Defaults to AppConstants.TASK_SUBMIT_MODEL.
validate_task_name (str, optional): label to dispatch validation task. Defaults to AppConstants.TASK_VALIDATION.
"""
super().__init__()
self.learner_id = learner_id
self.learner = None
self.init_model_task_name = init_model_task_name
self.train_task_name = train_task_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
elif event_type == EventType.ABORT_TASK:
try:
if self.learner:
self.learner.abort(fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner abort exception: {secure_format_exception(e)}")
elif event_type == EventType.END_RUN:
self.finalize(fl_ctx)
def initialize(self, fl_ctx: FLContext):
try:
engine = fl_ctx.get_engine()
self.learner = engine.get_component(self.learner_id)
if not isinstance(self.learner, Learner):
raise TypeError(f"learner must be Learner type. Got: {type(self.learner)}")
self.learner.initialize(engine.get_all_components(), fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner initialize exception: {secure_format_exception(e)}")
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
self.log_info(fl_ctx, f"Executing task {task_name}...")
try:
if task_name == self.init_model_task_name:
self.log_info(fl_ctx, "Initializing model...")
return self.learner.init_model(shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
elif task_name == self.train_task_name:
self.log_info(fl_ctx, "Running training...")
return self.learner.train(shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
else:
self.log_error(fl_ctx, f"Could not handle task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
# Task execution error, return EXECUTION_EXCEPTION Shareable
self.log_exception(fl_ctx, f"learner execute exception: {secure_format_exception(e)}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def finalize(self, fl_ctx: FLContext):
try:
if self.learner:
self.learner.finalize(fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner finalize exception: {secure_format_exception(e)}")
| NVFlare-main | nvflare/app_common/executors/splitnn_learner_executor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
from nvflare.security.logging import secure_format_exception
class LearnerExecutor(Executor):
def __init__(
self,
learner_id,
train_task=AppConstants.TASK_TRAIN,
submit_model_task=AppConstants.TASK_SUBMIT_MODEL,
validate_task=AppConstants.TASK_VALIDATION,
):
"""Key component to run learner on clients.
Args:
learner_id (str): id of the learner object
train_task (str, optional): task name for train. Defaults to AppConstants.TASK_TRAIN.
submit_model_task (str, optional): task name for submit model. Defaults to AppConstants.TASK_SUBMIT_MODEL.
validate_task (str, optional): task name for validation. Defaults to AppConstants.TASK_VALIDATION.
"""
super().__init__()
self.learner_id = learner_id
self.learner = None
self.train_task = train_task
self.submit_model_task = submit_model_task
self.validate_task = validate_task
self.is_initialized = False
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.ABORT_TASK:
try:
if self.learner:
if not self.unsafe:
self.learner.abort(fl_ctx)
else:
self.log_warning(fl_ctx, f"skipped abort of unsafe learner {self.learner.__class__.__name__}")
except Exception as e:
self.log_exception(fl_ctx, f"learner abort exception: {secure_format_exception(e)}")
elif event_type == EventType.END_RUN:
if not self.unsafe:
self.finalize(fl_ctx)
elif self.learner:
self.log_warning(fl_ctx, f"skipped finalize of unsafe learner {self.learner.__class__.__name__}")
def initialize(self, fl_ctx: FLContext):
try:
engine = fl_ctx.get_engine()
self.learner = engine.get_component(self.learner_id)
if not isinstance(self.learner, Learner):
raise TypeError(f"learner must be Learner type. Got: {type(self.learner)}")
self.learner.initialize(engine.get_all_components(), fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner initialize exception: {secure_format_exception(e)}")
raise e
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
if not self.is_initialized:
self.is_initialized = True
self.initialize(fl_ctx)
if task_name == self.train_task:
return self.train(shareable, fl_ctx, abort_signal)
elif task_name == self.submit_model_task:
return self.submit_model(shareable, fl_ctx)
elif task_name == self.validate_task:
return self.validate(shareable, fl_ctx, abort_signal)
else:
self.log_error(fl_ctx, f"Could not handle task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_debug(fl_ctx, f"train abort signal: {abort_signal.triggered}")
shareable.set_header(AppConstants.VALIDATE_TYPE, ValidateType.BEFORE_TRAIN_VALIDATE)
validate_result: Shareable = self.learner.validate(shareable, fl_ctx, abort_signal)
train_result = self.learner.train(shareable, fl_ctx, abort_signal)
if not (train_result and isinstance(train_result, Shareable)):
return make_reply(ReturnCode.EMPTY_RESULT)
# if the learner returned the valid BEFORE_TRAIN_VALIDATE result, set the INITIAL_METRICS in
# the train result, which can be used for best model selection.
if (
validate_result
and isinstance(validate_result, Shareable)
and validate_result.get_return_code() == ReturnCode.OK
):
try:
metrics_dxo = from_shareable(validate_result)
train_dxo = from_shareable(train_result)
train_dxo.meta[MetaKey.INITIAL_METRICS] = metrics_dxo.data.get(MetaKey.INITIAL_METRICS, 0)
return train_dxo.to_shareable()
except ValueError:
return train_result
else:
return train_result
def submit_model(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
model_name = shareable.get_header(AppConstants.SUBMIT_MODEL_NAME)
submit_model_result = self.learner.get_model_for_validation(model_name, fl_ctx)
if submit_model_result and isinstance(submit_model_result, Shareable):
return submit_model_result
else:
return make_reply(ReturnCode.EMPTY_RESULT)
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_debug(fl_ctx, f"validate abort_signal {abort_signal.triggered}")
shareable.set_header(AppConstants.VALIDATE_TYPE, ValidateType.MODEL_VALIDATE)
validate_result: Shareable = self.learner.validate(shareable, fl_ctx, abort_signal)
if validate_result and isinstance(validate_result, Shareable):
return validate_result
else:
return make_reply(ReturnCode.EMPTY_RESULT)
def finalize(self, fl_ctx: FLContext):
try:
if self.learner:
self.learner.finalize(fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"learner finalize exception: {secure_format_exception(e)}")
| NVFlare-main | nvflare/app_common/executors/learner_executor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shlex
import subprocess
import threading
import time
from abc import abstractmethod
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.apis.utils.fl_context_utils import get_serializable_data
from nvflare.fuel.common.multi_process_executor_constants import (
CommunicateData,
CommunicationMetaData,
MultiProcessCommandNames,
)
from nvflare.fuel.f3.cellnet.core_cell import Message as CellMessage
from nvflare.fuel.f3.cellnet.core_cell import MessageHeaderKey
from nvflare.fuel.f3.cellnet.core_cell import ReturnCode as F3ReturnCode
from nvflare.fuel.f3.cellnet.core_cell import make_reply as F3make_reply
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.utils.class_utils import ModuleScanner
from nvflare.fuel.utils.component_builder import ComponentBuilder
from nvflare.private.defs import CellChannel, CellChannelTopic, new_cell_message
from nvflare.security.logging import secure_format_exception
class WorkerComponentBuilder(ComponentBuilder):
FL_PACKAGES = ["nvflare"]
FL_MODULES = ["client", "app"]
def __init__(self) -> None:
"""Component to build workers."""
super().__init__()
self.module_scanner = ModuleScanner(WorkerComponentBuilder.FL_PACKAGES, WorkerComponentBuilder.FL_MODULES, True)
def get_module_scanner(self):
return self.module_scanner
class MultiProcessExecutor(Executor):
def __init__(self, executor_id=None, num_of_processes=1, components=None):
"""Manage the multi-process execution life cycle.
Arguments:
executor_id: executor component ID
num_of_processes: number of processes to create
components: a dictionary for component classes to their arguments
"""
super().__init__()
self.executor_id = executor_id
self.components_conf = components
self.components = {}
self.handlers = []
self._build_components(components)
        if not isinstance(num_of_processes, int):
            raise TypeError(f"num_of_processes must be an instance of int but got {type(num_of_processes)}")
        if num_of_processes < 1:
            raise ValueError("num_of_processes must be >= 1.")
self.num_of_processes = num_of_processes
self.executor = None
self.execute_result = None
self.execute_complete = None
self.engine = None
self.logger = logging.getLogger(self.__class__.__name__)
self.conn_clients = []
self.exe_process = None
self.stop_execute = False
self.relay_threads = []
self.finalized = False
self.event_lock = threading.Lock()
self.relay_lock = threading.Lock()
@abstractmethod
def get_multi_process_command(self) -> str:
"""Provide the command for starting multi-process execution.
Returns:
multi-process starting command
"""
return ""
def _build_components(self, components):
component_builder = WorkerComponentBuilder()
for item in components:
cid = item.get("id", None)
if not cid:
raise TypeError("missing component id")
self.components[cid] = component_builder.build_component(item)
if isinstance(self.components[cid], FLComponent):
self.handlers.append(self.components[cid])
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
elif event_type == EventType.END_RUN:
self.finalize(fl_ctx)
self._pass_event_to_rank_processes(event_type, fl_ctx)
def _pass_event_to_rank_processes(self, event_type: str, fl_ctx: FLContext):
event_site = fl_ctx.get_prop(FLContextKey.EVENT_ORIGIN_SITE)
if self.engine:
if event_site != CommunicateData.SUB_WORKER_PROCESS:
with self.event_lock:
try:
data = {
CommunicationMetaData.COMMAND: CommunicateData.HANDLE_EVENT,
CommunicationMetaData.FL_CTX: get_serializable_data(fl_ctx),
CommunicationMetaData.EVENT_TYPE: event_type,
}
# send the init data to all the child processes
request = new_cell_message({}, data)
self.engine.client.cell.fire_and_forget(
targets=self.targets,
channel=CellChannel.CLIENT_SUB_WORKER_COMMAND,
topic=MultiProcessCommandNames.FIRE_EVENT,
message=request,
)
except Exception:
                        # Warning: must set fire_event=False, otherwise it would cause an infinite loop in event handling!
self.log_warning(
fl_ctx,
f"Failed to relay the event to child processes. Event: {event_type}",
fire_event=False,
)
def initialize(self, fl_ctx: FLContext):
self.executor = self.components.get(self.executor_id, None)
if not isinstance(self.executor, Executor):
raise ValueError(
"invalid executor {}: expect Executor but got {}".format(self.executor_id, type(self.executor))
)
self._initialize_multi_process(fl_ctx)
def _initialize_multi_process(self, fl_ctx: FLContext):
try:
client_name = fl_ctx.get_identity_name()
job_id = fl_ctx.get_job_id()
self.engine = fl_ctx.get_engine()
simulate_mode = fl_ctx.get_prop(FLContextKey.SIMULATE_MODE, False)
cell = self.engine.client.cell
            # Create the internal listener for grandchild processes
cell.make_internal_listener()
command = (
self.get_multi_process_command()
+ " -m nvflare.private.fed.app.client.sub_worker_process"
+ " -m "
+ fl_ctx.get_prop(FLContextKey.ARGS).workspace
+ " -c "
+ client_name
+ " -n "
+ job_id
+ " --num_processes "
+ str(self.num_of_processes)
+ " --simulator_engine "
+ str(simulate_mode)
+ " --parent_pid "
+ str(os.getpid())
+ " --root_url "
+ str(cell.get_root_url_for_child())
+ " --parent_url "
+ str(cell.get_internal_listener_url())
)
self.logger.info(f"multi_process_executor command: {command}")
# use os.setsid to create new process group ID
            self.exe_process = subprocess.Popen(shlex.split(command), preexec_fn=os.setsid, env=os.environ.copy())
# send the init data to all the child processes
cell.register_request_cb(
channel=CellChannel.MULTI_PROCESS_EXECUTOR,
topic=CellChannelTopic.EXECUTE_RESULT,
cb=self.receive_execute_result,
)
cell.register_request_cb(
channel=CellChannel.MULTI_PROCESS_EXECUTOR,
topic=CellChannelTopic.FIRE_EVENT,
cb=self._relay_fire_event,
)
self.targets = []
for i in range(self.num_of_processes):
fqcn = FQCN.join([cell.get_fqcn(), str(i)])
start = time.time()
while not cell.is_cell_reachable(fqcn):
time.sleep(1.0)
if time.time() - start > 60.0:
raise RuntimeError(f"Could not reach the communication cell: {fqcn}")
self.targets.append(fqcn)
request = new_cell_message(
{},
{
CommunicationMetaData.FL_CTX: get_serializable_data(fl_ctx),
CommunicationMetaData.COMPONENTS: self.components_conf,
CommunicationMetaData.LOCAL_EXECUTOR: self.executor_id,
},
)
replies = cell.broadcast_request(
targets=self.targets,
channel=CellChannel.CLIENT_SUB_WORKER_COMMAND,
topic=MultiProcessCommandNames.INITIALIZE,
request=request,
)
for name, reply in replies.items():
if reply.get_header(MessageHeaderKey.RETURN_CODE) != F3ReturnCode.OK:
self.log_exception(fl_ctx, "error initializing multi_process executor")
raise ValueError(reply.get_header(MessageHeaderKey.ERROR))
except Exception as e:
self.log_exception(fl_ctx, f"error initializing multi_process executor: {secure_format_exception(e)}")
def receive_execute_result(self, request: CellMessage) -> CellMessage:
return_data = request.payload
with self.engine.new_context() as fl_ctx:
fl_ctx.props.update(return_data[CommunicationMetaData.FL_CTX].props)
self.execute_result = return_data[CommunicationMetaData.SHAREABLE]
self.execute_complete = True
return F3make_reply(ReturnCode.OK, "", None)
def _relay_fire_event(self, request: CellMessage) -> CellMessage:
data = request.payload
with self.engine.new_context() as fl_ctx:
event_type = data[CommunicationMetaData.EVENT_TYPE]
rank_number = data[CommunicationMetaData.RANK_NUMBER]
with self.relay_lock:
fl_ctx.props.update(data[CommunicationMetaData.FL_CTX].props)
fl_ctx.set_prop(FLContextKey.FROM_RANK_NUMBER, rank_number, private=True, sticky=False)
fl_ctx.set_prop(
FLContextKey.EVENT_ORIGIN_SITE,
CommunicateData.SUB_WORKER_PROCESS,
private=True,
sticky=False,
)
self.engine.fire_event(event_type, fl_ctx)
return_data = {CommunicationMetaData.FL_CTX: get_serializable_data(fl_ctx)}
return F3make_reply(ReturnCode.OK, "", return_data)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if not self.executor:
raise RuntimeError("There's no executor for task {}".format(task_name))
self.execute_complete = False
self._execute_multi_process(task_name=task_name, shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
while not self.execute_complete:
time.sleep(0.2)
return self.execute_result
def _execute_multi_process(
self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal
) -> Shareable:
if abort_signal.triggered:
self.finalize(fl_ctx)
return make_reply(ReturnCode.OK)
self.engine = fl_ctx.get_engine()
try:
data = {
CommunicationMetaData.COMMAND: CommunicateData.EXECUTE,
CommunicationMetaData.TASK_NAME: task_name,
CommunicationMetaData.SHAREABLE: shareable,
CommunicationMetaData.FL_CTX: get_serializable_data(fl_ctx),
}
request = new_cell_message({}, data)
self.engine.client.cell.fire_and_forget(
targets=self.targets,
channel=CellChannel.CLIENT_SUB_WORKER_COMMAND,
topic=MultiProcessCommandNames.TASK_EXECUTION,
message=request,
)
except Exception:
self.log_error(fl_ctx, "Multi-Process Execution error.")
return make_reply(ReturnCode.EXECUTION_RESULT_ERROR)
def finalize(self, fl_ctx: FLContext):
"""This is called when exiting/aborting the executor."""
if self.finalized:
return
self.finalized = True
self.stop_execute = True
request = new_cell_message({}, None)
self.engine.client.cell.fire_and_forget(
targets=self.targets,
channel=CellChannel.CLIENT_SUB_WORKER_COMMAND,
topic=MultiProcessCommandNames.CLOSE,
message=request,
)
try:
os.killpg(os.getpgid(self.exe_process.pid), 9)
self.logger.debug("kill signal sent")
except Exception:
pass
if self.exe_process:
self.exe_process.terminate()
# wait for all relay threads to join!
for t in self.relay_threads:
if t.is_alive():
t.join()
self.log_info(fl_ctx, "Multi-Process Executor finalized!", fire_event=False)
| NVFlare-main | nvflare/app_common/executors/multi_process_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Optional
from nvflare.apis.dxo import DXO
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.task_handler import TaskHandler
class ErrorHandlingExecutor(Executor, ABC):
"""This class adds error handling mechanisms to Executor spec.
It also makes sharable convertible to DXO.
It delegates the task execution to TaskHandler.
"""
def __init__(self):
super().__init__()
self.init_status_ok = True
self.init_failure = {"abort_job": None, "fail_client": None}
self.client_name = None
self.task_handler: Optional[TaskHandler] = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
elif event_type == EventType.END_RUN:
self.finalize(fl_ctx)
def initialize(self, fl_ctx: FLContext):
try:
self.client_name = fl_ctx.get_identity_name()
self.task_handler = self.get_task_handler(fl_ctx)
except TypeError as te:
self.log_exception(fl_ctx, f"{self.__class__.__name__} initialize failed.")
self.init_status_ok = False
self.init_failure = {"abort_job": te}
except Exception as e:
self.log_exception(fl_ctx, f"{self.__class__.__name__} initialize failed.")
self.init_status_ok = False
self.init_failure = {"fail_client": e}
@abstractmethod
def get_task_handler(self, fl_ctx: FLContext) -> TaskHandler:
pass
@abstractmethod
def get_data_kind(self) -> str:
pass
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
init_rc = self._check_init_status(fl_ctx)
if init_rc:
return make_reply(init_rc)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
try:
result = self.task_handler.execute_task(task_name, shareable, fl_ctx, abort_signal)
if result is not None:
dxo = DXO(data_kind=self.get_data_kind(), data=result)
return dxo.to_shareable()
self.log_error(
fl_ctx,
f"task:{task_name} failed on client:{fl_ctx.get_identity_name()} due to result is '{result}'\n",
)
return make_reply(ReturnCode.EXECUTION_RESULT_ERROR)
except Exception:
self.log_exception(fl_ctx, f"{self.__class__.__name__} executes task {task_name} failed.")
return make_reply(ReturnCode.EXECUTION_RESULT_ERROR)
def _check_init_status(self, fl_ctx: FLContext):
if not self.init_status_ok:
for fail_key in self.init_failure:
reason = self.init_failure[fail_key]
if fail_key == "abort_job":
return ReturnCode.EXECUTION_EXCEPTION
self.system_panic(reason, fl_ctx)
return ReturnCode.EXECUTION_RESULT_ERROR
return None
def finalize(self, fl_ctx: FLContext):
try:
if self.task_handler:
self.task_handler.finalize(fl_ctx)
except Exception:
self.log_exception(fl_ctx, f"{self.__class__.__name__} finalize exception.")
| NVFlare-main | nvflare/app_common/executors/error_handling_executor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/executors/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
from nvflare.apis.fl_context import FLContext
from nvflare.apis.utils.decomposers import flare_decomposers
from nvflare.app_common.decomposers import common_decomposers
from nvflare.app_common.executors.launcher_executor import LauncherExecutor
from nvflare.fuel.utils.constants import Mode
from nvflare.fuel.utils.pipe.file_pipe import FilePipe
from nvflare.fuel.utils.pipe.pipe_handler import PipeHandler
from nvflare.fuel.utils.validation_utils import check_object_type
class FilePipeLauncherExecutor(LauncherExecutor):
def __init__(
self,
data_exchange_path: Optional[str] = None,
pipe_id: Optional[str] = None,
pipe_name: str = "pipe",
launcher_id: Optional[str] = None,
launch_timeout: Optional[float] = None,
task_wait_time: Optional[float] = None,
task_read_wait_time: Optional[float] = None,
result_poll_interval: float = 0.1,
read_interval: float = 0.1,
heartbeat_interval: float = 5.0,
heartbeat_timeout: float = 30.0,
workers: int = 1,
training: bool = True,
global_evaluation: bool = True,
from_nvflare_converter_id: Optional[str] = None,
to_nvflare_converter_id: Optional[str] = None,
) -> None:
"""Initializes the FilePipeLauncherExecutor.
Args:
            data_exchange_path (Optional[str]): Path used for data exchange. If None, the "app_dir" of the running job will be used.
                If pipe_id is provided, the Pipe obtained via pipe_id will be used instead.
pipe_id (Optional[str]): Identifier used to get the Pipe from NVFlare components.
pipe_name (str): Name of the pipe. Defaults to "pipe".
launcher_id (Optional[str]): Identifier used to get the Launcher from NVFlare components.
launch_timeout (Optional[float]): Timeout for the "launch" method to end. None means never timeout.
task_wait_time (Optional[float]): Time to wait for tasks to complete before exiting the executor. None means never timeout.
task_read_wait_time (Optional[float]): Time to wait for task results from the pipe. None means no wait.
result_poll_interval (float): Interval for polling task results from the pipe. Defaults to 0.1.
read_interval (float): Interval for reading from the pipe. Defaults to 0.1.
heartbeat_interval (float): Interval for sending heartbeat to the peer. Defaults to 5.0.
heartbeat_timeout (float): Timeout for waiting for a heartbeat from the peer. Defaults to 30.0.
workers (int): Number of worker threads needed.
training (bool): Whether to run training using global model. Defaults to True.
global_evaluation (bool): Whether to run evaluation on global model. Defaults to True.
from_nvflare_converter_id (Optional[str]): Identifier used to get the ParamsConverter from NVFlare components.
This converter will be called when model is sent from nvflare controller side to executor side.
to_nvflare_converter_id (Optional[str]): Identifier used to get the ParamsConverter from NVFlare components.
This converter will be called when model is sent from nvflare executor side to controller side.
"""
super().__init__(
pipe_id=pipe_id,
pipe_name=pipe_name,
launcher_id=launcher_id,
launch_timeout=launch_timeout,
task_wait_time=task_wait_time,
task_read_wait_time=task_read_wait_time,
result_poll_interval=result_poll_interval,
read_interval=read_interval,
heartbeat_interval=heartbeat_interval,
heartbeat_timeout=heartbeat_timeout,
workers=workers,
training=training,
global_evaluation=global_evaluation,
from_nvflare_converter_id=from_nvflare_converter_id,
to_nvflare_converter_id=to_nvflare_converter_id,
)
self._data_exchange_path = data_exchange_path
def initialize(self, fl_ctx: FLContext) -> None:
self._init_launcher(fl_ctx)
self._init_converter(fl_ctx)
engine = fl_ctx.get_engine()
# gets pipe
if self._pipe_id:
pipe: FilePipe = engine.get_component(self._pipe_id)
check_object_type(self._pipe_id, pipe, FilePipe)
self._data_exchange_path = pipe.root_path
else:
# gets data_exchange_path
if self._data_exchange_path is None or self._data_exchange_path == "":
app_dir = engine.get_workspace().get_app_dir(fl_ctx.get_job_id())
self._data_exchange_path = os.path.abspath(app_dir)
elif not os.path.isabs(self._data_exchange_path):
raise RuntimeError("data exchange path needs to be absolute.")
pipe = FilePipe(mode=Mode.ACTIVE, root_path=self._data_exchange_path)
# init pipe
flare_decomposers.register()
common_decomposers.register()
pipe.open(self._pipe_name)
self.pipe_handler = PipeHandler(
pipe,
read_interval=self._read_interval,
heartbeat_interval=self._heartbeat_interval,
heartbeat_timeout=self._heartbeat_timeout,
)
self.pipe_handler.start()
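# --- Illustrative config sketch (added for exposition; not part of the original file) ---
# How this executor is typically wired in config_fed_client.json; the ids, launcher path,
# and script below are demonstration-only assumptions.
#   "executors": [{
#       "tasks": ["train"],
#       "executor": {
#           "path": "nvflare.app_common.executors.file_pipe_launcher_executor.FilePipeLauncherExecutor",
#           "args": {"launcher_id": "launcher", "launch_timeout": 60.0}
#       }
#   }],
#   "components": [{
#       "id": "launcher",
#       "path": "nvflare.app_common.launchers.subprocess_launcher.SubprocessLauncher",
#       "args": {"script": "python3 custom/train.py"}
#   }]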
| NVFlare-main | nvflare/app_common/executors/file_pipe_launcher_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Optional
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.executors.file_pipe_launcher_executor import FilePipeLauncherExecutor
from nvflare.app_common.model_exchange.constants import ModelExchangeFormat
from nvflare.client.config import ClientConfig, ConfigKey, TransferType
from nvflare.client.constants import CONFIG_EXCHANGE
class ClientAPILauncherExecutor(FilePipeLauncherExecutor):
def __init__(
self,
data_exchange_path: Optional[str] = None,
pipe_id: Optional[str] = None,
pipe_name: str = "pipe",
launcher_id: Optional[str] = None,
launch_timeout: Optional[float] = None,
task_wait_time: Optional[float] = None,
task_read_wait_time: Optional[float] = None,
result_poll_interval: float = 0.1,
read_interval: float = 0.1,
heartbeat_interval: float = 5.0,
heartbeat_timeout: float = 30.0,
workers: int = 1,
training: bool = True,
global_evaluation: bool = True,
params_exchange_format: ModelExchangeFormat = ModelExchangeFormat.NUMPY,
params_transfer_type: TransferType = TransferType.FULL,
from_nvflare_converter_id: Optional[str] = None,
to_nvflare_converter_id: Optional[str] = None,
) -> None:
"""Initializes the ClientAPILauncherExecutor.
Args:
            data_exchange_path (Optional[str]): Path used for data exchange. If None, the "app_dir" of the running job will be used.
                If pipe_id is provided, the Pipe obtained via pipe_id will be used instead.
pipe_id (Optional[str]): Identifier used to get the Pipe from NVFlare components.
pipe_name (str): Name of the pipe. Defaults to "pipe".
launcher_id (Optional[str]): Identifier used to get the Launcher from NVFlare components.
launch_timeout (Optional[float]): Timeout for the "launch" method to end. None means never timeout.
task_wait_time (Optional[float]): Time to wait for tasks to complete before exiting the executor. None means never timeout.
task_read_wait_time (Optional[float]): Time to wait for task results from the pipe. None means no wait.
result_poll_interval (float): Interval for polling task results from the pipe. Defaults to 0.1.
read_interval (float): Interval for reading from the pipe. Defaults to 0.1.
heartbeat_interval (float): Interval for sending heartbeat to the peer. Defaults to 5.0.
heartbeat_timeout (float): Timeout for waiting for a heartbeat from the peer. Defaults to 30.0.
workers (int): Number of worker threads needed.
training (bool): Whether to run training using global model. Defaults to True.
global_evaluation (bool): Whether to run evaluation on global model. Defaults to True.
params_exchange_format (ModelExchangeFormat): What format to exchange the parameters.
params_transfer_type (TransferType): How to transfer the parameters. FULL means the whole model parameters are sent.
DIFF means that only the difference is sent.
from_nvflare_converter_id (Optional[str]): Identifier used to get the ParamsConverter from NVFlare components.
This converter will be called when model is sent from nvflare controller side to executor side.
to_nvflare_converter_id (Optional[str]): Identifier used to get the ParamsConverter from NVFlare components.
This converter will be called when model is sent from nvflare executor side to controller side.
"""
super().__init__(
data_exchange_path=data_exchange_path,
pipe_id=pipe_id,
pipe_name=pipe_name,
launcher_id=launcher_id,
launch_timeout=launch_timeout,
task_wait_time=task_wait_time,
task_read_wait_time=task_read_wait_time,
result_poll_interval=result_poll_interval,
read_interval=read_interval,
heartbeat_interval=heartbeat_interval,
heartbeat_timeout=heartbeat_timeout,
workers=workers,
training=training,
global_evaluation=global_evaluation,
from_nvflare_converter_id=from_nvflare_converter_id,
to_nvflare_converter_id=to_nvflare_converter_id,
)
self._params_exchange_format = params_exchange_format
self._params_transfer_type = params_transfer_type
def prepare_config_for_launch(self, fl_ctx: FLContext):
workspace = fl_ctx.get_engine().get_workspace()
app_dir = workspace.get_app_dir(fl_ctx.get_job_id())
config_file = os.path.join(app_dir, workspace.config_folder, CONFIG_EXCHANGE)
client_config = ClientConfig()
self._update_config_exchange_dict(client_config.config)
client_config.to_json(config_file)
def _update_config_exchange_dict(self, config: Dict):
config[ConfigKey.GLOBAL_EVAL] = self._global_evaluation
config[ConfigKey.TRAINING] = self._training
config[ConfigKey.EXCHANGE_FORMAT] = self._params_exchange_format
config[ConfigKey.EXCHANGE_PATH] = self._data_exchange_path
config[ConfigKey.TRANSFER_TYPE] = self._params_transfer_type
| NVFlare-main | nvflare/app_common/executors/client_api_launcher_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from concurrent.futures import ThreadPoolExecutor
from threading import Event
from typing import Optional
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.launcher import Launcher, LauncherCompleteStatus
from nvflare.app_common.utils.fl_model_utils import FLModelUtils, ParamsConverter
from nvflare.fuel.utils.pipe.pipe import Message, Pipe
from nvflare.fuel.utils.pipe.pipe_handler import PipeHandler, Topic
from nvflare.fuel.utils.validation_utils import check_object_type
from nvflare.security.logging import secure_format_exception
class LauncherExecutor(Executor):
def __init__(
self,
pipe_id: str,
pipe_name: str = "pipe",
launcher_id: Optional[str] = None,
launch_timeout: Optional[float] = None,
task_wait_time: Optional[float] = None,
task_read_wait_time: Optional[float] = None,
result_poll_interval: float = 0.1,
read_interval: float = 0.1,
heartbeat_interval: float = 5.0,
heartbeat_timeout: float = 30.0,
workers: int = 1,
training: bool = True,
global_evaluation: bool = True,
from_nvflare_converter_id: Optional[str] = None,
to_nvflare_converter_id: Optional[str] = None,
) -> None:
"""Initializes the LauncherExecutor.
Args:
pipe_id (str): Identifier used to get the Pipe from NVFlare components.
pipe_name (str): Name of the pipe. Defaults to "pipe".
launcher_id (Optional[str]): Identifier used to get the Launcher from NVFlare components.
launch_timeout (Optional[float]): Timeout for the "launch" method to end. None means never timeout.
task_wait_time (Optional[float]): Time to wait for tasks to complete before exiting the executor. None means never timeout.
task_read_wait_time (Optional[float]): Time to wait for task results from the pipe. None means no wait.
result_poll_interval (float): Interval for polling task results from the pipe. Defaults to 0.1.
read_interval (float): Interval for reading from the pipe. Defaults to 0.1.
heartbeat_interval (float): Interval for sending heartbeat to the peer. Defaults to 5.0.
heartbeat_timeout (float): Timeout for waiting for a heartbeat from the peer. Defaults to 30.0.
workers (int): Number of worker threads needed.
training (bool): Whether to run training using global model. Defaults to True.
global_evaluation (bool): Whether to run evaluation on global model. Defaults to True.
            from_nvflare_converter_id (Optional[str]): Identifier used to get the ParamsConverter from NVFlare components.
                This converter is called when a model is sent from the NVFlare controller side to the executor side.
            to_nvflare_converter_id (Optional[str]): Identifier used to get the ParamsConverter from NVFlare components.
                This converter is called when a model is sent from the NVFlare executor side to the controller side.
"""
super().__init__()
self._launcher_id = launcher_id
self.launch_timeout = launch_timeout
self.launcher: Optional[Launcher] = None
self._launcher_finish = Event()
self._launcher_finish_status = None
self._thread_pool_executor = ThreadPoolExecutor(max_workers=workers, thread_name_prefix=self.__class__.__name__)
self.pipe_handler: Optional[PipeHandler] = None
self._pipe_id = pipe_id
self._pipe_name = pipe_name
self._topic = "data"
self._read_interval = read_interval
self._heartbeat_interval = heartbeat_interval
self._heartbeat_timeout = heartbeat_timeout
self._task_wait_time = task_wait_time
self._result_poll_interval = result_poll_interval
self._task_read_wait_time = task_read_wait_time
# flags to indicate whether the launcher side will send back trained model and/or metrics
self._training = training
self._global_evaluation = global_evaluation
if self._training is False and self._global_evaluation is False:
raise RuntimeError("training and global_evaluation can't be both False.")
self._result_fl_model = None
self._result_metrics = None
self._from_nvflare_converter_id = from_nvflare_converter_id
self._from_nvflare_converter: Optional[ParamsConverter] = None
self._to_nvflare_converter_id = to_nvflare_converter_id
self._to_nvflare_converter: Optional[ParamsConverter] = None
def initialize(self, fl_ctx: FLContext) -> None:
self._init_launcher(fl_ctx)
self._init_converter(fl_ctx)
# gets pipe
engine = fl_ctx.get_engine()
pipe: Pipe = engine.get_component(self._pipe_id)
check_object_type(self._pipe_id, pipe, Pipe)
# init pipe
pipe.open(self._pipe_name)
self.pipe_handler = PipeHandler(
pipe,
read_interval=self._read_interval,
heartbeat_interval=self._heartbeat_interval,
heartbeat_timeout=self._heartbeat_timeout,
)
self.pipe_handler.start()
def handle_event(self, event_type: str, fl_ctx: FLContext) -> None:
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
self.prepare_config_for_launch(fl_ctx)
elif event_type == EventType.END_RUN:
if self.launcher:
self.launcher.finalize(fl_ctx)
self.log_info(fl_ctx, "END_RUN received - telling external to stop")
if self.pipe_handler is not None:
self.pipe_handler.notify_end("END_RUN received")
self.pipe_handler.stop(close_pipe=True)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
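        # Flow sketch: (1) run the launcher's launch_task in a worker thread, waiting up
        # to launch_timeout for it to succeed; (2) start another worker thread that waits
        # for the launched process to finish; (3) exchange task data and results with the
        # external process over the pipe; (4) check the launcher's completion status
        # before returning the exchanged result.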
future = self._launch_in_new_thread(task_name, shareable, fl_ctx, abort_signal)
try:
launch_success = future.result(timeout=self.launch_timeout)
        except FuturesTimeoutError:  # concurrent.futures raises its own TimeoutError before Python 3.11
self.log_error(fl_ctx, f"launch task: {task_name} exceeds {self.launch_timeout} seconds")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
if not launch_success:
self.log_error(fl_ctx, f"launch task: {task_name} failed")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
future = self._wait_in_new_thread(task_name, fl_ctx, self._task_wait_time)
result = self._exchange(task_name, shareable, fl_ctx, abort_signal)
try:
completion_status = future.result(timeout=self._task_wait_time)
if completion_status != LauncherCompleteStatus.SUCCESS:
self.log_error(fl_ctx, "launcher execution failed")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        except FuturesTimeoutError:
self.log_error(fl_ctx, f"wait task: {task_name} exceeds {self._task_wait_time} seconds")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
self._clear()
return result
def _init_launcher(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
launcher: Launcher = engine.get_component(self._launcher_id)
if launcher is not None:
check_object_type(self._launcher_id, launcher, Launcher)
launcher.initialize(fl_ctx)
self.launcher = launcher
def _init_converter(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
from_nvflare_converter: ParamsConverter = engine.get_component(self._from_nvflare_converter_id)
if from_nvflare_converter is not None:
check_object_type(self._from_nvflare_converter_id, from_nvflare_converter, ParamsConverter)
self._from_nvflare_converter = from_nvflare_converter
to_nvflare_converter: ParamsConverter = engine.get_component(self._to_nvflare_converter_id)
if to_nvflare_converter is not None:
check_object_type(self._to_nvflare_converter_id, to_nvflare_converter, ParamsConverter)
self._to_nvflare_converter = to_nvflare_converter
def prepare_config_for_launch(self, fl_ctx: FLContext):
"""Prepares any configuration for the process to be launched."""
pass
def _launch(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> bool:
if self.launcher:
return self.launcher.launch_task(task_name, shareable, fl_ctx, abort_signal)
return True
def _launch_in_new_thread(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal):
future = self._thread_pool_executor.submit(self._launch, task_name, shareable, fl_ctx, abort_signal)
return future
def _stop_launcher(self, task_name: str, fl_ctx: FLContext) -> None:
try:
if self.launcher:
self.launcher.stop_task(task_name=task_name, fl_ctx=fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"launcher stop exception: {secure_format_exception(e)}")
def _wait_launcher(self, task_name: str, fl_ctx: FLContext, timeout: Optional[float]) -> LauncherCompleteStatus:
return_status = LauncherCompleteStatus.FAILED
try:
if self.launcher:
return_status = self.launcher.wait_task(task_name=task_name, fl_ctx=fl_ctx, timeout=timeout)
except Exception as e:
self.log_exception(fl_ctx, f"launcher wait exception: {secure_format_exception(e)}")
self._stop_launcher(task_name=task_name, fl_ctx=fl_ctx)
self._launcher_finish.set()
self._launcher_finish_status = return_status
return return_status
def _wait_in_new_thread(self, task_name: str, fl_ctx: FLContext, timeout: Optional[float]):
future = self._thread_pool_executor.submit(self._wait_launcher, task_name, fl_ctx, timeout)
return future
def _exchange(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if self.pipe_handler is None:
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
model = FLModelUtils.from_shareable(shareable, self._from_nvflare_converter, fl_ctx)
req = Message.new_request(topic=self._topic, data=model)
has_been_read = self.pipe_handler.send_to_peer(req, timeout=self._task_read_wait_time)
if self._task_read_wait_time and not has_been_read:
self.log_error(
fl_ctx, f"failed to read task '{task_name}' in {self._task_read_wait_time} secs - aborting task!"
)
return make_reply(ReturnCode.SERVICE_UNAVAILABLE)
# wait for result
start = time.time()
while True:
if abort_signal.triggered:
self.log_error(fl_ctx, f"task '{task_name}' is aborted.")
self.pipe_handler.notify_abort(task_name)
self._stop_launcher(task_name, fl_ctx)
return make_reply(ReturnCode.TASK_ABORTED)
reply: Optional[Message] = self.pipe_handler.get_next()
if reply is None:
if self._task_wait_time and time.time() - start > self._task_wait_time:
self.log_error(fl_ctx, f"task '{task_name}' timeout after {self._task_wait_time} secs")
self.pipe_handler.notify_abort(task_name)
self._stop_launcher(task_name, fl_ctx)
self._log_result(fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
elif reply.topic == Topic.ABORT:
self.log_error(fl_ctx, f"the other end ask to abort task '{task_name}'")
self._stop_launcher(task_name, fl_ctx)
self._log_result(fl_ctx)
return make_reply(ReturnCode.TASK_ABORTED)
elif reply.topic in [Topic.END, Topic.PEER_GONE]:
self.log_error(fl_ctx, f"received {reply.topic} while waiting for result for {task_name}")
self._stop_launcher(task_name, fl_ctx)
self._log_result(fl_ctx)
return make_reply(ReturnCode.SERVICE_UNAVAILABLE)
elif reply.msg_type != Message.REPLY:
self.log_warning(
fl_ctx, f"ignored msg '{reply.topic}.{reply.req_id}' when waiting for '{req.topic}.{req.msg_id}'"
)
elif req.topic != reply.topic:
# ignore wrong task name
self.log_warning(fl_ctx, f"ignored '{reply.topic}' when waiting for '{req.topic}'")
elif req.msg_id != reply.req_id:
self.log_warning(fl_ctx, f"ignored '{reply.req_id}' when waiting for '{req.msg_id}'")
else:
self.log_info(fl_ctx, f"got result for task '{task_name}'")
if reply.data.params is not None:
self._result_fl_model = reply.data
if reply.data.metrics is not None:
self._result_metrics = reply.data
if self._check_exchange_exit():
break
if self._launcher_finish.is_set():
self.log_error(
fl_ctx,
f"Launcher already exited before exchange ended. Exit status is: '{self._launcher_finish_status}'",
)
self._log_result(fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
time.sleep(self._result_poll_interval)
result_fl_model = self._create_result_fl_model()
return FLModelUtils.to_shareable(result_fl_model, self._to_nvflare_converter)
def _log_result(self, fl_ctx):
if self._training and self._result_fl_model is None:
self.log_error(fl_ctx, "missing result FLModel with training flag True.")
if self._global_evaluation and self._result_metrics is None:
self.log_error(fl_ctx, "missing result metrics with global_evaluation flag True.")
def _check_exchange_exit(self):
if self._training and self._result_fl_model is None:
return False
if self._global_evaluation and self._result_metrics is None:
return False
return True
def _create_result_fl_model(self):
if self._result_fl_model is not None:
if self._result_metrics is not None:
self._result_fl_model.metrics = self._result_metrics.metrics
return self._result_fl_model
elif self._result_metrics is not None:
return self._result_metrics
else:
raise RuntimeError("Missing result fl model and result metrics")
def _clear(self):
self._result_fl_model = None
self._result_metrics = None
self._launcher_finish_status = None
self._launcher_finish.clear()
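# Minimal client-side wiring sketch (hypothetical component ids; the json layout follows
# the usual config_fed_client.json conventions and is illustrative, not normative):
#
#   "executors": [{
#       "tasks": ["train"],
#       "executor": {
#           "path": "nvflare.app_common.executors.launcher_executor.LauncherExecutor",
#           "args": {"pipe_id": "pipe", "launcher_id": "launcher"}
#       }
#   }],
#   "components": [
#       {"id": "pipe", "path": "<a concrete Pipe implementation>", "args": {}},
#       {"id": "launcher",
#        "path": "nvflare.app_common.launchers.subprocess_launcher.SubprocessLauncher",
#        "args": {"script": "python3 custom/train.py"}}
#   ]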
| NVFlare-main | nvflare/app_common/executors/launcher_executor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import DataKind
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.executors.error_handling_executor import ErrorHandlingExecutor
from nvflare.app_common.executors.statistics.statistics_task_handler import StatisticsTaskHandler
"""
StatisticsExecutor is client-side executor that perform local statistics generation and communication to
FL Server global statistics controller.
The actual local statistics calculation would delegate to Statistics spec implementor.
"""
class StatisticsExecutor(ErrorHandlingExecutor):
def __init__(
self,
generator_id: str,
precision=4,
):
"""
Args:
generator_id: id of the statistics component
precision: number of precision digits
"""
super().__init__()
self.generator_id = generator_id
self.precision = precision
def get_data_kind(self) -> str:
return DataKind.STATISTICS
def get_task_handler(self, fl_ctx: FLContext):
task_handler = StatisticsTaskHandler(self.generator_id, self.precision)
task_handler.initialize(fl_ctx)
return task_handler
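# Usage sketch (hypothetical component id): the executor only needs the id of a
# component implementing the Statistics spec, which is defined elsewhere in the job:
#
#   executor = StatisticsExecutor(generator_id="local_stats_generator", precision=4)
#
# At runtime "local_stats_generator" is resolved through the engine to the Statistics
# implementor that computes count/sum/mean/stddev/histogram on the local dataset.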
| NVFlare-main | nvflare/app_common/executors/statistics/statistics_executor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/executors/statistics/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.statistics_spec import Feature, Histogram, HistogramType, StatisticConfig, Statistics
from nvflare.app_common.abstract.task_handler import TaskHandler
from nvflare.app_common.app_constant import StatisticsConstants as StC
from nvflare.app_common.statistics.numeric_stats import filter_numeric_features
from nvflare.app_common.statistics.statisitcs_objects_decomposer import fobs_registration
from nvflare.app_common.statistics.statistics_config_utils import get_feature_bin_range
from nvflare.fuel.utils import fobs
from nvflare.security.logging import secure_format_exception
class StatisticsTaskHandler(TaskHandler):
"""
StatisticsTaskHandler is to be used together with StatisticsExecutor.
StatisticsExecutor is client-side executor that perform local statistics generation and communication to
FL Server global statistics controller. The actual local statistics calculation would delegate to
Statistics spec implementor.
"""
def __init__(self, generator_id: str, precision: int = 4):
super().__init__(generator_id, Statistics)
self.stats_generator: Optional[Statistics] = None
self.precision = precision
fobs_registration()
def initialize(self, fl_ctx: FLContext):
super().initialize(fl_ctx)
self.stats_generator = self.local_comp
def execute_task(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
client_name = fl_ctx.get_identity_name()
self.log_info(fl_ctx, f"Executing task '{task_name}' for client: '{client_name}'")
result = Shareable()
statistics_result = {}
if task_name == StC.FED_STATS_PRE_RUN:
# initial handshake
target_statistics: List[StatisticConfig] = fobs.loads(shareable.get(StC.STATS_TARGET_STATISTICS))
return self.pre_run(target_statistics)
elif task_name == StC.FED_STATS_TASK:
ds_features = self.get_numeric_features()
statistics_task = shareable.get(StC.STATISTICS_TASK_KEY)
target_statistics: List[StatisticConfig] = fobs.loads(shareable.get(StC.STATS_TARGET_STATISTICS))
            # target_statistics holds StatisticConfig objects, so membership is checked by name
            if StC.STATS_FAILURE_COUNT not in [tc.name for tc in target_statistics]:
                target_statistics.append(StatisticConfig(StC.STATS_FAILURE_COUNT, {}))
for tm in target_statistics:
fn = self.statistic_functions()[tm.name]
statistics_result[tm.name] = {}
self._populate_result_statistics(statistics_result, ds_features, tm, shareable, fl_ctx, fn)
# always add count for data privacy needs
if StC.STATS_COUNT not in statistics_result:
tm = StatisticConfig(StC.STATS_COUNT, {})
fn = self.get_count
statistics_result[tm.name] = {}
self._populate_result_statistics(statistics_result, ds_features, tm, shareable, fl_ctx, fn)
result[StC.STATISTICS_TASK_KEY] = statistics_task
if statistics_task == StC.STATS_1st_STATISTICS:
result[StC.STATS_FEATURES] = fobs.dumps(ds_features)
result[statistics_task] = fobs.dumps(statistics_result)
return result
else:
raise RuntimeError(ReturnCode.TASK_UNKNOWN)
def statistic_functions(self) -> dict:
return {
StC.STATS_COUNT: self.get_count,
StC.STATS_FAILURE_COUNT: self.get_failure_count,
StC.STATS_SUM: self.get_sum,
StC.STATS_MEAN: self.get_mean,
StC.STATS_STDDEV: self.get_stddev,
StC.STATS_VAR: self.get_variance_with_mean,
StC.STATS_HISTOGRAM: self.get_histogram,
StC.STATS_MAX: self.get_max_value,
StC.STATS_MIN: self.get_min_value,
}
def _populate_result_statistics(self, statistics_result, ds_features, tm: StatisticConfig, shareable, fl_ctx, fn):
for ds_name in ds_features:
statistics_result[tm.name][ds_name] = {}
features: List[Feature] = ds_features[ds_name]
for feature in features:
try:
statistics_result[tm.name][ds_name][feature.feature_name] = fn(
ds_name, feature.feature_name, tm, shareable, fl_ctx
)
except Exception as e:
self.log_exception(
fl_ctx,
f"Failed to populate result statistics of dataset {ds_name}"
f" and feature {feature.feature_name} with exception: {secure_format_exception(e)}",
)
def get_numeric_features(self) -> Dict[str, List[Feature]]:
ds_features: Dict[str, List[Feature]] = self.stats_generator.features()
return filter_numeric_features(ds_features)
def pre_run(self, target_statistics: List[StatisticConfig]):
feature_num_of_bins = None
feature_bin_ranges = None
target_statistic_keys = []
for mc in target_statistics:
target_statistic_keys.append(mc.name)
if mc.name == StC.STATS_HISTOGRAM:
hist_config = mc.config
feature_num_of_bins = {}
feature_bin_ranges = {}
for feature_name in hist_config:
num_of_bins: int = self.get_number_of_bins(feature_name, hist_config)
feature_num_of_bins[feature_name] = num_of_bins
bin_range = get_feature_bin_range(feature_name, hist_config)
feature_bin_ranges[feature_name] = bin_range
return self.stats_generator.pre_run(target_statistic_keys, feature_num_of_bins, feature_bin_ranges)
def get_count(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> int:
result = self.stats_generator.count(dataset_name, feature_name)
return result
def get_failure_count(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> int:
result = self.stats_generator.failure_count(dataset_name, feature_name)
return result
def get_sum(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> float:
result = round(self.stats_generator.sum(dataset_name, feature_name), self.precision)
return result
def get_mean(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> float:
count = self.stats_generator.count(dataset_name, feature_name)
sum_value = self.stats_generator.sum(dataset_name, feature_name)
if count is not None and sum_value is not None:
return round(sum_value / count, self.precision)
else:
            # user did not implement count and/or sum; call mean() directly.
mean = round(self.stats_generator.mean(dataset_name, feature_name), self.precision)
# self._check_result(mean, statistic_configs.name)
return mean
def get_stddev(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> float:
result = round(self.stats_generator.stddev(dataset_name, feature_name), self.precision)
return result
def get_variance_with_mean(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> float:
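        # Second pass of the federated variance computation: the server aggregates the
        # global mean and count from the first pass and sends them back, so each client
        # can compute its local variance against the global mean. Returns None if the
        # global values are not present in the inputs.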
result = None
if StC.STATS_GLOBAL_MEAN in inputs and StC.STATS_GLOBAL_COUNT in inputs:
global_mean = self._get_global_value_from_input(StC.STATS_GLOBAL_MEAN, dataset_name, feature_name, inputs)
global_count = self._get_global_value_from_input(StC.STATS_GLOBAL_COUNT, dataset_name, feature_name, inputs)
if global_mean is not None and global_count is not None:
result = self.stats_generator.variance_with_mean(dataset_name, feature_name, global_mean, global_count)
result = round(result, self.precision)
return result
def get_histogram(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> Histogram:
if StC.STATS_MIN in inputs and StC.STATS_MAX in inputs:
global_min_value = self._get_global_value_from_input(StC.STATS_MIN, dataset_name, feature_name, inputs)
global_max_value = self._get_global_value_from_input(StC.STATS_MAX, dataset_name, feature_name, inputs)
if global_min_value is not None and global_max_value is not None:
hist_config: dict = statistic_configs.config
num_of_bins: int = self.get_number_of_bins(feature_name, hist_config)
bin_range: List[float] = self.get_bin_range(
feature_name, global_min_value, global_max_value, hist_config
)
result = self.stats_generator.histogram(
dataset_name, feature_name, num_of_bins, bin_range[0], bin_range[1]
)
return result
else:
return Histogram(HistogramType.STANDARD, list())
else:
return Histogram(HistogramType.STANDARD, list())
def get_max_value(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> float:
"""
get randomized max value
"""
hist_config: dict = statistic_configs.config
feature_bin_range = get_feature_bin_range(feature_name, hist_config)
if feature_bin_range is None:
client_max_value = self.stats_generator.max_value(dataset_name, feature_name)
return client_max_value
else:
return feature_bin_range[1]
def get_min_value(
self,
dataset_name: str,
feature_name: str,
statistic_configs: StatisticConfig,
inputs: Shareable,
fl_ctx: FLContext,
) -> float:
"""
get randomized min value
"""
hist_config: dict = statistic_configs.config
feature_bin_range = get_feature_bin_range(feature_name, hist_config)
if feature_bin_range is None:
client_min_value = self.stats_generator.min_value(dataset_name, feature_name)
return client_min_value
else:
return feature_bin_range[0]
def get_number_of_bins(self, feature_name: str, hist_config: dict) -> int:
err_msg = (
f"feature name = '{feature_name}': "
f"missing required '{StC.STATS_BINS}' config in histogram config = {hist_config}"
)
        try:
            num_of_bins = None
            if feature_name in hist_config:
                num_of_bins = hist_config[feature_name][StC.STATS_BINS]
            elif "*" in hist_config:
                num_of_bins = hist_config["*"][StC.STATS_BINS]
            if num_of_bins:
                return num_of_bins
            else:
                raise Exception(err_msg)
        except KeyError as e:
            raise Exception(err_msg) from e
def get_bin_range(
self, feature_name: str, global_min_value: float, global_max_value: float, hist_config: dict
) -> List[float]:
global_bin_range = [global_min_value, global_max_value]
bin_range = get_feature_bin_range(feature_name, hist_config)
if bin_range is None:
bin_range = global_bin_range
return bin_range
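    # Illustrative histogram config consumed by get_number_of_bins / get_bin_range above,
    # where "*" is the default for features without their own entry (feature names, key
    # strings, and values here are examples only):
    #
    #   {
    #       "Age": {"bins": 10, "range": [0, 120]},
    #       "*": {"bins": 20},
    #   }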
def _get_global_value_from_input(self, statistic_key: str, dataset_name: str, feature_name: str, inputs):
        global_value = None
        if dataset_name in inputs[statistic_key]:
            if feature_name in inputs[statistic_key][dataset_name]:
                global_value = inputs[statistic_key][dataset_name][feature_name]
            elif "*" in inputs[statistic_key][dataset_name]:
                # a "*" wildcard entry applies to every feature of the dataset
                global_value = inputs[statistic_key][dataset_name]["*"]
        return global_value
| NVFlare-main | nvflare/app_common/executors/statistics/statistics_task_handler.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class StatisticExecutorException(Exception):
pass
| NVFlare-main | nvflare/app_common/executors/statistics/statistics_executor_exception.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/launchers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
import subprocess
import sys
from typing import Optional
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.launcher import Launcher, LauncherCompleteStatus
class SubprocessLauncher(Launcher):
def __init__(self, script: str, clean_up_script: Optional[str] = None):
"""Initializes the SubprocessLauncher.
Args:
script (str): Script to be launched using subprocess.
clean_up_script (Optional[str]): Optional clean up script to be run after the main script execution.
"""
super().__init__()
self._app_dir = None
self._process = None
self._script = script
self._clean_up_script = clean_up_script
def initialize(self, fl_ctx: FLContext):
self._app_dir = self.get_app_dir(fl_ctx)
def launch_task(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> bool:
if self._process is None:
command = self._script
env = os.environ.copy()
command_seq = shlex.split(command)
self._process = subprocess.Popen(
command_seq, stdout=sys.stdout, stderr=subprocess.STDOUT, cwd=self._app_dir, env=env
)
return True
return False
def wait_task(self, task_name: str, fl_ctx: FLContext, timeout=None) -> LauncherCompleteStatus:
if self._process:
return_code = self._process.wait(timeout)
self.stop_task(task_name, fl_ctx)
if return_code == 0:
return LauncherCompleteStatus.SUCCESS
return LauncherCompleteStatus.FAILED
return LauncherCompleteStatus.SUCCESS
def stop_task(self, task_name: str, fl_ctx: FLContext) -> None:
if self._process:
self._process.terminate()
self._process.wait()
if self._clean_up_script:
command_seq = shlex.split(self._clean_up_script)
process = subprocess.Popen(command_seq, cwd=self._app_dir)
process.wait()
self._process = None
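# Direct-use sketch (hypothetical script path; in a job this launcher is normally driven
# by a LauncherExecutor rather than called directly):
#
#   launcher = SubprocessLauncher(script="python3 custom/train.py")
#   launcher.initialize(fl_ctx)
#   launcher.launch_task("train", shareable, fl_ctx, abort_signal)
#   status = launcher.wait_task("train", fl_ctx)  # SUCCESS only if the exit code is 0
#   launcher.stop_task("train", fl_ctx)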
| NVFlare-main | nvflare/app_common/launchers/subprocess_launcher.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from nvflare.apis.resource_manager_spec import ResourceConsumerSpec
class _Consumer(ABC):
@abstractmethod
def consume(self, resources: list):
pass
class _GPUConsumer(_Consumer):
def __init__(self):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
def consume(self, resources: list):
"""Consumes resources.
        Note that this class does not check whether those GPUs physically exist.
"""
gpu_numbers = [str(x) for x in resources]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(gpu_numbers)
class ListResourceConsumer(ResourceConsumerSpec):
def __init__(self):
"""This class can be used with ListResourceManager.
        Users can add a custom _Consumer to the resource_consumer_map to handle new resource types.
"""
super().__init__()
self.resource_consumer_map = {"gpu": _GPUConsumer()}
def consume(self, resources: dict):
for key, consumer in self.resource_consumer_map.items():
if key in resources:
consumer.consume(resources[key])
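# Usage sketch: the resources dict comes from a matching ListResourceManager; for example
#
#   ListResourceConsumer().consume({"gpu": [0, 1]})
#
# sets CUDA_VISIBLE_DEVICES="0,1" (without checking that these GPUs physically exist).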
| NVFlare-main | nvflare/app_common/resource_consumers/list_resource_consumer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvflare.apis.resource_manager_spec import ResourceConsumerSpec
from nvflare.fuel.utils.gpu_utils import get_host_gpu_ids, get_host_gpu_memory_free
class GPUResourceConsumer(ResourceConsumerSpec):
def __init__(self):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
def consume(self, resources: dict):
host_gpus = get_host_gpu_ids()
host_gpu_memory_free = get_host_gpu_memory_free(unit="MiB")
for gpu_id, gpu_mem in resources.items():
if gpu_id not in host_gpus:
raise RuntimeError(f"GPU ID {gpu_id} does not exist")
if gpu_mem * 1024.0 > host_gpu_memory_free[gpu_id]:
raise RuntimeError("GPU free mem is not enough")
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in resources.keys()])
| NVFlare-main | nvflare/app_common/resource_consumers/gpu_resource_consumer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/resource_consumers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvflare.apis.dxo import from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Task
from nvflare.apis.shareable import ReturnCode, Shareable
from nvflare.apis.signal import Signal
from nvflare.apis.workspace import Workspace
from nvflare.app_common.app_constant import AppConstants, ModelName
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.ccwf.common import Constant, ModelType, make_task_name
from nvflare.app_common.ccwf.server_ctl import ServerSideController
from nvflare.app_common.ccwf.val_result_manager import EvalResultManager
from nvflare.fuel.utils.validation_utils import (
DefaultValuePolicy,
check_positive_number,
check_str,
validate_candidate,
validate_candidates,
)
class CrossSiteEvalServerController(ServerSideController):
def __init__(
self,
task_name_prefix=Constant.TN_PREFIX_CROSS_SITE_EVAL,
start_task_timeout=Constant.START_TASK_TIMEOUT,
configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT,
eval_task_timeout=30,
task_check_period: float = Constant.TASK_CHECK_INTERVAL,
job_status_check_interval: float = Constant.JOB_STATUS_CHECK_INTERVAL,
progress_timeout: float = Constant.WORKFLOW_PROGRESS_TIMEOUT,
participating_clients=None,
evaluators=None,
evaluatees=None,
global_model_client=None,
max_status_report_interval: float = Constant.PER_CLIENT_STATUS_REPORT_TIMEOUT,
eval_result_dir=AppConstants.CROSS_VAL_DIR,
):
if not evaluatees:
evaluatees = []
if not evaluators:
evaluators = []
super().__init__(
num_rounds=1,
task_name_prefix=task_name_prefix,
start_task_timeout=start_task_timeout,
configure_task_timeout=configure_task_timeout,
task_check_period=task_check_period,
job_status_check_interval=job_status_check_interval,
participating_clients=participating_clients,
starting_client="",
starting_client_policy=DefaultValuePolicy.EMPTY,
max_status_report_interval=max_status_report_interval,
result_clients="",
result_clients_policy=DefaultValuePolicy.EMPTY,
progress_timeout=progress_timeout,
)
check_str("eval_result_dir", eval_result_dir)
check_positive_number("eval_task_timeout", eval_task_timeout)
if not global_model_client:
global_model_client = ""
self.global_model_client = global_model_client
self.eval_task_name = make_task_name(task_name_prefix, Constant.BASENAME_EVAL)
self.eval_task_timeout = eval_task_timeout
self.eval_local = False
self.eval_global = False
self.evaluators = evaluators
self.evaluatees = evaluatees
self.eval_result_dir = eval_result_dir
self.global_names = {}
self.eval_manager = None
self.current_round = 0
def start_controller(self, fl_ctx: FLContext):
super().start_controller(fl_ctx)
self.evaluators = validate_candidates(
var_name="evaluators",
candidates=self.evaluators,
base=self.participating_clients,
default_policy=DefaultValuePolicy.ALL,
allow_none=False,
)
self.evaluatees = validate_candidates(
var_name="evaluatees",
candidates=self.evaluatees,
base=self.participating_clients,
default_policy=DefaultValuePolicy.ALL,
allow_none=True,
)
self.global_model_client = validate_candidate(
var_name="global_model_client",
candidate=self.global_model_client,
base=self.participating_clients,
default_policy=DefaultValuePolicy.ANY,
allow_none=True,
)
if self.global_model_client:
self.eval_global = True
if self.evaluatees:
self.eval_local = True
if not self.eval_global and not self.eval_local:
raise RuntimeError("nothing to evaluate: you must set evaluatees and/or eval_global")
workspace: Workspace = self._engine.get_workspace()
run_dir = workspace.get_run_dir(fl_ctx.get_job_id())
cross_val_path = os.path.join(run_dir, self.eval_result_dir)
cross_val_results_dir = os.path.join(cross_val_path, AppConstants.CROSS_VAL_RESULTS_DIR_NAME)
self.eval_manager = EvalResultManager(cross_val_results_dir)
def prepare_config(self):
return {
Constant.EVAL_LOCAL: self.eval_local,
Constant.EVAL_GLOBAL: self.eval_global,
Constant.EVALUATORS: self.evaluators,
Constant.EVALUATEES: self.evaluatees,
Constant.GLOBAL_CLIENT: self.global_model_client,
}
def process_config_reply(self, client_name: str, reply: Shareable, fl_ctx: FLContext) -> bool:
global_names = reply.get(Constant.GLOBAL_NAMES)
if global_names:
for m in global_names:
if m not in self.global_names:
self.global_names[m] = client_name
self.log_info(fl_ctx, f"got global model name {m} from {client_name}")
return True
def _ask_to_evaluate(
self, current_round: int, model_name: str, model_type: str, model_owner: str, fl_ctx: FLContext
):
self.log_info(
fl_ctx,
f"R{current_round}: asking {self.evaluators} to evaluate {model_type} model '{model_name}' "
f"on client '{model_owner}'",
)
        # Create the validation task and broadcast it to the evaluators.
task_data = Shareable()
task_data[AppConstants.CURRENT_ROUND] = current_round
task_data[Constant.MODEL_OWNER] = model_owner # client that holds the model
task_data[Constant.MODEL_NAME] = model_name
task_data[Constant.MODEL_TYPE] = model_type
task = Task(
name=self.eval_task_name,
data=task_data,
result_received_cb=self._process_eval_result,
timeout=self.eval_task_timeout,
)
self.broadcast(
task=task,
fl_ctx=fl_ctx,
targets=self.evaluators,
min_responses=len(self.evaluators),
wait_time_after_min_received=0,
)
def sub_flow(self, abort_signal: Signal, fl_ctx: FLContext):
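        # Round numbering: one broadcast round for each global model reported during
        # configuration (if eval_global is set), then one round per evaluatee for its
        # best local model.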
if not self.global_names and not self.evaluatees:
self.system_panic("there are neither global models nor local models to evaluate!", fl_ctx)
return
# ask everyone to evaluate global model
if self.eval_global:
if len(self.global_names) == 0:
self.log_warning(fl_ctx, "no global models to evaluate!")
for m, owner in self.global_names.items():
self._ask_to_evaluate(
current_round=self.current_round,
model_name=m,
model_type=ModelType.GLOBAL,
model_owner=owner,
fl_ctx=fl_ctx,
)
self.current_round += 1
# ask everyone to eval everyone else's local model
for c in self.evaluatees:
self._ask_to_evaluate(
current_round=self.current_round,
model_name=ModelName.BEST_MODEL,
model_type=ModelType.LOCAL,
model_owner=c,
fl_ctx=fl_ctx,
)
self.current_round += 1
def is_sub_flow_done(self, fl_ctx: FLContext) -> bool:
return self.get_num_standing_tasks() == 0
def _process_eval_result(self, client_task: ClientTask, fl_ctx: FLContext):
# Find name of the client sending this
result = client_task.result
client_name = client_task.client.name
self._accept_eval_result(client_name=client_name, result=result, fl_ctx=fl_ctx)
def _accept_eval_result(self, client_name: str, result: Shareable, fl_ctx: FLContext):
model_owner = result.get_header(Constant.MODEL_OWNER, "")
model_type = result.get_header(Constant.MODEL_TYPE)
model_name = result.get_header(Constant.MODEL_NAME)
if model_type == ModelType.GLOBAL:
# global model
model_owner = "GLOBAL_" + model_name
model_info = model_owner
else:
model_info = f"{model_name} of {model_owner}"
        # Fire event. This needs to be a new local context for each client.
fl_ctx.set_prop(AppConstants.MODEL_OWNER, model_owner, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.DATA_CLIENT, client_name, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.VALIDATION_RESULT, result, private=True, sticky=False)
self.fire_event(AppEventType.VALIDATION_RESULT_RECEIVED, fl_ctx)
rc = result.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"bad evaluation result from client {client_name} on model {model_info}")
else:
dxo = from_shareable(result)
location = self.eval_manager.add_result(evaluatee=model_owner, evaluator=client_name, result=dxo)
self.log_info(fl_ctx, f"saved evaluation result from {client_name} on model {model_info} in {location}")
| NVFlare-main | nvflare/app_common/ccwf/cse_server_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import random
import threading
import time
from nvflare.apis.controller_spec import Task
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.aggregator import Aggregator
from nvflare.app_common.abstract.learnable import Learnable
from nvflare.app_common.abstract.metric_comparator import MetricComparator
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.ccwf.client_ctl import ClientSideController
from nvflare.app_common.ccwf.common import Constant, NumberMetricComparator, ResultType, make_task_name
from nvflare.fuel.utils.validation_utils import check_non_empty_str, check_positive_int, check_positive_number
from nvflare.security.logging import secure_format_traceback
class _TrainerStatus:
def __init__(self, name: str):
self.name = name
self.reply_time = None
class Gatherer(FLComponent):
def __init__(
self,
task_data: Shareable,
fl_ctx: FLContext,
for_round: int,
executor: ClientSideController,
aggregator: Aggregator,
metric_comparator: MetricComparator,
all_clients: list,
trainers: list,
min_responses_required: int,
wait_time_after_min_resps_received: float,
timeout,
):
FLComponent.__init__(self)
self.fl_ctx = fl_ctx
self.executor = executor
self.aggregator = aggregator
self.metric_comparator = metric_comparator
self.all_clients = all_clients
self.trainers = trainers
self.for_round = for_round
self.trainer_statuses = {}
self.start_time = time.time()
self.timeout = timeout
for t in trainers:
self.trainer_statuses[t] = _TrainerStatus(t)
if min_responses_required <= 0 or min_responses_required >= len(trainers):
min_responses_required = len(trainers)
self.min_responses_required = min_responses_required
self.wait_time_after_min_resps_received = wait_time_after_min_resps_received
self.min_resps_received_time = None
self.lock = threading.Lock()
self.current_best_client = task_data.get_header(Constant.CLIENT)
self.current_best_global_metric = task_data.get_header(Constant.METRIC)
self.current_best_round = task_data.get_header(Constant.ROUND)
if not self.current_best_client:
self.log_info(fl_ctx, "gatherer starting from scratch")
else:
self.log_info(
fl_ctx,
f"gatherer starting with previous best result from client {self.current_best_client} "
f"with metric {self.current_best_global_metric} "
f"at round {self.current_best_round}",
)
def gather(self, client_name: str, result: Shareable, fl_ctx: FLContext) -> Shareable:
with self.lock:
try:
return self._do_gather(client_name, result, fl_ctx)
            except Exception:
self.log_error(fl_ctx, f"exception gathering: {secure_format_traceback()}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _do_gather(self, client_name: str, result: Shareable, fl_ctx: FLContext) -> Shareable:
result_round = result.get_header(AppConstants.CURRENT_ROUND)
ts = self.trainer_statuses.get(client_name)
if not ts:
self.log_error(
fl_ctx, f"received result from {client_name} for round {result_round}, but it is not a trainer"
)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
if result_round > self.for_round:
# this should never happen!
# otherwise it means that the client is sending me result for a round that I couldn't possibly schedule!
self.log_error(
fl_ctx,
f"logic error: received result from {client_name} for round {result_round}, "
f"which is > gatherer's current round {self.for_round}",
)
self.executor.update_status(action="gather", error=ReturnCode.EXECUTION_EXCEPTION)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
if result_round < self.for_round:
# this is a late result for a round that I scheduled in the past.
# Note: we still accept it!
self.log_warning(
fl_ctx,
f"received late result from {client_name} for round {result_round}, "
f"which is < gatherer's current round {self.for_round}",
)
if result_round == self.for_round:
# this is the result that I'm waiting for.
now = time.time()
ts.reply_time = now
if not self.min_resps_received_time:
# see how many responses I have received
num_resps_received = 0
for _, ts in self.trainer_statuses.items():
if ts.reply_time:
num_resps_received += 1
if num_resps_received >= self.min_responses_required:
self.min_resps_received_time = now
rc = result.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"Bad result from {client_name} for round {result_round}: {rc}.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
fl_ctx.set_prop(AppConstants.CURRENT_ROUND, self.for_round, private=True, sticky=True)
fl_ctx.set_prop(AppConstants.TRAINING_RESULT, result, private=True, sticky=False)
self.fire_event(AppEventType.BEFORE_CONTRIBUTION_ACCEPT, fl_ctx)
accepted = self.aggregator.accept(result, fl_ctx)
accepted_msg = "ACCEPTED" if accepted else "REJECTED"
self.log_info(
fl_ctx, f"Contribution from {client_name} {accepted_msg} by the aggregator at round {result_round}."
)
fl_ctx.set_prop(AppConstants.AGGREGATION_ACCEPTED, accepted, private=True, sticky=False)
self.fire_event(AppEventType.AFTER_CONTRIBUTION_ACCEPT, fl_ctx)
return make_reply(ReturnCode.OK)
def aggregate(self):
fl_ctx = self.fl_ctx
self.log_info(fl_ctx, f"Start aggregation for round {self.for_round}")
self.fire_event(AppEventType.BEFORE_AGGREGATION, fl_ctx)
aggr_result = self.aggregator.aggregate(fl_ctx)
fl_ctx.set_prop(AppConstants.AGGREGATION_RESULT, aggr_result, private=True, sticky=False)
self.fire_event(AppEventType.AFTER_AGGREGATION, fl_ctx)
self.log_info(fl_ctx, f"Finished aggregation for round {self.for_round}")
mine_is_better = False
if self.current_best_global_metric is not None:
if (
self.executor.best_metric is not None
and self.metric_comparator.compare(self.executor.best_metric, self.current_best_global_metric) > 0
):
mine_is_better = True
elif self.executor.best_metric is not None:
mine_is_better = True
if mine_is_better:
self.log_info(
fl_ctx, f"I got better metric {self.executor.best_metric} at round {self.executor.best_round}"
)
best_round = self.executor.best_round
best_metric = self.executor.best_metric
best_client = self.executor.me
else:
best_round = self.current_best_round
best_metric = self.current_best_global_metric
best_client = self.current_best_client
self.log_info(fl_ctx, f"global best metric is {best_metric} from client {best_client} at round {best_round}")
aggr_result.set_header(Constant.ROUND, best_round)
aggr_result.set_header(Constant.METRIC, best_metric)
aggr_result.set_header(Constant.CLIENT, best_client)
return aggr_result
def is_done(self):
unfinished = 0
for c, s in self.trainer_statuses.items():
if not s.reply_time:
unfinished += 1
if unfinished == 0:
return True
# timeout?
now = time.time()
if self.timeout and now - self.start_time > self.timeout:
self.log_warning(self.fl_ctx, f"gatherer for round {self.for_round} timed out after {self.timeout} seconds")
return True
if (
self.min_resps_received_time
and now - self.min_resps_received_time > self.wait_time_after_min_resps_received
):
            # the minimum required responses have been received and the grace period has elapsed
            self.log_info(
                self.fl_ctx,
                f"gatherer for round {self.for_round} exiting {self.wait_time_after_min_resps_received} seconds "
                f"after receiving the minimum required responses",
            )
)
return True
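# Gatherer lifecycle sketch (per round, on the client chosen as aggregator): it is
# created when that round's learn task arrives, accepts trainer results into the
# aggregator until every trainer replies, the timeout expires, or the grace period after
# the minimum responses elapses; the monitor thread then calls aggregate() and moves on.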
class SwarmClientController(ClientSideController):
def __init__(
self,
task_name_prefix=Constant.TN_PREFIX_SWARM,
learn_task_name=AppConstants.TASK_TRAIN,
persistor_id=AppConstants.DEFAULT_PERSISTOR_ID,
shareable_generator_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
aggregator_id=AppConstants.DEFAULT_AGGREGATOR_ID,
metric_comparator_id=None,
learn_task_check_interval=Constant.LEARN_TASK_CHECK_INTERVAL,
learn_task_abort_timeout=Constant.LEARN_TASK_ABORT_TIMEOUT,
learn_task_ack_timeout=Constant.LEARN_TASK_ACK_TIMEOUT,
learn_task_timeout=None,
final_result_ack_timeout=Constant.FINAL_RESULT_ACK_TIMEOUT,
min_responses_required: int = 1,
wait_time_after_min_resps_received: float = 10.0,
):
check_non_empty_str("learn_task_name", learn_task_name)
check_non_empty_str("persistor_id", persistor_id)
check_non_empty_str("shareable_generator_id", shareable_generator_id)
check_non_empty_str("aggregator_id", aggregator_id)
if metric_comparator_id:
check_non_empty_str("metric_comparator_id", metric_comparator_id)
if learn_task_timeout:
check_positive_number("learn_task_timeout", learn_task_timeout)
check_positive_int("min_responses_required", min_responses_required)
check_positive_number("wait_time_after_min_resps_received", wait_time_after_min_resps_received)
super().__init__(
task_name_prefix=task_name_prefix,
learn_task_name=learn_task_name,
persistor_id=persistor_id,
shareable_generator_id=shareable_generator_id,
learn_task_check_interval=learn_task_check_interval,
learn_task_ack_timeout=learn_task_ack_timeout,
learn_task_abort_timeout=learn_task_abort_timeout,
final_result_ack_timeout=final_result_ack_timeout,
allow_busy_task=True,
)
self.metric_comparator_id = metric_comparator_id
self.metric_comparator = None
self.report_learn_result_task_name = make_task_name(task_name_prefix, Constant.BASENAME_REPORT_LEARN_RESULT)
self.learn_task_timeout = learn_task_timeout
self.min_responses_required = min_responses_required
self.wait_time_after_min_resps_received = wait_time_after_min_resps_received
self.aggregator_id = aggregator_id
self.aggregator = None
self.gatherer = None
self.gatherer_waiter = threading.Event()
self.trainers = None
self.aggrs = None
self.is_trainer = False
self.is_aggr = False
self.last_aggr_round_done = -1
def process_config(self, fl_ctx: FLContext):
all_clients = self.get_config_prop(Constant.CLIENTS)
self.trainers = self.get_config_prop(Constant.TRAIN_CLIENTS)
if not self.trainers:
self.trainers = all_clients
self.is_trainer = self.me in self.trainers
self.aggrs = self.get_config_prop(Constant.AGGR_CLIENTS)
if not self.aggrs:
self.aggrs = all_clients
self.is_aggr = self.me in self.aggrs
self.engine.register_aux_message_handler(
topic=self.topic_for_my_workflow(Constant.TOPIC_SHARE_RESULT),
message_handle_func=self._process_share_result,
)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self.report_learn_result_task_name:
return self._process_learn_result(shareable, fl_ctx, abort_signal)
return super().execute(task_name, shareable, fl_ctx, abort_signal)
def start_run(self, fl_ctx: FLContext):
super().start_run(fl_ctx)
self.aggregator = self.engine.get_component(self.aggregator_id)
if not isinstance(self.aggregator, Aggregator):
self.system_panic(
f"aggregator {self.aggregator_id} must be an Aggregator but got {type(self.aggregator)}",
fl_ctx,
)
return
if self.metric_comparator_id:
self.metric_comparator = self.engine.get_component(self.metric_comparator_id)
if not isinstance(self.metric_comparator, MetricComparator):
self.system_panic(
f"metric comparator {self.metric_comparator_id} must be a MetricComparator "
f"but got {type(self.metric_comparator)}",
fl_ctx,
)
return
else:
# use default comparator
self.metric_comparator = NumberMetricComparator()
aggr_thread = threading.Thread(target=self._monitor_gather)
aggr_thread.daemon = True
aggr_thread.start()
self.log_info(fl_ctx, "started aggregator thread")
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == AppEventType.GLOBAL_BEST_MODEL_AVAILABLE:
client = fl_ctx.get_prop(Constant.CLIENT)
if client and client != self.me:
# this global best model is from other client
# we got here because this event is fired when I receive the best model shared from another
# client at the end of the workflow.
return
# we got here because the best model selector fired this event: it found the "local best global"
self.best_metric = fl_ctx.get_prop(AppConstants.VALIDATION_RESULT)
self.best_result = copy.deepcopy(fl_ctx.get_prop(AppConstants.GLOBAL_MODEL))
self.log_info(fl_ctx, f"got GLOBAL_BEST_MODEL_AVAILABLE: best metric={self.best_metric}")
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
self.best_round = current_round
self.update_status(last_round=current_round, action="better_aggregation")
else:
super().handle_event(event_type, fl_ctx)
def start_workflow(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
clients = self.get_config_prop(Constant.CLIENTS)
aggr_clients = self.get_config_prop(Constant.AGGR_CLIENTS, [])
train_clients = self.get_config_prop(Constant.TRAIN_CLIENTS, [])
self.log_info(
fl_ctx, f"Starting Swarm Workflow on clients {clients}, aggrs {aggr_clients}, trainers {train_clients}"
)
if not self._scatter(
task_data=shareable, for_round=self.get_config_prop(Constant.START_ROUND, 0), fl_ctx=fl_ctx
):
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
self.log_info(fl_ctx, "Started Swarm Workflow")
return make_reply(ReturnCode.OK)
def _scatter(self, task_data: Shareable, for_round: int, fl_ctx: FLContext) -> bool:
clients = self.get_config_prop(Constant.TRAIN_CLIENTS)
aggr_clients = self.get_config_prop(Constant.AGGR_CLIENTS)
# determine aggr client
aggr = random.choice(aggr_clients)
task_data.set_header(AppConstants.CURRENT_ROUND, for_round)
task_data.add_cookie(AppConstants.CONTRIBUTION_ROUND, for_round)
task_data.set_header(Constant.AGGREGATOR, aggr)
targets = copy.copy(clients)
if aggr not in targets:
targets.append(aggr)
self.log_info(fl_ctx, f"broadcasting learn task of round {for_round} to {targets}; aggr client is {aggr}")
return self.send_learn_task(targets=targets, request=task_data, fl_ctx=fl_ctx)
def _monitor_gather(self):
while True:
if self.asked_to_stop:
return
gatherer = self.gatherer
if gatherer:
assert isinstance(gatherer, Gatherer)
if gatherer.is_done():
self.last_aggr_round_done = gatherer.for_round
self.gatherer = None
self.gatherer_waiter.clear()
try:
self._end_gather(gatherer)
                    except Exception:
self.logger.error(f"exception ending gatherer: {secure_format_traceback()}")
self.update_status(action="aggregate", error=ReturnCode.EXECUTION_EXCEPTION)
time.sleep(0.2)
def _end_gather(self, gatherer: Gatherer):
fl_ctx = gatherer.fl_ctx
try:
aggr_result = gatherer.aggregate()
        except Exception:
self.log_error(fl_ctx, f"exception in aggregation: {secure_format_traceback()}")
self.update_status(action="aggregate", error=ReturnCode.EXECUTION_EXCEPTION)
return
# aggr_result could be just weight diffs, not full weights!
# need to call shareable_to_learnable to get full weights.
self.log_info(fl_ctx, f"aggr result: {aggr_result}")
global_weights = self.shareable_generator.shareable_to_learnable(aggr_result, fl_ctx)
self.record_last_result(fl_ctx, gatherer.for_round, global_weights)
# are we done with training?
num_rounds_done = gatherer.for_round - self.get_config_prop(Constant.START_ROUND, 0) + 1
if num_rounds_done >= self.get_config_prop(AppConstants.NUM_ROUNDS):
self.log_info(fl_ctx, f"Swarm Learning Done: number of rounds completed {num_rounds_done}")
# determine the best global result
self._distribute_final_results(aggr_result, fl_ctx)
return
# continue next round
next_round_data = self.shareable_generator.learnable_to_shareable(global_weights, fl_ctx)
assert isinstance(next_round_data, Shareable)
best_round = aggr_result.get_header(Constant.ROUND)
best_metric = aggr_result.get_header(Constant.METRIC)
best_client = aggr_result.get_header(Constant.CLIENT)
if best_client:
next_round_data.set_header(Constant.ROUND, best_round)
next_round_data.set_header(Constant.CLIENT, best_client)
next_round_data.set_header(Constant.METRIC, best_metric)
self._scatter(next_round_data, gatherer.for_round + 1, gatherer.fl_ctx)
def _ask_to_share_best_result(self, client: str, metric, fl_ctx: FLContext):
# other client has best model - ask it to distribute its result
self.log_info(fl_ctx, f"client {client} has the best metric {metric} - ask it to share result")
resp = self.engine.send_aux_request(
targets=[client],
topic=self.topic_for_my_workflow(Constant.TOPIC_SHARE_RESULT),
request=Shareable(),
timeout=self.final_result_ack_timeout,
fl_ctx=fl_ctx,
secure=False,
)
assert isinstance(resp, dict)
reply = resp.get(client)
if not reply:
self.log_error(fl_ctx, f"failed to ask client {client} to share final result")
return
if not isinstance(reply, Shareable):
self.log_error(fl_ctx, f"client {client} failed to respond to share final result request")
return
rc = reply.get_return_code()
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"client {client} failed to respond to share final result request: {rc}")
def _distribute_final_results(self, aggr_result: Shareable, fl_ctx: FLContext):
best_client = aggr_result.get_header(Constant.CLIENT)
best_metric = aggr_result.get_header(Constant.METRIC)
if best_client:
if best_client == self.me:
# I have the best model
self.log_info(fl_ctx, f"I have global best metric {best_metric}")
self.broadcast_final_result(
fl_ctx, ResultType.BEST, self.best_result, self.best_metric, self.best_round
)
else:
try:
self._ask_to_share_best_result(best_client, best_metric, fl_ctx)
except:
self.log_error(
                        fl_ctx, f"error asking client {best_client} to share best result: {secure_format_traceback()}"
)
else:
self.log_info(fl_ctx, "No global best result!")
self.log_info(fl_ctx, "distributing last result")
self.broadcast_final_result(fl_ctx, ResultType.LAST, self.last_result, round_num=self.last_round)
def _process_learn_result(self, request: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
try:
peer_ctx = fl_ctx.get_peer_context()
assert isinstance(peer_ctx, FLContext)
client_name = peer_ctx.get_identity_name()
current_round = request.get_header(AppConstants.CURRENT_ROUND)
self.log_info(fl_ctx, f"got training result from {client_name} for round {current_round}")
# to be compatible with some widgets that rely on peer_ctx to get result
peer_ctx.set_prop(FLContextKey.SHAREABLE, request)
gatherer = self.gatherer
if not gatherer:
# this could be from a fast client before I even create the waiter;
# or from a late client after I already finished gathering.
if current_round <= self.last_aggr_round_done:
# late client case - drop the result
                    self.log_info(fl_ctx, f"dropped result from late client {client_name} for round {current_round}")
return make_reply(ReturnCode.OK)
# case of fast client
# wait until the gatherer is set up.
self.log_info(fl_ctx, f"got result from {client_name} for round {current_round} before gatherer setup")
self.gatherer_waiter.wait(self.learn_task_abort_timeout)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
gatherer = self.gatherer
if not gatherer:
self.log_error(fl_ctx, f"Still no gatherer after {self.learn_task_abort_timeout} seconds")
self.log_error(fl_ctx, f"Ignored result from {client_name} for round {current_round} since no gatherer")
self.update_status(action="wait_for_gatherer", error=ReturnCode.EXECUTION_EXCEPTION)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
assert isinstance(gatherer, Gatherer)
if gatherer.for_round != current_round:
self.log_warning(
fl_ctx,
f"Got result from {client_name} for round {current_round}, "
f"but I'm waiting for round {gatherer.for_round}",
)
return gatherer.gather(client_name, request, fl_ctx)
except:
self.log_exception(fl_ctx, f"exception processing learn result: {secure_format_traceback()}")
self.update_status(action="process_learn_result", error=ReturnCode.EXECUTION_EXCEPTION)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def do_learn_task(self, name: str, task_data: Shareable, fl_ctx: FLContext, abort_signal: Signal):
# set status report of starting task
current_round = task_data.get_header(AppConstants.CURRENT_ROUND)
self.update_status(last_round=current_round, action="start_learn_task")
aggr = task_data.get_header(Constant.AGGREGATOR)
if not aggr:
self.log_error(fl_ctx, f"missing aggregation client for round {current_round}")
self.update_status(action="do_learn_task", error=ReturnCode.EXECUTION_EXCEPTION)
return
self.log_info(fl_ctx, f"Round {current_round} started.")
# Some shareable generators assume the base model (GLOBAL_MODEL) is always available, which is true for
# server-controlled fed-avg. But this is not true for swarm learning.
# To make these generators happy, we create an empty global model here if not present.
base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
if not base_model:
base_model = Learnable()
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, base_model, private=True, sticky=True)
global_weights = self.shareable_generator.shareable_to_learnable(task_data, fl_ctx)
self.log_info(fl_ctx, f"current global model: {global_weights}")
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, global_weights, private=True, sticky=True)
fl_ctx.set_prop(AppConstants.CURRENT_ROUND, current_round, private=True, sticky=True)
self.fire_event(AppEventType.ROUND_STARTED, fl_ctx)
if self.me == aggr:
# set up the aggr waiter
gatherer = self.gatherer
if gatherer:
# already waiting for aggregation - should never happen
self.log_error(
fl_ctx,
f"logic error: got task for round {current_round} while gathering for round {gatherer.for_round}",
)
self.update_status(action="do_learn_task", error=ReturnCode.EXECUTION_EXCEPTION)
return
self.log_info(fl_ctx, f"setting up the gatherer for round {current_round}")
self.gatherer = Gatherer(
fl_ctx=fl_ctx,
all_clients=self.get_config_prop(Constant.CLIENTS),
metric_comparator=self.metric_comparator,
trainers=self.trainers,
for_round=current_round,
timeout=self.learn_task_timeout,
min_responses_required=self.min_responses_required,
wait_time_after_min_resps_received=self.wait_time_after_min_resps_received,
aggregator=self.aggregator,
executor=self,
task_data=task_data,
)
self.gatherer_waiter.set()
# execute the task
if self.is_trainer:
# update status
result = self.execute_learn_task(task_data, fl_ctx, abort_signal)
rc = result.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"learn executor failed: {rc}")
self.update_status(action="learner_execution", error=rc)
return
# send the result to the aggr
self.log_info(fl_ctx, f"sending training result to aggregation client {aggr}")
task = Task(
name=self.report_learn_result_task_name,
data=result,
timeout=int(self.learn_task_ack_timeout),
)
resp = self.broadcast_and_wait(
task=task,
targets=[aggr],
min_responses=1,
fl_ctx=fl_ctx,
)
reply = resp.get(aggr)
if not reply:
self.log_error(fl_ctx, f"failed to receive reply from aggregation client: {aggr}")
self.update_status(action="receive_learn_result_reply", error=ReturnCode.EXECUTION_EXCEPTION)
return
if not isinstance(reply, Shareable):
self.log_error(
fl_ctx, f"bad reply from aggregation client {aggr}: expect Shareable but got {type(reply)}"
)
self.update_status(action="receive_learn_result_reply", error=ReturnCode.EXECUTION_EXCEPTION)
return
rc = reply.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"bad return code from aggregation client {aggr}: {rc}")
self.update_status(action="receive_learn_result_reply", error=ReturnCode.EXECUTION_EXCEPTION)
return
self.log_info(fl_ctx, f"Finished round {current_round}")
# update status
self.update_status(last_round=current_round, action="finished_learn_task")
def _process_share_result(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
peer_ctx = fl_ctx.get_peer_context()
assert isinstance(peer_ctx, FLContext)
client_name = peer_ctx.get_identity_name()
if not self.best_result:
self.log_error(
fl_ctx, f"got request from {client_name} to share my best result, but I don't have best result"
)
return make_reply(ReturnCode.BAD_REQUEST_DATA)
self.update_status(action="start_share_result_request_process")
self.broadcast_final_result(
fl_ctx, ResultType.BEST, self.best_result, metric=self.best_metric, round_num=self.best_round
)
return make_reply(ReturnCode.OK)
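    # A minimal sketch of how this swarm client controller might be wired into a client job
    # config (hypothetical component ids and task names, for illustration only, and assuming
    # the controller class in this file is exported as SwarmClientController):
    #
    #   {
    #     "id": "swarm_client_ctl",
    #     "path": "nvflare.app_common.ccwf.swarm_client_ctl.SwarmClientController",
    #     "args": {
    #       "learn_task_name": "train",
    #       "persistor_id": "persistor",
    #       "shareable_generator_id": "shareable_generator"
    #     }
    #   }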
| NVFlare-main | nvflare/app_common/ccwf/swarm_client_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.app_common.ccwf.common import Constant, CyclicOrder
from nvflare.app_common.ccwf.server_ctl import ServerSideController
from nvflare.fuel.utils.validation_utils import DefaultValuePolicy, check_str, normalize_config_arg
class CyclicServerController(ServerSideController):
def __init__(
self,
num_rounds: int,
task_name_prefix=Constant.TN_PREFIX_CYCLIC,
start_task_timeout=Constant.START_TASK_TIMEOUT,
configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT,
task_check_period: float = Constant.TASK_CHECK_INTERVAL,
job_status_check_interval: float = Constant.JOB_STATUS_CHECK_INTERVAL,
participating_clients=None,
result_clients=None,
starting_client: str = "",
max_status_report_interval: float = Constant.PER_CLIENT_STATUS_REPORT_TIMEOUT,
progress_timeout: float = Constant.WORKFLOW_PROGRESS_TIMEOUT,
cyclic_order: str = CyclicOrder.FIXED,
):
result_clients = normalize_config_arg(result_clients)
starting_client = normalize_config_arg(starting_client)
if starting_client is None:
raise ValueError("starting_client must be specified")
super().__init__(
num_rounds=num_rounds,
task_name_prefix=task_name_prefix,
start_task_timeout=start_task_timeout,
configure_task_timeout=configure_task_timeout,
task_check_period=task_check_period,
job_status_check_interval=job_status_check_interval,
participating_clients=participating_clients,
result_clients=result_clients,
result_clients_policy=DefaultValuePolicy.ALL,
starting_client=starting_client,
starting_client_policy=DefaultValuePolicy.ANY,
max_status_report_interval=max_status_report_interval,
progress_timeout=progress_timeout,
)
check_str("cyclic_order", cyclic_order)
if cyclic_order not in [CyclicOrder.FIXED, CyclicOrder.RANDOM]:
            raise ValueError(f"invalid cyclic_order {cyclic_order}: must be in {[CyclicOrder.FIXED, CyclicOrder.RANDOM]}")
self.cyclic_order = cyclic_order
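    # A minimal usage sketch (hypothetical values, for illustration only):
    #
    #   controller = CyclicServerController(
    #       num_rounds=10,
    #       cyclic_order=CyclicOrder.RANDOM,  # reshuffle the relay order each round
    #   )
    #
    # With the defaults, all clients participate, all receive final results, and the starting
    # client is chosen per DefaultValuePolicy.ANY.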
def prepare_config(self):
return {Constant.ORDER: self.cyclic_order}
| NVFlare-main | nvflare/app_common/ccwf/cyclic_server_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import threading
import time
from abc import abstractmethod
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.task_controller import Task, TaskController
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learnable import Learnable
from nvflare.app_common.abstract.learnable_persistor import LearnablePersistor
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.ccwf.common import Constant, ResultType, StatusReport, make_task_name, topic_for_end_workflow
from nvflare.fuel.utils.validation_utils import check_non_empty_str, check_number_range, check_positive_number
from nvflare.security.logging import secure_format_traceback
class _LearnTask:
def __init__(self, task_name: str, task_data: Shareable, fl_ctx: FLContext):
self.task_name = task_name
self.task_data = task_data
self.fl_ctx = fl_ctx
self.abort_signal = Signal()
class ClientSideController(Executor, TaskController):
def __init__(
self,
task_name_prefix: str,
learn_task_name=AppConstants.TASK_TRAIN,
persistor_id=AppConstants.DEFAULT_PERSISTOR_ID,
shareable_generator_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
learn_task_check_interval=Constant.LEARN_TASK_CHECK_INTERVAL,
learn_task_ack_timeout=Constant.LEARN_TASK_ACK_TIMEOUT,
learn_task_abort_timeout=Constant.LEARN_TASK_ABORT_TIMEOUT,
final_result_ack_timeout=Constant.FINAL_RESULT_ACK_TIMEOUT,
allow_busy_task: bool = False,
):
"""
Constructor of a ClientSideController object.
Args:
task_name_prefix: prefix of task names. All CCWF task names are prefixed with this.
learn_task_name: name for the Learning Task (LT)
persistor_id: ID of the persistor component
shareable_generator_id: ID of the shareable generator component
            learn_task_check_interval: interval for checking incoming Learning Task (LT)
            learn_task_ack_timeout: timeout for sending the LT to other client(s)
            learn_task_abort_timeout: time to wait for the LT to stop after aborting it
            final_result_ack_timeout: timeout for sending the final result to participating clients
            allow_busy_task: whether a new learn task is allowed while working on the current learn task
"""
check_non_empty_str("task_name_prefix", task_name_prefix)
check_positive_number("learn_task_check_interval", learn_task_check_interval)
check_number_range("learn_task_ack_timeout", learn_task_ack_timeout, min_value=1.0)
check_positive_number("learn_task_abort_timeout", learn_task_abort_timeout)
check_number_range("final_result_ack_timeout", final_result_ack_timeout, min_value=1.0)
Executor.__init__(self)
TaskController.__init__(self)
self.task_name_prefix = task_name_prefix
self.start_task_name = make_task_name(task_name_prefix, Constant.BASENAME_START)
self.configure_task_name = make_task_name(task_name_prefix, Constant.BASENAME_CONFIG)
self.do_learn_task_name = make_task_name(task_name_prefix, Constant.BASENAME_LEARN)
self.report_final_result_task_name = make_task_name(task_name_prefix, Constant.BASENAME_REPORT_FINAL_RESULT)
self.learn_task_name = learn_task_name
self.learn_task_abort_timeout = learn_task_abort_timeout
self.learn_task_check_interval = learn_task_check_interval
self.learn_task_ack_timeout = learn_task_ack_timeout
self.final_result_ack_timeout = final_result_ack_timeout
self.allow_busy_task = allow_busy_task
self.persistor_id = persistor_id
self.shareable_generator_id = shareable_generator_id
self.persistor = None
self.shareable_generator = None
self.current_status = StatusReport()
self.last_status_report_time = time.time() # time of last status report to server
self.config = None
self.workflow_id = None
self.finalize_lock = threading.Lock()
self.learn_thread = threading.Thread(target=self._do_learn)
self.learn_thread.daemon = True
self.learn_task = None
self.current_task = None
self.learn_executor = None
self.learn_task_lock = threading.Lock()
self.asked_to_stop = False
self.status_lock = threading.Lock()
self.engine = None
self.me = None
self.is_starting_client = False
self.last_result = None
self.last_round = None
self.best_result = None
self.best_metric = None
self.best_round = 0
self.workflow_done = False
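    # Subclasses supply the actual workflow logic: start_workflow() runs on the starting client
    # to kick off the collaboration, and do_learn_task() handles each incoming learn task
    # (see the abstract methods below).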
def get_config_prop(self, name: str, default=None):
"""
Get a specified config property.
Args:
name: name of the property
default: default value to return if the property is not defined.
        Returns: the value of the property, or the default if the property is not defined.
"""
if not self.config:
return default
return self.config.get(name, default)
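    # Note: self.config is only populated once the server's configure task has been processed
    # in execute() below, so until then get_config_prop simply returns the given default.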
def start_run(self, fl_ctx: FLContext):
self.engine = fl_ctx.get_engine()
if not self.engine:
self.system_panic("no engine", fl_ctx)
return
runner = fl_ctx.get_prop(FLContextKey.RUNNER)
if not runner:
self.system_panic("no client runner", fl_ctx)
return
self.me = fl_ctx.get_identity_name()
if self.learn_task_name:
self.learn_executor = runner.find_executor(self.learn_task_name)
if not self.learn_executor:
self.system_panic(f"no executor for task {self.learn_task_name}", fl_ctx)
return
self.persistor = self.engine.get_component(self.persistor_id)
if not isinstance(self.persistor, LearnablePersistor):
            self.system_panic(
                f"Persistor {self.persistor_id} must be a LearnablePersistor instance, but got {type(self.persistor)}",
fl_ctx,
)
return
if self.shareable_generator_id:
self.shareable_generator = self.engine.get_component(self.shareable_generator_id)
if not isinstance(self.shareable_generator, ShareableGenerator):
self.system_panic(
f"Shareable generator {self.shareable_generator_id} must be a Shareable Generator instance, "
f"but got {type(self.shareable_generator)}",
fl_ctx,
)
return
self.initialize(fl_ctx)
if self.learn_task_name:
self.log_info(fl_ctx, "Started learn thread")
self.learn_thread.start()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.start_run(fl_ctx)
elif event_type == EventType.BEFORE_PULL_TASK:
# add my status to fl_ctx
if not self.workflow_id:
return
reports = fl_ctx.get_prop(Constant.STATUS_REPORTS)
if reports:
reports.pop(self.workflow_id, None)
if self.workflow_done:
return
report = self._get_status_report()
if not report:
self.log_info(fl_ctx, "nothing to report this time")
return
self._add_status_report(report, fl_ctx)
self.last_status_report_time = report.timestamp
elif event_type in [EventType.ABORT_TASK, EventType.END_RUN]:
if not self.asked_to_stop and not self.workflow_done:
self.asked_to_stop = True
self._abort_current_task(fl_ctx)
self.finalize(fl_ctx)
def _add_status_report(self, report: StatusReport, fl_ctx: FLContext):
reports = fl_ctx.get_prop(Constant.STATUS_REPORTS)
if not reports:
reports = {}
# set the prop as public, so it will be sent to the peer in peer_context
fl_ctx.set_prop(Constant.STATUS_REPORTS, reports, sticky=False, private=False)
reports[self.workflow_id] = report.to_dict()
def initialize(self, fl_ctx: FLContext):
"""Called to initialize the executor.
Args:
fl_ctx: The FL Context
Returns: None
"""
fl_ctx.set_prop(Constant.EXECUTOR, self, private=True, sticky=False)
self.fire_event(Constant.EXECUTOR_INITIALIZED, fl_ctx)
def finalize(self, fl_ctx: FLContext):
"""Called to finalize the executor.
Args:
fl_ctx: the FL Context
Returns: None
"""
with self.finalize_lock:
if self.workflow_done:
return
fl_ctx.set_prop(Constant.EXECUTOR, self, private=True, sticky=False)
fl_ctx.set_prop(FLContextKey.WORKFLOW, self.workflow_id, private=True, sticky=False)
self.fire_event(Constant.EXECUTOR_FINALIZED, fl_ctx)
self.workflow_done = True
def process_config(self, fl_ctx: FLContext):
"""This is called to allow the subclass to process config props.
        Returns: an optional reply Shareable; if None, an OK reply is sent back to the server.
"""
pass
def topic_for_my_workflow(self, base_topic: str):
return f"{base_topic}.{self.workflow_id}"
def broadcast_final_result(
self, fl_ctx: FLContext, result_type: str, result: Learnable, metric=None, round_num=None
):
error = None
targets = self.get_config_prop(Constant.RESULT_CLIENTS)
if not targets:
self.log_info(fl_ctx, f"no clients configured to receive final {result_type} result")
else:
try:
num_errors = self._try_broadcast_final_result(fl_ctx, result_type, result, metric, round_num)
if num_errors > 0:
error = ReturnCode.EXECUTION_EXCEPTION
except:
                self.log_error(fl_ctx, f"exception broadcasting final {result_type} result: {secure_format_traceback()}")
error = ReturnCode.EXECUTION_EXCEPTION
if result_type == ResultType.BEST:
action = "finished_broadcast_best_result"
all_done = False
else:
action = "finished_broadcast_last_result"
all_done = True
self.update_status(action=action, error=error, all_done=all_done)
def _try_broadcast_final_result(
self, fl_ctx: FLContext, result_type: str, result: Learnable, metric=None, round_num=None
):
targets = self.get_config_prop(Constant.RESULT_CLIENTS)
assert isinstance(targets, list)
if self.me in targets:
targets.remove(self.me)
if len(targets) == 0:
# no targets to receive the result!
self.log_info(fl_ctx, f"no targets to receive {result_type} result")
return 0
shareable = Shareable()
shareable.set_header(Constant.RESULT_TYPE, result_type)
if metric is not None:
shareable.set_header(Constant.METRIC, metric)
if round_num is not None:
shareable.set_header(Constant.ROUND, round_num)
shareable[Constant.RESULT] = result
self.log_info(
fl_ctx, f"broadcasting {result_type} result with metric {metric} at round {round_num} to clients {targets}"
)
self.update_status(action=f"broadcast_{result_type}_result")
task = Task(
name=self.report_final_result_task_name,
data=shareable,
timeout=int(self.final_result_ack_timeout),
)
resp = self.broadcast_and_wait(
task=task,
targets=targets,
min_responses=len(targets),
fl_ctx=fl_ctx,
)
assert isinstance(resp, dict)
num_errors = 0
for t in targets:
reply = resp.get(t)
if not isinstance(reply, Shareable):
self.log_error(
fl_ctx,
f"bad response for {result_type} result from client {t}: "
f"reply must be Shareable but got {type(reply)}",
)
num_errors += 1
continue
rc = reply.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"bad response for {result_type} result from client {t}: {rc}")
num_errors += 1
if num_errors == 0:
self.log_info(fl_ctx, f"successfully broadcast {result_type} result to {targets}")
return num_errors
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self.configure_task_name:
self.config = shareable[Constant.CONFIG]
my_wf_id = self.get_config_prop(FLContextKey.WORKFLOW)
if not my_wf_id:
self.log_error(fl_ctx, "missing workflow id in configuration!")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
self.log_info(fl_ctx, f"got my workflow id {my_wf_id}")
self.workflow_id = my_wf_id
reply = self.process_config(fl_ctx)
self.engine.register_aux_message_handler(
topic=topic_for_end_workflow(my_wf_id),
message_handle_func=self._process_end_workflow,
)
if not reply:
reply = make_reply(ReturnCode.OK)
return reply
elif task_name == self.start_task_name:
self.is_starting_client = True
learnable = self.persistor.load(fl_ctx)
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, learnable, private=True, sticky=True)
initial_model = self.shareable_generator.learnable_to_shareable(learnable, fl_ctx)
return self.start_workflow(initial_model, fl_ctx, abort_signal)
elif task_name == self.do_learn_task_name:
return self._process_learn_request(shareable, fl_ctx)
elif task_name == self.report_final_result_task_name:
return self._process_final_result(shareable, fl_ctx)
else:
self.log_error(fl_ctx, f"Could not handle task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
@abstractmethod
def start_workflow(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
"""
This is called for the subclass to start the workflow.
This only happens on the starting_client.
Args:
shareable: the initial task data (e.g. initial model weights)
fl_ctx: FL context
abort_signal: abort signal for task execution
        Returns: a reply Shareable indicating whether the workflow was started successfully.
"""
pass
def _get_status_report(self):
with self.status_lock:
status = self.current_status
must_report = False
if status.error:
must_report = True
elif status.timestamp:
must_report = True
if not must_report:
return None
# do status report
report = copy.copy(status)
return report
def _abort_current_task(self, fl_ctx: FLContext):
current_task = self.learn_task
if not current_task:
return
current_task.abort_signal.trigger(True)
fl_ctx.set_prop(FLContextKey.TASK_NAME, current_task.task_name)
self.fire_event(EventType.ABORT_TASK, fl_ctx)
def set_learn_task(self, task_data: Shareable, fl_ctx: FLContext) -> bool:
with self.learn_task_lock:
task_data.set_header(AppConstants.NUM_ROUNDS, self.get_config_prop(AppConstants.NUM_ROUNDS))
task = _LearnTask(self.learn_task_name, task_data, fl_ctx)
current_task = self.learn_task
if not current_task:
self.learn_task = task
return True
if not self.allow_busy_task:
return False
# already has a task!
self.log_warning(fl_ctx, "already running a task: aborting it")
self._abort_current_task(fl_ctx)
# monitor until the task is done
start = time.time()
while self.learn_task:
if time.time() - start > self.learn_task_abort_timeout:
self.log_error(
fl_ctx, f"failed to stop the running task after {self.learn_task_abort_timeout} seconds"
)
return False
time.sleep(0.1)
self.learn_task = task
return True
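    # The learn task set above is picked up by the _do_learn loop below, which runs on the
    # learn thread started in start_run().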
def _do_learn(self):
while not self.asked_to_stop:
if self.learn_task:
t = self.learn_task
assert isinstance(t, _LearnTask)
self.logger.info(f"Got a Learn task {t.task_name}")
try:
self.do_learn_task(t.task_name, t.task_data, t.fl_ctx, t.abort_signal)
except:
                    self.logger.error(f"exception from do_learn_task: {secure_format_traceback()}")
self.learn_task = None
time.sleep(self.learn_task_check_interval)
def update_status(self, last_round=None, action=None, error=None, all_done=False):
with self.status_lock:
status = self.current_status
status.timestamp = time.time()
if all_done:
# once marked all_done, always all_done!
status.all_done = True
if error:
status.error = error
if action:
status.action = action
if status.last_round is None:
status.last_round = last_round
elif last_round is not None and last_round > status.last_round:
status.last_round = last_round
status_dict = status.to_dict()
self.logger.info(f"updated my last status: {status_dict}")
@abstractmethod
def do_learn_task(self, name: str, data: Shareable, fl_ctx: FLContext, abort_signal: Signal):
"""This is called to do a Learn Task.
Subclass must implement this method.
Args:
name: task name
data: task data
fl_ctx: FL context of the task
abort_signal: abort signal for the task execution
        Returns: None
"""
pass
def _process_final_result(self, request: Shareable, fl_ctx: FLContext) -> Shareable:
peer_ctx = fl_ctx.get_peer_context()
assert isinstance(peer_ctx, FLContext)
client_name = peer_ctx.get_identity_name()
result = request.get(Constant.RESULT)
metric = request.get_header(Constant.METRIC)
round_num = request.get_header(Constant.ROUND)
result_type = request.get_header(Constant.RESULT_TYPE)
if result_type not in [ResultType.BEST, ResultType.LAST]:
self.log_error(fl_ctx, f"Bad request from client {client_name}: invalid result type {result_type}")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
if not result:
self.log_error(fl_ctx, f"Bad request from client {client_name}: no result")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
if not isinstance(result, Learnable):
self.log_error(fl_ctx, f"Bad result from client {client_name}: expect Learnable but got {type(result)}")
return make_reply(ReturnCode.BAD_REQUEST_DATA)
self.log_info(fl_ctx, f"Got {result_type} from client {client_name} with metric {metric} at round {round_num}")
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, result, private=True, sticky=True)
if result_type == ResultType.BEST:
fl_ctx.set_prop(Constant.ROUND, round_num, private=True, sticky=False)
fl_ctx.set_prop(Constant.CLIENT, client_name, private=True, sticky=False)
fl_ctx.set_prop(AppConstants.VALIDATION_RESULT, metric, private=True, sticky=False)
self.fire_event(AppEventType.GLOBAL_BEST_MODEL_AVAILABLE, fl_ctx)
else:
# last model
assert isinstance(self.persistor, LearnablePersistor)
self.persistor.save(result, fl_ctx)
return make_reply(ReturnCode.OK)
def _process_end_workflow(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
self.log_info(fl_ctx, f"ending workflow {self.get_config_prop(FLContextKey.WORKFLOW)}")
self.asked_to_stop = True
self._abort_current_task(fl_ctx)
self.finalize(fl_ctx)
return make_reply(ReturnCode.OK)
def _process_learn_request(self, request: Shareable, fl_ctx: FLContext) -> Shareable:
try:
return self._try_process_learn_request(request, fl_ctx)
except Exception as ex:
self.log_exception(fl_ctx, f"exception: {ex}")
self.update_status(action="process_learn_request", error=ReturnCode.EXECUTION_EXCEPTION)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _try_process_learn_request(self, request: Shareable, fl_ctx: FLContext) -> Shareable:
peer_ctx = fl_ctx.get_peer_context()
assert isinstance(peer_ctx, FLContext)
sender = peer_ctx.get_identity_name()
# process request from prev client
self.log_info(fl_ctx, f"Got Learn request from {sender}")
if self.learn_task and not self.allow_busy_task:
# should never happen!
self.log_error(fl_ctx, f"got Learn request from {sender} while I'm still busy!")
self.update_status(action="process_learn_request", error=ReturnCode.EXECUTION_EXCEPTION)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
self.log_info(fl_ctx, f"accepted learn request from {sender}")
self.set_learn_task(task_data=request, fl_ctx=fl_ctx)
return make_reply(ReturnCode.OK)
def send_learn_task(self, targets: list, request: Shareable, fl_ctx: FLContext) -> bool:
self.log_info(fl_ctx, f"sending learn task to clients {targets}")
request.set_header(AppConstants.NUM_ROUNDS, self.get_config_prop(AppConstants.NUM_ROUNDS))
task = Task(
name=self.do_learn_task_name,
data=request,
timeout=int(self.learn_task_ack_timeout),
)
resp = self.broadcast_and_wait(
task=task,
targets=targets,
min_responses=len(targets),
fl_ctx=fl_ctx,
)
assert isinstance(resp, dict)
for t in targets:
reply = resp.get(t)
if not isinstance(reply, Shareable):
self.log_error(fl_ctx, f"failed to send learn request to client {t}")
self.log_error(fl_ctx, f"reply must be Shareable but got {type(reply)}")
self.update_status(action="send_learn_task", error=ReturnCode.EXECUTION_EXCEPTION)
return False
rc = reply.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"bad response for learn request from client {t}: {rc}")
self.update_status(action="send_learn_task", error=rc)
return False
return True
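    # Note: min_responses is len(targets) above, so send_learn_task returns True only after
    # every target has acknowledged the learn task with an OK reply.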
def execute_learn_task(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
current_round = data.get_header(AppConstants.CURRENT_ROUND)
self.log_info(fl_ctx, f"started training round {current_round}")
try:
result = self.learn_executor.execute(self.learn_task_name, data, fl_ctx, abort_signal)
except:
self.log_exception(fl_ctx, f"trainer exception: {secure_format_traceback()}")
result = make_reply(ReturnCode.EXECUTION_EXCEPTION)
self.log_info(fl_ctx, f"finished training round {current_round}")
# make sure to include cookies in result
cookie_jar = data.get_cookie_jar()
result.set_cookie_jar(cookie_jar)
result.set_header(AppConstants.CURRENT_ROUND, current_round)
result.add_cookie(AppConstants.CONTRIBUTION_ROUND, current_round) # to make model selector happy
return result
def record_last_result(
self,
fl_ctx: FLContext,
round_num: int,
result: Learnable,
):
if not isinstance(result, Learnable):
self.log_error(fl_ctx, f"result must be Learnable but got {type(result)}")
return
self.last_result = result
self.last_round = round_num
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, result, private=True, sticky=True)
if self.persistor:
self.log_info(fl_ctx, f"Saving result of round {round_num}")
self.persistor.save(result, fl_ctx)
| NVFlare-main | nvflare/app_common/ccwf/client_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from nvflare.apis.controller_spec import Task
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants, ValidateType
from nvflare.app_common.ccwf.client_ctl import ClientSideController
from nvflare.app_common.ccwf.common import Constant, ModelType, make_task_name
from nvflare.fuel.utils.validation_utils import check_non_empty_str, check_positive_number
from nvflare.security.logging import secure_format_traceback
class CrossSiteEvalClientController(ClientSideController):
def __init__(
self,
task_name_prefix=Constant.TN_PREFIX_CROSS_SITE_EVAL,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
validation_task_name=AppConstants.TASK_VALIDATION,
persistor_id=AppConstants.DEFAULT_PERSISTOR_ID,
get_model_timeout=Constant.GET_MODEL_TIMEOUT,
):
check_positive_number("get_model_timeout", get_model_timeout)
check_non_empty_str("submit_model_task_name", submit_model_task_name)
check_non_empty_str("validation_task_name", validation_task_name)
check_non_empty_str("persistor_id", persistor_id)
super().__init__(
task_name_prefix=task_name_prefix,
learn_task_name="",
shareable_generator_id="",
persistor_id=persistor_id,
)
self.eval_task_name = make_task_name(task_name_prefix, Constant.BASENAME_EVAL)
self.ask_for_model_task_name = make_task_name(task_name_prefix, Constant.BASENAME_ASK_FOR_MODEL)
self.submit_model_task_name = submit_model_task_name # this is for the learner executor
self.validation_task_name = validation_task_name
self.my_local_model = None
self.global_model_inventory = None
self.submit_model_executor = None
self.validate_executor = None
self.inventory = None
self.get_model_timeout = get_model_timeout
self.local_model = None
self.model_lock = threading.Lock()
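    # A minimal usage sketch (hypothetical task names; they must match tasks handled by the
    # client's learner executor):
    #
    #   ctl = CrossSiteEvalClientController(
    #       submit_model_task_name="submit_model",
    #       validation_task_name="validate",
    #   )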
def start_run(self, fl_ctx: FLContext):
super().start_run(fl_ctx)
runner = fl_ctx.get_prop(FLContextKey.RUNNER)
if self.submit_model_task_name:
self.submit_model_executor = runner.find_executor(self.submit_model_task_name)
if not self.submit_model_executor:
self.system_panic(f"no executor for task {self.submit_model_task_name}", fl_ctx)
return
if self.validation_task_name:
self.validate_executor = runner.find_executor(self.validation_task_name)
if not self.validate_executor:
self.system_panic(f"no executor for task {self.validation_task_name}", fl_ctx)
return
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self.eval_task_name:
# server assigned task
return self.do_eval(shareable, fl_ctx, abort_signal)
elif task_name == self.ask_for_model_task_name:
# client-assigned task
return self._process_get_model_request(shareable, fl_ctx)
return super().execute(task_name, shareable, fl_ctx, abort_signal)
def process_config(self, fl_ctx: FLContext):
eval_local = self.get_config_prop(Constant.EVAL_LOCAL)
eval_global = self.get_config_prop(Constant.EVAL_GLOBAL)
evaluators = self.get_config_prop(Constant.EVALUATORS)
evaluatees = self.get_config_prop(Constant.EVALUATEES)
global_client = self.get_config_prop(Constant.GLOBAL_CLIENT)
if eval_local and self.me in evaluatees:
# do I have any local model?
if not self.submit_model_executor:
return make_reply(Constant.RC_NO_LOCAL_MODEL)
if self.me in evaluators:
# I am required to evaluate others
if not self.validate_executor:
return make_reply(Constant.RC_UNABLE_TO_EVAL)
reply = make_reply(ReturnCode.OK)
if eval_global and self.me == global_client:
# do I have global models?
assert isinstance(self.persistor, ModelPersistor)
self.inventory = self.persistor.get_model_inventory(fl_ctx)
if self.inventory:
assert isinstance(self.inventory, dict)
reply[Constant.GLOBAL_NAMES] = list(self.inventory.keys())
return reply
def start_workflow(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
pass
def do_eval(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
model_type = shareable.get(Constant.MODEL_TYPE)
model_owner = shareable.get(Constant.MODEL_OWNER)
model_name = shareable.get(Constant.MODEL_NAME)
current_round = shareable.get(AppConstants.CURRENT_ROUND)
# ask the model owner for model
req = Shareable()
req[Constant.MODEL_NAME] = model_name
req[Constant.MODEL_TYPE] = model_type
if not self.validate_executor:
self.log_error(fl_ctx, "got eval request but I don't have a validator")
return make_reply(Constant.RC_UNABLE_TO_EVAL)
self.update_status(action="eval:get_model", last_round=current_round)
self.log_info(fl_ctx, f"asking client {model_owner} for model {model_type} {model_name}")
task = Task(
name=self.ask_for_model_task_name,
data=req,
timeout=int(self.get_model_timeout),
)
resp = self.broadcast_and_wait(
task=task,
targets=[model_owner],
min_responses=1,
fl_ctx=fl_ctx,
)
assert isinstance(resp, dict)
reply = resp.get(model_owner)
if not reply:
self.log_error(fl_ctx, f"failed to ask client {model_owner} for model")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        if not isinstance(reply, Shareable):
            self.log_error(fl_ctx, f"bad reply from client {model_owner}: expect Shareable but got {type(reply)}")
            return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        rc = reply.get_return_code()
        if rc != ReturnCode.OK:
            self.log_error(fl_ctx, f"client {model_owner} failed to respond to get-model request: {rc}")
            return make_reply(rc)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
model_to_validate = reply
model_to_validate.set_header(AppConstants.VALIDATE_TYPE, ValidateType.MODEL_VALIDATE)
if model_type == ModelType.LOCAL:
model_to_validate.set_header(AppConstants.MODEL_OWNER, model_owner)
self.update_status(action="eval:validate", last_round=current_round)
result = self.validate_executor.execute(
task_name=self.validation_task_name, shareable=model_to_validate, abort_signal=abort_signal, fl_ctx=fl_ctx
)
self.update_status(action="eval:finished", last_round=current_round)
assert isinstance(result, Shareable)
result.set_header(Constant.MODEL_TYPE, model_type)
result.set_header(Constant.MODEL_NAME, model_name)
result.set_header(Constant.MODEL_OWNER, model_owner)
result.set_header(AppConstants.CURRENT_ROUND, current_round)
return result
def _process_get_model_request(self, request: Shareable, fl_ctx: FLContext) -> Shareable:
with self.model_lock:
return self._do_process_get_model_request(request, fl_ctx)
def _do_process_get_model_request(self, request: Shareable, fl_ctx: FLContext) -> Shareable:
peer_ctx = fl_ctx.get_peer_context()
assert isinstance(peer_ctx, FLContext)
client_name = peer_ctx.get_identity_name()
model_type = request.get(Constant.MODEL_TYPE)
model_name = request.get(Constant.MODEL_NAME)
if model_type == ModelType.GLOBAL:
# get it from model inventory
if not self.inventory:
self.log_error(
fl_ctx, f"got request for global model from client {client_name} but I don't have global models"
)
return make_reply(ReturnCode.BAD_REQUEST_DATA)
assert isinstance(self.persistor, ModelPersistor)
model_learnable = self.persistor.get(model_name, fl_ctx)
dxo = model_learnable_to_dxo(model_learnable)
self.log_info(fl_ctx, f"sent global model {model_name} to client {client_name}")
return dxo.to_shareable()
# local model
if not self.submit_model_executor:
self.log_error(
fl_ctx, f"got request for local model from client {client_name} but I don't have local models"
)
return make_reply(ReturnCode.BAD_REQUEST_DATA)
if not self.local_model:
task_data = Shareable()
task_data.set_header(AppConstants.SUBMIT_MODEL_NAME, model_name)
abort_signal = Signal()
try:
result = self.submit_model_executor.execute(
task_name=self.submit_model_task_name, shareable=task_data, fl_ctx=fl_ctx, abort_signal=abort_signal
)
except:
self.log_error(
fl_ctx, f"failed to get local model from submit_model_executor: {secure_format_traceback()}"
)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
assert isinstance(result, Shareable)
rc = result.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"failed to get local model from submit_model_executor: {rc}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
self.local_model = result
self.log_info(fl_ctx, f"sent local model {model_name} to client {client_name}")
return self.local_model
def do_learn_task(self, name: str, task_data: Shareable, fl_ctx: FLContext, abort_signal: Signal):
pass
| NVFlare-main | nvflare/app_common/ccwf/cse_client_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.ccwf.client_ctl import ClientSideController
from nvflare.app_common.ccwf.common import Constant, CyclicOrder, ResultType, rotate_to_front
from nvflare.fuel.utils.validation_utils import check_non_empty_str
class CyclicClientController(ClientSideController):
def __init__(
self,
task_name_prefix=Constant.TN_PREFIX_CYCLIC,
learn_task_name=AppConstants.TASK_TRAIN,
persistor_id=AppConstants.DEFAULT_PERSISTOR_ID,
shareable_generator_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
learn_task_check_interval=Constant.LEARN_TASK_CHECK_INTERVAL,
learn_task_abort_timeout=Constant.LEARN_TASK_ABORT_TIMEOUT,
learn_task_ack_timeout=Constant.LEARN_TASK_ACK_TIMEOUT,
final_result_ack_timeout=Constant.FINAL_RESULT_ACK_TIMEOUT,
):
check_non_empty_str("learn_task_name", learn_task_name)
check_non_empty_str("persistor_id", persistor_id)
check_non_empty_str("shareable_generator_id", shareable_generator_id)
super().__init__(
task_name_prefix=task_name_prefix,
learn_task_name=learn_task_name,
persistor_id=persistor_id,
shareable_generator_id=shareable_generator_id,
learn_task_check_interval=learn_task_check_interval,
learn_task_abort_timeout=learn_task_abort_timeout,
learn_task_ack_timeout=learn_task_ack_timeout,
final_result_ack_timeout=final_result_ack_timeout,
allow_busy_task=False,
)
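    # A minimal usage sketch (hypothetical component ids; the persistor and shareable generator
    # must be defined as components in the client job config):
    #
    #   ctl = CyclicClientController(
    #       learn_task_name="train",
    #       persistor_id="persistor",
    #       shareable_generator_id="shareable_generator",
    #   )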
@staticmethod
def _set_task_headers(task_data: Shareable, num_rounds, current_round, client_order):
task_data.set_header(AppConstants.NUM_ROUNDS, num_rounds)
task_data.set_header(AppConstants.CURRENT_ROUND, current_round)
task_data.set_header(Constant.CLIENT_ORDER, client_order)
def start_workflow(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
clients = self.get_config_prop(Constant.CLIENTS)
# make sure the starting client is the 1st
rotate_to_front(self.me, clients)
rr_order = self.get_config_prop(Constant.ORDER)
self.log_info(fl_ctx, f"Starting cyclic workflow on clients {clients} with order {rr_order} ")
self._set_task_headers(
task_data=shareable,
num_rounds=self.get_config_prop(AppConstants.NUM_ROUNDS),
current_round=self.get_config_prop(Constant.START_ROUND, 0),
client_order=clients,
)
self.set_learn_task(task_data=shareable, fl_ctx=fl_ctx)
return make_reply(ReturnCode.OK)
def do_learn_task(self, name: str, data: Shareable, fl_ctx: FLContext, abort_signal: Signal):
# set status report of starting task
current_round = data.get_header(AppConstants.CURRENT_ROUND)
self.update_status(
last_round=current_round,
action="start_learn_task",
)
# need to prepare the GLOBAL_MODEL prop in case the shareable generator needs it
# for shareable_to_learnable after training.
# Note: the "data" shareable contains full weight before training.
# However, the training process may only return weight diffs. To convert to full weights again,
# the original weights (GLOBAL_MODEL prop) are needed.
global_weights = self.shareable_generator.shareable_to_learnable(data, fl_ctx)
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, global_weights, private=True, sticky=True)
# execute the task
result = self.execute_learn_task(data, fl_ctx, abort_signal)
rc = result.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"learn executor failed: {rc}")
self.update_status(action="do_learn_task", error=rc)
return
self.last_result = result
self.last_round = current_round
# see whether we need to send to next leg
num_rounds = data.get_header(AppConstants.NUM_ROUNDS)
current_round = data.get_header(AppConstants.CURRENT_ROUND)
client_order = data.get_header(Constant.CLIENT_ORDER)
all_done = False
assert isinstance(client_order, list)
my_idx = client_order.index(self.me)
if my_idx == len(client_order) - 1:
# I'm the last leg
num_rounds_done = current_round - self.get_config_prop(Constant.START_ROUND, 0) + 1
if num_rounds_done >= num_rounds:
# The RR is done!
self.log_info(fl_ctx, f"Cyclic Done: number of rounds completed {num_rounds_done}")
all_done = True
else:
# decide the next round order
cyclic_order = self.get_config_prop(Constant.ORDER)
if cyclic_order == CyclicOrder.RANDOM:
random.shuffle(client_order)
# make sure I'm not the first in the new order
if client_order[0] == self.me:
# put me at the end
client_order.pop(0)
client_order.append(self.me)
result.set_header(Constant.CLIENT_ORDER, client_order)
current_round += 1
self.log_info(fl_ctx, f"Starting new round {current_round} on clients: {client_order}")
last_learnable = self.shareable_generator.shareable_to_learnable(result, fl_ctx)
if all_done:
self.record_last_result(fl_ctx, self.last_round, last_learnable)
self.broadcast_final_result(fl_ctx, ResultType.LAST, last_learnable, round_num=self.last_round)
return
# send to next leg
if my_idx < len(client_order) - 1:
next_client = client_order[my_idx + 1]
else:
next_client = client_order[0]
next_task_data = self.shareable_generator.learnable_to_shareable(last_learnable, fl_ctx)
self._set_task_headers(next_task_data, num_rounds, current_round, client_order)
sent = self.send_learn_task(
targets=[next_client],
request=next_task_data,
fl_ctx=fl_ctx,
)
if sent:
self.log_info(fl_ctx, f"sent learn request to next client {next_client}")
| NVFlare-main | nvflare/app_common/ccwf/cyclic_client_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.ccwf.common import Constant
from nvflare.app_common.ccwf.server_ctl import ServerSideController
from nvflare.fuel.utils.validation_utils import DefaultValuePolicy, normalize_config_arg, validate_candidates
class SwarmServerController(ServerSideController):
def __init__(
self,
num_rounds: int,
start_round: int = 0,
task_name_prefix=Constant.TN_PREFIX_SWARM,
start_task_timeout=Constant.START_TASK_TIMEOUT,
configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT,
task_check_period: float = Constant.TASK_CHECK_INTERVAL,
job_status_check_interval: float = Constant.JOB_STATUS_CHECK_INTERVAL,
participating_clients=None,
result_clients=None,
starting_client: str = "",
max_status_report_interval: float = Constant.PER_CLIENT_STATUS_REPORT_TIMEOUT,
progress_timeout: float = Constant.WORKFLOW_PROGRESS_TIMEOUT,
aggr_clients=None,
train_clients=None,
):
result_clients = normalize_config_arg(result_clients)
starting_client = normalize_config_arg(starting_client)
if starting_client is None:
raise ValueError("starting_client must be specified")
super().__init__(
num_rounds=num_rounds,
start_round=start_round,
task_name_prefix=task_name_prefix,
start_task_timeout=start_task_timeout,
configure_task_timeout=configure_task_timeout,
task_check_period=task_check_period,
job_status_check_interval=job_status_check_interval,
participating_clients=participating_clients,
result_clients=result_clients,
result_clients_policy=DefaultValuePolicy.ALL,
starting_client=starting_client,
starting_client_policy=DefaultValuePolicy.ANY,
max_status_report_interval=max_status_report_interval,
progress_timeout=progress_timeout,
)
if not train_clients:
train_clients = []
if not aggr_clients:
aggr_clients = []
self.aggr_clients = aggr_clients
self.train_clients = train_clients
def start_controller(self, fl_ctx: FLContext):
super().start_controller(fl_ctx)
self.train_clients = validate_candidates(
var_name="train_clients",
candidates=self.train_clients,
base=self.participating_clients,
default_policy=DefaultValuePolicy.ALL,
allow_none=False,
)
self.aggr_clients = validate_candidates(
var_name="aggr_clients",
candidates=self.aggr_clients,
base=self.participating_clients,
default_policy=DefaultValuePolicy.ALL,
allow_none=False,
)
# make sure every participating client is either training or aggr client
for c in self.participating_clients:
if c not in self.train_clients and c not in self.aggr_clients:
                raise RuntimeError(f"Config Error: client {c} is neither a train client nor an aggr client")
def prepare_config(self):
return {Constant.AGGR_CLIENTS: self.aggr_clients, Constant.TRAIN_CLIENTS: self.train_clients}
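    # A minimal usage sketch (hypothetical client names, for illustration only):
    #
    #   controller = SwarmServerController(
    #       num_rounds=10,
    #       aggr_clients=["site-1", "site-2"],  # clients allowed to aggregate
    #       train_clients=["site-1", "site-2", "site-3"],
    #   )
    #
    # Every participating client must be a train client, an aggr client, or both
    # (enforced in start_controller above).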
| NVFlare-main | nvflare/app_common/ccwf/swarm_server_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from datetime import datetime
from nvflare.apis.client import Client
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Controller, Task
from nvflare.apis.shareable import ReturnCode, Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.ccwf.common import (
Constant,
StatusReport,
make_task_name,
status_report_from_dict,
topic_for_end_workflow,
)
from nvflare.fuel.utils.validation_utils import (
DefaultValuePolicy,
check_number_range,
check_positive_int,
check_positive_number,
check_str,
normalize_config_arg,
validate_candidate,
validate_candidates,
)
from nvflare.security.logging import secure_format_traceback
class ClientStatus:
def __init__(self):
self.ready_time = None
self.last_report_time = time.time()
self.last_progress_time = time.time()
self.num_reports = 0
self.status = StatusReport()
class ServerSideController(Controller):
def __init__(
self,
num_rounds: int,
start_round: int = 0,
task_name_prefix: str = "wf",
configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT,
end_workflow_timeout=Constant.END_WORKFLOW_TIMEOUT,
start_task_timeout=Constant.START_TASK_TIMEOUT,
task_check_period: float = Constant.TASK_CHECK_INTERVAL,
job_status_check_interval: float = Constant.JOB_STATUS_CHECK_INTERVAL,
starting_client=None,
starting_client_policy: str = DefaultValuePolicy.ANY,
participating_clients=None,
result_clients=None,
result_clients_policy: str = DefaultValuePolicy.ALL,
max_status_report_interval: float = Constant.PER_CLIENT_STATUS_REPORT_TIMEOUT,
progress_timeout: float = Constant.WORKFLOW_PROGRESS_TIMEOUT,
):
"""
Constructor
Args:
            num_rounds: number of rounds of learning
            start_round: the starting round number
            task_name_prefix: prefix for all task names of this workflow
            configure_task_timeout: timeout of the configure task sent to participating clients
            end_workflow_timeout: timeout for asking all clients to end the workflow
            start_task_timeout: timeout of the start task sent to the starting client
            task_check_period: interval for checking task status
            job_status_check_interval: interval for checking job status reported by clients
            starting_client: name of the client that starts the workflow
            starting_client_policy: policy to determine the starting client when not explicitly specified
            participating_clients: names of the clients participating in the workflow
            result_clients: clients to receive final results
            result_clients_policy: policy to determine result clients when not explicitly specified
            max_status_report_interval: max time allowed between status reports from a client
            progress_timeout: max time allowed for the workflow to make no progress
"""
Controller.__init__(self, task_check_period)
participating_clients = normalize_config_arg(participating_clients)
if participating_clients is None:
raise ValueError("participating_clients must not be empty")
self.task_name_prefix = task_name_prefix
self.configure_task_name = make_task_name(task_name_prefix, Constant.BASENAME_CONFIG)
self.configure_task_timeout = configure_task_timeout
self.start_task_name = make_task_name(task_name_prefix, Constant.BASENAME_START)
self.start_task_timeout = start_task_timeout
self.end_workflow_timeout = end_workflow_timeout
self.num_rounds = num_rounds
self.start_round = start_round
self.max_status_report_interval = max_status_report_interval
self.progress_timeout = progress_timeout
self.job_status_check_interval = job_status_check_interval
self.starting_client = starting_client
self.starting_client_policy = starting_client_policy
self.participating_clients = participating_clients
self.result_clients = result_clients
self.result_clients_policy = result_clients_policy
self.client_statuses = {} # client name => ClientStatus
self.cw_started = False
self.asked_to_stop = False
self.workflow_id = None
check_positive_int("num_rounds", num_rounds)
check_number_range("configure_task_timeout", configure_task_timeout, min_value=1)
check_number_range("end_workflow_timeout", end_workflow_timeout, min_value=1)
check_positive_number("job_status_check_interval", job_status_check_interval)
check_number_range("max_status_report_interval", max_status_report_interval, min_value=10.0)
check_number_range("progress_timeout", progress_timeout, min_value=5.0)
check_str("starting_client_policy", starting_client_policy)
if participating_clients and len(participating_clients) < 2:
            raise ValueError(f"Not enough participating_clients: must be more than 1, but got {participating_clients}")
def start_controller(self, fl_ctx: FLContext):
wf_id = fl_ctx.get_prop(FLContextKey.WORKFLOW)
self.log_debug(fl_ctx, f"starting controller for workflow {wf_id}")
if not wf_id:
raise RuntimeError("workflow ID is missing from FL context")
self.workflow_id = wf_id
all_clients = self._engine.get_clients()
if len(all_clients) < 2:
raise RuntimeError(f"this workflow requires at least 2 clients, but only got {all_clients}")
all_client_names = [t.name for t in all_clients]
self.participating_clients = validate_candidates(
var_name="participating_clients",
candidates=self.participating_clients,
base=all_client_names,
default_policy=DefaultValuePolicy.ALL,
allow_none=False,
)
self.starting_client = validate_candidate(
var_name="starting_client",
candidate=self.starting_client,
base=self.participating_clients,
default_policy=self.starting_client_policy,
allow_none=True,
)
self.result_clients = validate_candidates(
var_name="result_clients",
candidates=self.result_clients,
base=self.participating_clients,
default_policy=self.result_clients_policy,
allow_none=True,
)
for c in self.participating_clients:
self.client_statuses[c] = ClientStatus()
def prepare_config(self) -> dict:
return {}
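    # Subclasses override prepare_config() to contribute workflow-specific entries; the returned
    # dict is merged into the config sent to all clients in control_flow() below (see, e.g.,
    # SwarmServerController and CyclicServerController in this package).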
def sub_flow(self, abort_signal: Signal, fl_ctx: FLContext):
pass
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
# wait for every client to become ready
self.log_info(fl_ctx, f"Waiting for clients to be ready: {self.participating_clients}")
# GET STARTED
self.log_info(fl_ctx, f"Configuring clients {self.participating_clients} for workflow {self.workflow_id}")
learn_config = {
Constant.TASK_NAME_PREFIX: self.task_name_prefix,
Constant.CLIENTS: self.participating_clients,
Constant.START_CLIENT: self.starting_client,
Constant.RESULT_CLIENTS: self.result_clients,
AppConstants.NUM_ROUNDS: self.num_rounds,
Constant.START_ROUND: self.start_round,
FLContextKey.WORKFLOW: self.workflow_id,
}
extra_config = self.prepare_config()
if extra_config:
learn_config.update(extra_config)
self.log_info(fl_ctx, f"Workflow Config: {learn_config}")
# configure all clients
shareable = Shareable()
shareable[Constant.CONFIG] = learn_config
task = Task(
name=self.configure_task_name,
data=shareable,
timeout=self.configure_task_timeout,
result_received_cb=self._process_configure_reply,
)
self.log_info(fl_ctx, f"sending task {self.configure_task_name} to clients {self.participating_clients}")
start_time = time.time()
self.broadcast_and_wait(
task=task,
targets=self.participating_clients,
min_responses=len(self.participating_clients),
fl_ctx=fl_ctx,
abort_signal=abort_signal,
)
time_taken = time.time() - start_time
self.log_info(fl_ctx, f"client configuration took {time_taken} seconds")
failed_clients = []
for c, cs in self.client_statuses.items():
assert isinstance(cs, ClientStatus)
if not cs.ready_time:
failed_clients.append(c)
if failed_clients:
self.system_panic(
f"failed to configure clients {failed_clients}",
fl_ctx,
)
return
self.log_info(fl_ctx, f"successfully configured clients {self.participating_clients}")
# starting the starting_client
if self.starting_client:
shareable = Shareable()
task = Task(
name=self.start_task_name,
data=shareable,
timeout=self.start_task_timeout,
result_received_cb=self._process_start_reply,
)
self.log_info(fl_ctx, f"sending task {self.start_task_name} to client {self.starting_client}")
self.send_and_wait(
task=task,
targets=[self.starting_client],
fl_ctx=fl_ctx,
abort_signal=abort_signal,
)
if not self.cw_started:
self.system_panic(
f"failed to start workflow {self.workflow_id} on client {self.starting_client}",
fl_ctx,
)
return
self.log_info(fl_ctx, f"started workflow {self.workflow_id} on client {self.starting_client}")
# a subclass could provide additional control flow
self.sub_flow(abort_signal, fl_ctx)
self.log_info(fl_ctx, f"Waiting for clients to finish workflow {self.workflow_id} ...")
while not abort_signal.triggered and not self.asked_to_stop:
time.sleep(self.job_status_check_interval)
done = self._check_job_status(fl_ctx)
if done:
break
self.log_info(fl_ctx, f"Workflow {self.workflow_id} finished on all clients")
# ask all clients to end the workflow
self.log_info(fl_ctx, f"asking all clients to end workflow {self.workflow_id}")
engine = fl_ctx.get_engine()
end_wf_request = Shareable()
resp = engine.send_aux_request(
targets=self.participating_clients,
topic=topic_for_end_workflow(self.workflow_id),
request=end_wf_request,
timeout=self.end_workflow_timeout,
fl_ctx=fl_ctx,
secure=False,
)
assert isinstance(resp, dict)
num_errors = 0
for c in self.participating_clients:
reply = resp.get(c)
if not reply:
self.log_error(fl_ctx, f"not reply from client {c} for ending workflow {self.workflow_id}")
num_errors += 1
continue
assert isinstance(reply, Shareable)
rc = reply.get_return_code(ReturnCode.OK)
if rc != ReturnCode.OK:
self.log_error(fl_ctx, f"client {c} failed to end workflow {self.workflow_id}: {rc}")
num_errors += 1
if num_errors > 0:
self.system_panic(f"failed to end workflow {self.workflow_id} on all clients", fl_ctx)
self.log_info(fl_ctx, f"Workflow {self.workflow_id} done!")
def process_task_request(self, client: Client, fl_ctx: FLContext):
self._update_client_status(fl_ctx)
return super().process_task_request(client, fl_ctx)
def process_config_reply(self, client_name: str, reply: Shareable, fl_ctx: FLContext) -> bool:
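        """Subclasses may override to validate a client's config reply; return False to treat the client as not ready."""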
return True
def _process_configure_reply(self, client_task: ClientTask, fl_ctx: FLContext):
result = client_task.result
client_name = client_task.client.name
rc = result.get_return_code()
if rc == ReturnCode.OK:
self.log_info(fl_ctx, f"successfully configured client {client_name}")
try:
ok = self.process_config_reply(client_name, result, fl_ctx)
if not ok:
return
            except Exception:
self.log_error(
fl_ctx, f"exception processing config reply from client {client_name}: {secure_format_traceback()}"
)
return
cs = self.client_statuses.get(client_name)
if cs:
assert isinstance(cs, ClientStatus)
cs.ready_time = time.time()
else:
error = result.get(Constant.ERROR, "?")
self.log_error(fl_ctx, f"client {client_task.client.name} failed to configure: {rc}: {error}")
def client_started(self, client_task: ClientTask, fl_ctx: FLContext):
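        """Subclasses may override to react to the start reply; return False to treat the workflow start as failed."""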
return True
def _process_start_reply(self, client_task: ClientTask, fl_ctx: FLContext):
result = client_task.result
client_name = client_task.client.name
rc = result.get_return_code()
if rc == ReturnCode.OK:
try:
ok = self.client_started(client_task, fl_ctx)
if not ok:
return
            except Exception:
                self.log_error(fl_ctx, f"exception in client_started: {secure_format_traceback()}")
return
self.cw_started = True
else:
error = result.get(Constant.ERROR, "?")
self.log_error(
fl_ctx, f"client {client_task.client.name} couldn't start workflow {self.workflow_id}: {rc}: {error}"
)
def is_sub_flow_done(self, fl_ctx: FLContext) -> bool:
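        """Subclasses may override to signal that their additional sub-flow is done."""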
return False
def _check_job_status(self, fl_ctx: FLContext):
# see whether the server side thinks it's done
if self.is_sub_flow_done(fl_ctx):
return True
now = time.time()
overall_last_progress_time = 0.0
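        # Two watchdogs are enforced below: every client must report status within
        # max_status_report_interval, and at least one client must make progress
        # within progress_timeout.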
for client_name, cs in self.client_statuses.items():
assert isinstance(cs, ClientStatus)
assert isinstance(cs.status, StatusReport)
if cs.status.all_done:
self.log_info(fl_ctx, f"Got ALL_DONE from client {client_name}")
return True
if now - cs.last_report_time > self.max_status_report_interval:
self.system_panic(
f"client {client_name} didn't report status for {self.max_status_report_interval} seconds",
fl_ctx,
)
return True
if overall_last_progress_time < cs.last_progress_time:
overall_last_progress_time = cs.last_progress_time
if time.time() - overall_last_progress_time > self.progress_timeout:
self.system_panic(
f"the workflow {self.workflow_id} has no progress for {self.progress_timeout} seconds",
fl_ctx,
)
return True
return False
def _update_client_status(self, fl_ctx: FLContext):
peer_ctx = fl_ctx.get_peer_context()
assert isinstance(peer_ctx, FLContext)
client_name = peer_ctx.get_identity_name()
if client_name not in self.client_statuses:
self.log_error(fl_ctx, f"received result from unknown client {client_name}!")
return
# see whether status is available
reports = peer_ctx.get_prop(Constant.STATUS_REPORTS)
if not reports:
self.log_info(fl_ctx, f"no status report from client {client_name}")
return
my_report = reports.get(self.workflow_id)
if not my_report:
return
report = status_report_from_dict(my_report)
cs = self.client_statuses[client_name]
assert isinstance(cs, ClientStatus)
now = time.time()
cs.last_report_time = now
cs.num_reports += 1
if report.error:
self.asked_to_stop = True
self.system_panic(f"received failure report from client {client_name}: {report.error}", fl_ctx)
return
if cs.status != report:
# updated
cs.status = report
cs.last_progress_time = now
            timestamp = datetime.fromtimestamp(report.timestamp) if report.timestamp else None
self.log_info(
fl_ctx,
f"updated status of client {client_name} on round {report.last_round}: "
f"timestamp={timestamp}, action={report.action}, all_done={report.all_done}",
)
else:
self.log_debug(
fl_ctx, f"ignored status report from client {client_name} at round {report.last_round}: no change"
)
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
self.log_warning(fl_ctx, f"ignored unknown task {task_name} from client {client.name}")
def stop_controller(self, fl_ctx: FLContext):
pass
| NVFlare-main | nvflare/app_common/ccwf/server_ctl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.app_common.ccwf.cse_client_ctl import CrossSiteEvalClientController
from nvflare.app_common.ccwf.cse_server_ctl import CrossSiteEvalServerController
from nvflare.app_common.ccwf.cyclic_client_ctl import CyclicClientController
from nvflare.app_common.ccwf.cyclic_server_ctl import CyclicServerController
from nvflare.app_common.ccwf.swarm_client_ctl import SwarmClientController
from nvflare.app_common.ccwf.swarm_server_ctl import SwarmServerController
| NVFlare-main | nvflare/app_common/ccwf/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from nvflare.app_common.abstract.metric_comparator import MetricComparator
class Constant:
TN_PREFIX_CYCLIC = "cyclic"
TN_PREFIX_SWARM = "swarm"
TN_PREFIX_CROSS_SITE_EVAL = "cse"
BASENAME_CONFIG = "config"
BASENAME_START = "start"
BASENAME_LEARN = "learn"
BASENAME_EVAL = "eval"
BASENAME_REPORT_LEARN_RESULT = "report_learn_result"
BASENAME_REPORT_FINAL_RESULT = "report_final_result"
BASENAME_ASK_FOR_MODEL = "ask_for_model"
TASK_NAME_PREFIX = "cwf.task_prefix"
ERROR = "cwf.error"
ORDER = "cwf.order"
CLIENTS = "cwf.clients"
START_CLIENT = "cwf.start_client"
RESULT_CLIENTS = "cwf.result_clients"
CLIENT_ORDER = "cwf.client_order"
LAST_ROUND = "cwf.last_round"
START_ROUND = "cwf.start_round"
TIMESTAMP = "cwf.timestamp"
ACTION = "cwf.action"
ALL_DONE = "cwf.all_done"
AGGR_CLIENTS = "cwf.aggr_clients"
TRAIN_CLIENTS = "cwf.train_clients"
AGGREGATOR = "cwf.aggr"
METRIC = "cwf.metric"
CLIENT = "cwf.client"
ROUND = "cwf.round"
CONFIG = "cwf.config"
STATUS_REPORTS = "cwf.status_reports"
RESULT = "cwf.result"
RESULT_TYPE = "cwf.result_type"
EVAL_LOCAL = "cwf.eval_local"
EVAL_GLOBAL = "cwf.eval_global"
EVALUATORS = "cwf.evaluators"
EVALUATEES = "cwf.evaluatees"
GLOBAL_CLIENT = "cwf.global_client"
MODEL_OWNER = "cwf.model_owner"
MODEL_NAME = "cwf.model_name"
MODEL_TYPE = "cwf.model_type"
GLOBAL_NAMES = "cwf.global_names"
EXECUTOR = "cwf.executor"
EXECUTOR_INITIALIZED = "cwf.executor_initialized"
EXECUTOR_FINALIZED = "cwf.executor_finalized"
TOPIC_SHARE_RESULT = "cwf.share_result"
TOPIC_END_WORKFLOW = "cwf.end_wf"
RC_NO_GLOBAL_MODELS = "cwf.no_global_models"
RC_NO_LOCAL_MODEL = "cwf.no_local_model"
RC_UNABLE_TO_EVAL = "cwf.unable_to_eval"
CONFIG_TASK_TIMEOUT = 300
START_TASK_TIMEOUT = 10
END_WORKFLOW_TIMEOUT = 2.0
TASK_CHECK_INTERVAL = 0.5
JOB_STATUS_CHECK_INTERVAL = 2.0
PER_CLIENT_STATUS_REPORT_TIMEOUT = 90.0
WORKFLOW_PROGRESS_TIMEOUT = 3600.0
LEARN_TASK_CHECK_INTERVAL = 1.0
LEARN_TASK_ACK_TIMEOUT = 10
LEARN_TASK_ABORT_TIMEOUT = 5.0
FINAL_RESULT_ACK_TIMEOUT = 10
GET_MODEL_TIMEOUT = 10
class ModelType:
LOCAL = "local"
GLOBAL = "global"
class ResultType:
BEST = "best"
LAST = "last"
class CyclicOrder:
FIXED = "fixed"
RANDOM = "random"
class StatusReport:
def __init__(
self,
timestamp=None,
action: str = "",
last_round=None,
all_done=False,
error: str = "",
):
self.timestamp = timestamp
self.action = action
self.last_round = last_round
self.all_done = all_done
self.error = error
def to_dict(self) -> dict:
result = {
Constant.TIMESTAMP: self.timestamp,
Constant.ACTION: self.action,
Constant.ALL_DONE: self.all_done,
}
if self.last_round is not None:
result[Constant.LAST_ROUND] = self.last_round
if self.error:
result[Constant.ERROR] = self.error
return result
def __eq__(self, other):
if not isinstance(other, StatusReport):
# don't attempt to compare against unrelated types
return ValueError(f"cannot compare to object of type {type(other)}")
return (
self.last_round == other.last_round
and self.timestamp == other.timestamp
and self.action == other.action
and self.all_done == other.all_done
and self.error == other.error
)
def status_report_from_dict(d: dict) -> StatusReport:
last_round = d.get(Constant.LAST_ROUND)
timestamp = d.get(Constant.TIMESTAMP)
all_done = d.get(Constant.ALL_DONE)
error = d.get(Constant.ERROR)
action = d.get(Constant.ACTION)
return StatusReport(
last_round=last_round,
timestamp=timestamp,
action=action,
all_done=all_done,
error=error,
)
def rotate_to_front(item, items: list):
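    """Rotate `items` in place so that `item` becomes the first element, preserving
    the cyclic order of the remaining elements; `item` must be present in `items`.
    """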
num_items = len(items)
idx = items.index(item)
if idx != 0:
new_list = [None] * num_items
for i in range(num_items):
new_pos = i - idx
if new_pos < 0:
new_pos += num_items
new_list[new_pos] = items[i]
for i in range(num_items):
items[i] = new_list[i]
def topic_for_end_workflow(wf_id):
return f"{Constant.TOPIC_END_WORKFLOW}.{wf_id}"
def make_task_name(prefix: str, base_name: str) -> str:
return f"{prefix}_{base_name}"
class NumberMetricComparator(MetricComparator):
def compare(self, a, b) -> Union[int, float]:
if not isinstance(a, (int, float)):
raise ValueError(f"metric value must be a number but got {type(a)}")
if not isinstance(b, (int, float)):
raise ValueError(f"metric value must be a number but got {type(b)}")
return a - b
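# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a quick tour of the
# helpers above, using made-up values.
if __name__ == "__main__":
    # A StatusReport serializes to a dict for transport and back again.
    report = StatusReport(timestamp=1700000000.0, action="train", last_round=3)
    restored = status_report_from_dict(report.to_dict())
    assert restored.last_round == 3 and restored.action == "train"
    # rotate_to_front rotates the list in place, preserving cyclic order.
    clients = ["red", "green", "blue"]
    rotate_to_front("green", clients)
    assert clients == ["green", "blue", "red"]
    # Task names are "<prefix>_<base_name>", e.g. "swarm_learn".
    assert make_task_name(Constant.TN_PREFIX_SWARM, Constant.BASENAME_LEARN) == "swarm_learn"
    # NumberMetricComparator: a positive result means `a` is the larger metric.
    assert NumberMetricComparator().compare(0.9, 0.7) > 0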
| NVFlare-main | nvflare/app_common/ccwf/common.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import threading
from nvflare.apis.dxo import DXO
class EvalResultManager:
def __init__(self, result_dir: str):
self.result_dir = result_dir
self.results = {}
self.update_lock = threading.Lock()
if not os.path.exists(result_dir):
# create
os.makedirs(result_dir)
def add_result(self, evaluator: str, evaluatee: str, result: DXO):
with self.update_lock:
save_file_name = evaluator + "-" + evaluatee
file_path = self._save_validation_result(save_file_name, result)
if evaluator not in self.results:
self.results[evaluator] = {}
self.results[evaluator][evaluatee] = file_path
return file_path
def _save_validation_result(self, file_name, result):
file_path = os.path.join(self.result_dir, file_name)
bytes_to_save = result.to_bytes()
with open(file_path, "wb") as f:
f.write(bytes_to_save)
return file_path
def get_results(self):
with self.update_lock:
return copy.deepcopy(self.results)
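# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): storing one evaluation
# result. The site names and metric value are made up for this demo; it relies
# on DXO.to_bytes() and DataKind.METRICS as used elsewhere in this package.
if __name__ == "__main__":
    import tempfile
    from nvflare.apis.dxo import DataKind
    mgr = EvalResultManager(result_dir=tempfile.mkdtemp())
    metrics = DXO(data_kind=DataKind.METRICS, data={"val_accuracy": 0.91})
    path = mgr.add_result(evaluator="site-1", evaluatee="site-2", result=metrics)
    # Results are keyed as results[evaluator][evaluatee] -> saved file path.
    assert mgr.get_results() == {"site-1": {"site-2": path}}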
| NVFlare-main | nvflare/app_common/ccwf/val_result_manager.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.widgets.widget import Widget
class SimpleIntimeModelSelector(Widget):
def __init__(
self, weigh_by_local_iter=False, aggregation_weights=None, validation_metric_name=MetaKey.INITIAL_METRICS
):
"""Handler to determine if the model is globally best.
Args:
weigh_by_local_iter (bool, optional): whether the metrics should be weighted by trainer's iteration number.
aggregation_weights (dict, optional): a mapping of client name to float for aggregation. Defaults to None.
validation_metric_name (str, optional): key used to save initial validation metric in the DXO meta properties (defaults to MetaKey.INITIAL_METRICS).
"""
super().__init__()
self.val_metric = self.best_val_metric = -np.inf
self.weigh_by_local_iter = weigh_by_local_iter
self.validation_metric_name = validation_metric_name
self.aggregation_weights = aggregation_weights or {}
self.logger.info(f"model selection weights control: {aggregation_weights}")
self._reset_stats()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._startup()
elif event_type == AppEventType.ROUND_STARTED:
self._reset_stats()
elif event_type == AppEventType.BEFORE_CONTRIBUTION_ACCEPT:
self._before_accept(fl_ctx)
elif event_type == AppEventType.BEFORE_AGGREGATION:
self._before_aggregate(fl_ctx)
def _startup(self):
self._reset_stats()
def _reset_stats(self):
self.validation_metric_weighted_sum = 0
self.validation_metric_sum_of_weights = 0
def _before_accept(self, fl_ctx: FLContext):
peer_ctx = fl_ctx.get_peer_context()
shareable: Shareable = fl_ctx.get_prop(AppConstants.TRAINING_RESULT)
try:
dxo = from_shareable(shareable)
        except Exception:
self.log_exception(fl_ctx, "shareable data is not a valid DXO")
return False
if dxo.data_kind not in (DataKind.WEIGHT_DIFF, DataKind.WEIGHTS, DataKind.COLLECTION):
self.log_debug(fl_ctx, "cannot handle {}".format(dxo.data_kind))
return False
if dxo.data is None:
self.log_debug(fl_ctx, "no data to filter")
return False
contribution_round = shareable.get_cookie(AppConstants.CONTRIBUTION_ROUND)
client_name = peer_ctx.get_identity_name(default="?")
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
if current_round == 0:
self.log_debug(fl_ctx, "skipping round 0")
return False # There is no aggregated model at round 0
if contribution_round != current_round:
self.log_warning(
fl_ctx,
f"discarding shareable from {client_name} for round: {contribution_round}. Current round is: {current_round}",
)
return False
validation_metric = dxo.get_meta_prop(self.validation_metric_name)
if validation_metric is None:
self.log_debug(fl_ctx, f"validation metric not existing in {client_name}")
return False
else:
self.log_info(fl_ctx, f"validation metric {validation_metric} from client {client_name}")
if self.weigh_by_local_iter:
n_iter = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, 1.0)
else:
n_iter = 1.0
aggregation_weights = self.aggregation_weights.get(client_name, 1.0)
self.log_debug(fl_ctx, f"aggregation weight: {aggregation_weights}")
weight = n_iter * aggregation_weights
self.validation_metric_weighted_sum += validation_metric * weight
self.validation_metric_sum_of_weights += weight
return True
def _before_aggregate(self, fl_ctx):
if self.validation_metric_sum_of_weights == 0:
self.log_debug(fl_ctx, "nothing accumulated")
return False
self.val_metric = self.validation_metric_weighted_sum / self.validation_metric_sum_of_weights
self.logger.debug(f"weighted validation metric {self.val_metric}")
if self.val_metric > self.best_val_metric:
self.best_val_metric = self.val_metric
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
self.log_info(fl_ctx, f"new best validation metric at round {current_round}: {self.best_val_metric}")
# Fire event to notify that the current global model is a new best
fl_ctx.set_prop(AppConstants.VALIDATION_RESULT, self.best_val_metric, private=True, sticky=False)
self.fire_event(AppEventType.GLOBAL_BEST_MODEL_AVAILABLE, fl_ctx)
self._reset_stats()
return True
| NVFlare-main | nvflare/app_common/ccwf/comps/simple_intime_model_selector.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/ccwf/comps/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import DataKind, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, model_learnable_to_dxo
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
class SimpleModelShareableGenerator(ShareableGenerator):
def learnable_to_shareable(self, model_learnable: ModelLearnable, fl_ctx: FLContext) -> Shareable:
"""Convert ModelLearnable to Shareable.
Args:
model_learnable (ModelLearnable): model to be converted
fl_ctx (FLContext): FL context
Returns:
Shareable: a shareable containing a DXO object.
"""
dxo = model_learnable_to_dxo(model_learnable)
return dxo.to_shareable()
def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:
"""Convert Shareable to ModelLearnable.
Supporting TYPE == TYPE_WEIGHT_DIFF or TYPE_WEIGHTS
Args:
shareable (Shareable): Shareable that contains a DXO object
fl_ctx (FLContext): FL context
Returns:
A ModelLearnable object
Raises:
TypeError: if shareable is not of type shareable
ValueError: if data_kind is not `DataKind.WEIGHTS` and is not `DataKind.WEIGHT_DIFF`
"""
if not isinstance(shareable, Shareable):
raise TypeError("shareable must be Shareable, but got {}.".format(type(shareable)))
dxo = from_shareable(shareable)
base_model = ModelLearnable()
if dxo.data_kind == DataKind.WEIGHT_DIFF:
base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
if not base_model:
self.system_panic(reason="No global base model!", fl_ctx=fl_ctx)
return base_model
weights = base_model[ModelLearnableKey.WEIGHTS]
if dxo.data is not None:
model_diff = dxo.data
for v_name, v_value in model_diff.items():
weights[v_name] = weights[v_name] + v_value
elif dxo.data_kind == DataKind.WEIGHTS:
weights = dxo.data
if not weights:
self.log_info(fl_ctx, "No model weights found. Model will not be updated.")
else:
base_model[ModelLearnableKey.WEIGHTS] = weights
else:
raise ValueError(
"data_kind should be either DataKind.WEIGHTS or DataKind.WEIGHT_DIFF, but got {}".format(dxo.data_kind)
)
base_model[ModelLearnableKey.META] = dxo.get_meta_props()
return base_model
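# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): round-tripping plain
# weights through the generator. Passing fl_ctx=None works here only because
# the DataKind.WEIGHTS path never touches the FL context; a real job supplies
# a proper FLContext.
if __name__ == "__main__":
    from nvflare.app_common.abstract.model import make_model_learnable
    gen = SimpleModelShareableGenerator()
    ml = make_model_learnable(weights={"w": [1.0, 2.0, 3.0]}, meta_props={})
    shareable = gen.learnable_to_shareable(ml, fl_ctx=None)
    restored = gen.shareable_to_learnable(shareable, fl_ctx=None)
    assert restored[ModelLearnableKey.WEIGHTS] == {"w": [1.0, 2.0, 3.0]}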
| NVFlare-main | nvflare/app_common/ccwf/comps/simple_model_shareable_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.ccwf import SwarmClientController
from nvflare.app_common.ccwf.common import Constant
class CWEResultPrinter(FLComponent):
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == Constant.EXECUTOR_FINALIZED:
e = fl_ctx.get_prop(Constant.EXECUTOR)
if not isinstance(e, SwarmClientController):
return
if e.best_result:
self.log_info(fl_ctx, f"My Best Round: {e.best_round}")
self.log_info(fl_ctx, f"My Best Metric: {e.best_metric}")
self.log_info(fl_ctx, f"My Best Result: {e.best_result}")
else:
self.log_info(fl_ctx, "I have no best result")
| NVFlare-main | nvflare/app_common/ccwf/comps/cwe_result_printer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict
import numpy as np
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.model_desc import ModelDescriptor
from nvflare.app_common.np.constants import NPConstants
from nvflare.security.logging import secure_format_exception
def _get_run_dir(fl_ctx: FLContext):
job_id = fl_ctx.get_job_id()
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
run_dir = workspace.get_run_dir(job_id)
return run_dir
class NPFileModelPersistor(ModelPersistor):
def __init__(
self,
last_global_model_file_name="last_global_model.npy",
best_global_model_file_name="best_global_model.npy",
model_dir="models",
initial_model_file_name="initial_model.npy",
):
super().__init__()
self.model_dir = model_dir
self.last_global_model_file_name = last_global_model_file_name
self.best_global_model_file_name = best_global_model_file_name
self.initial_model_file_name = initial_model_file_name
        # This is the default model that will be used if no local model is provided.
self.default_data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
run_dir = _get_run_dir(fl_ctx)
model_path = os.path.join(run_dir, self.model_dir, self.initial_model_file_name)
try:
# try loading previous model
data = np.load(model_path)
except Exception as e:
self.log_info(
fl_ctx,
f"Unable to load model from {model_path}: {secure_format_exception(e)}. Using default data instead.",
fire_event=False,
)
data = self.default_data.copy()
model_learnable = make_model_learnable(weights={NPConstants.NUMPY_KEY: data}, meta_props={})
self.log_info(fl_ctx, f"Loaded initial model: {model_learnable}")
return model_learnable
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
self._save(fl_ctx, model_learnable, self.last_global_model_file_name)
def _save(self, fl_ctx: FLContext, model_learnable: ModelLearnable, file_name: str):
run_dir = _get_run_dir(fl_ctx)
model_root_dir = os.path.join(run_dir, self.model_dir)
if not os.path.exists(model_root_dir):
os.makedirs(model_root_dir)
model_path = os.path.join(model_root_dir, file_name)
np.save(model_path, model_learnable[ModelLearnableKey.WEIGHTS][NPConstants.NUMPY_KEY])
self.log_info(fl_ctx, f"Saved numpy model to: {model_path}")
self.log_info(fl_ctx, f"Model: {model_learnable}")
def handle_event(self, event: str, fl_ctx: FLContext):
if event == AppEventType.GLOBAL_BEST_MODEL_AVAILABLE:
# save the current model as the best model!
model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
self._save(fl_ctx, model, self.best_global_model_file_name)
def _model_file_path(self, fl_ctx: FLContext, file_name):
run_dir = _get_run_dir(fl_ctx)
model_root_dir = os.path.join(run_dir, self.model_dir)
return os.path.join(model_root_dir, file_name)
def _add_to_inventory(self, inventory: dict, fl_ctx: FLContext, file_name: str):
location = self._model_file_path(fl_ctx, file_name)
base_name = os.path.basename(location).split(".")[0]
if os.path.isfile(location):
desc = ModelDescriptor(
name=base_name,
location=location,
model_format="np",
props={},
)
inventory[desc.name] = desc
    def get_model_inventory(self, fl_ctx: FLContext) -> Dict[str, ModelDescriptor]:
"""Get the model inventory of the ModelPersistor.
Args:
fl_ctx: FLContext
Returns: { model_kind: ModelDescriptor }
"""
inventory = {}
self._add_to_inventory(inventory, fl_ctx, self.best_global_model_file_name)
self._add_to_inventory(inventory, fl_ctx, self.last_global_model_file_name)
return inventory
def get_model(self, model_file: str, fl_ctx: FLContext) -> ModelLearnable:
inventory = self.get_model_inventory(fl_ctx)
if not inventory:
return None
desc = inventory.get(model_file)
if not desc:
return None
location = desc.location
if os.path.isfile(location):
try:
# try loading previous model
data = np.load(location)
except Exception as e:
self.log_error(fl_ctx, f"Unable to load model from {location}: {secure_format_exception(e)}.")
return None
model_learnable = make_model_learnable(weights={NPConstants.NUMPY_KEY: data}, meta_props={})
self.log_info(fl_ctx, f"loaded model from {location}")
return model_learnable
else:
self.log_error(fl_ctx, f"no such model file: {location}")
return None
| NVFlare-main | nvflare/app_common/ccwf/comps/np_file_model_persistor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import random
import time
import numpy as np
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.model import ModelLearnable
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.np.constants import NPConstants
from nvflare.security.logging import secure_format_exception
class NPTrainer(Executor):
def __init__(
self,
delta=1,
sleep_time=0,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
validate_model_task_name=AppConstants.TASK_VALIDATION,
model_name="best_numpy.npy",
model_dir="model",
):
# Init functions of components should be very minimal. Init
        # is called when the job JSON is read. A big init will cause JSON loading
        # to halt for a long time.
super().__init__()
if not (isinstance(delta, float) or isinstance(delta, int)):
raise TypeError("delta must be an instance of float or int.")
self._delta = delta
self._model_name = model_name
self._model_dir = model_dir
self._sleep_time = sleep_time
self._train_task_name = train_task_name
self._submit_model_task_name = submit_model_task_name
self._validate_model_task_name = validate_model_task_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
# if event_type == EventType.START_RUN:
# Create all major components here. This is a simple app that doesn't need any components.
# elif event_type == EventType.END_RUN:
# # Clean up resources (closing files, joining threads, removing dirs etc.)
pass
def _train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal):
# First we extract DXO from the shareable.
try:
incoming_dxo = from_shareable(shareable)
except Exception as e:
self.system_panic(
f"Unable to convert shareable to model definition. Exception {secure_format_exception(e)}", fl_ctx
)
return make_reply(ReturnCode.BAD_TASK_DATA)
# Information about workflow is retrieved from the shareable header.
current_round = shareable.get_header(AppConstants.CURRENT_ROUND, None)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS, None)
# Ensure that data is of type weights. Extract model data.
if incoming_dxo.data_kind != DataKind.WEIGHTS:
self.system_panic("Model DXO should be of kind DataKind.WEIGHTS.", fl_ctx)
return make_reply(ReturnCode.BAD_TASK_DATA)
np_data = copy.deepcopy(incoming_dxo.data)
# Display properties.
self.log_info(fl_ctx, f"Incoming data kind: {incoming_dxo.data_kind}")
self.log_info(fl_ctx, f"Model: \n{np_data}")
self.log_info(fl_ctx, f"Current Round: {current_round}")
self.log_info(fl_ctx, f"Total Rounds: {total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Doing some dummy training.
if np_data:
if NPConstants.NUMPY_KEY in np_data:
np_data[NPConstants.NUMPY_KEY] += self._delta
else:
self.log_error(fl_ctx, "numpy_key not found in model.")
return make_reply(ReturnCode.BAD_TASK_DATA)
else:
self.log_error(fl_ctx, "No model weights found in shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# We check abort_signal regularly to make sure
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save local numpy model
try:
self._save_local_model(fl_ctx, np_data)
except Exception as e:
self.log_error(fl_ctx, f"Exception in saving local model: {secure_format_exception(e)}.")
self.log_info(
fl_ctx,
f"Model after training: {np_data}",
)
# Checking abort signal again.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Prepare a DXO for our updated model. Create shareable and return
fake_metric = random.uniform(0.1, 1.0)
d = self._delta
outgoing_np = {NPConstants.NUMPY_KEY: np.array([[d, d, d], [d, d, d], [d, d, d]], dtype=np.float32)}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHT_DIFF,
data=outgoing_np,
meta={
MetaKey.NUM_STEPS_CURRENT_ROUND: 1,
MetaKey.INITIAL_METRICS: fake_metric,
},
)
# artificial delay
# if fl_ctx.get_identity_name() == "blue":
# time.sleep(3.0)
# time.sleep(random.uniform(1.0, 5.0))
return outgoing_dxo.to_shareable()
def _submit_model(self, fl_ctx: FLContext, abort_signal: Signal):
# Retrieve the local model saved during training.
np_data = None
try:
np_data = self._load_local_model(fl_ctx)
except Exception as e:
self.log_error(fl_ctx, f"Unable to load model: {secure_format_exception(e)}")
# Checking abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Create DXO and shareable from model data.
model_shareable = Shareable()
if np_data:
outgoing_dxo = DXO(data_kind=DataKind.WEIGHTS, data=np_data)
model_shareable = outgoing_dxo.to_shareable()
else:
# Set return code.
self.log_error(fl_ctx, "local model not found.")
model_shareable.set_return_code(ReturnCode.EXECUTION_RESULT_ERROR)
return model_shareable
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
        # Any long-running task should check abort_signal regularly; otherwise,
        # the abort-client command will not work.
count, interval = 0, 0.5
while count < self._sleep_time:
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
time.sleep(interval)
count += interval
self.log_info(fl_ctx, f"Task name: {task_name}")
try:
if task_name == self._train_task_name:
return self._train(shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
elif task_name == self._submit_model_task_name:
return self._submit_model(fl_ctx=fl_ctx, abort_signal=abort_signal)
elif task_name == self._validate_model_task_name:
return self._validate_model(shareable, fl_ctx, abort_signal)
else:
# If unknown task name, set RC accordingly.
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
self.log_exception(fl_ctx, f"Exception in NPTrainer execute: {secure_format_exception(e)}.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _validate_model(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
dxo = from_shareable(shareable)
self.log_info(fl_ctx, f"Validating model {dxo}")
fake_metric = random.uniform(0.1, 1.0)
val_results = {"val_accuracy": fake_metric}
metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results)
return metric_dxo.to_shareable()
def _load_local_model(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = engine.get_workspace().get_run_dir(job_id)
model_path = os.path.join(run_dir, self._model_dir)
model_load_path = os.path.join(model_path, self._model_name)
try:
np_data = np.load(model_load_path)
except Exception as e:
self.log_error(fl_ctx, f"Unable to load local model: {secure_format_exception(e)}")
return None
model = ModelLearnable()
model[NPConstants.NUMPY_KEY] = np_data
return model
def _save_local_model(self, fl_ctx: FLContext, model: dict):
# Save local model
engine = fl_ctx.get_engine()
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = engine.get_workspace().get_run_dir(job_id)
model_path = os.path.join(run_dir, self._model_dir)
if not os.path.exists(model_path):
os.makedirs(model_path)
model_save_path = os.path.join(model_path, self._model_name)
np.save(model_save_path, model[NPConstants.NUMPY_KEY])
self.log_info(fl_ctx, f"Saved numpy model to: {model_save_path}")
| NVFlare-main | nvflare/app_common/ccwf/comps/np_trainer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NPConstants:
NUMPY_KEY = "numpy_key"
| NVFlare-main | nvflare/app_common/np/constants.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/np/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import numpy as np
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.security.logging import secure_format_exception
from .constants import NPConstants
class NPModelLocator(ModelLocator):
SERVER_MODEL_NAME = "server"
def __init__(self, model_dir="models", model_name="server.npy"):
"""The ModelLocator's job is to find the models to be included for cross site evaluation
located on server. This NPModelLocator finds and extracts "server" model that is saved during training.
Args:
model_dir (str): Directory to look for models in. Defaults to "model"
model_name (str). Name of the model. Defaults to "server.npy"
"""
super().__init__()
self.model_dir = model_dir
self.model_file_name = model_name
def get_model_names(self, fl_ctx: FLContext) -> List[str]:
"""Returns the list of model names that should be included from server in cross site validation.add()
Args:
fl_ctx (FLContext): FL Context object.
Returns:
List[str]: List of model names.
"""
return [NPModelLocator.SERVER_MODEL_NAME]
def locate_model(self, model_name, fl_ctx: FLContext) -> DXO:
dxo = None
engine = fl_ctx.get_engine()
if model_name == NPModelLocator.SERVER_MODEL_NAME:
try:
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = engine.get_workspace().get_run_dir(job_id)
model_path = os.path.join(run_dir, self.model_dir)
model_load_path = os.path.join(model_path, self.model_file_name)
np_data = None
try:
np_data = np.load(model_load_path, allow_pickle=False)
self.log_info(fl_ctx, f"Loaded {model_name} model from {model_load_path}.")
except Exception as e:
self.log_error(fl_ctx, f"Unable to load NP Model: {secure_format_exception(e)}.")
if np_data is not None:
weights = {NPConstants.NUMPY_KEY: np_data}
dxo = DXO(data_kind=DataKind.WEIGHTS, data=weights, meta={})
except Exception as e:
self.log_exception(
fl_ctx,
f"Exception in retrieving {NPModelLocator.SERVER_MODEL_NAME} model: {secure_format_exception(e)}.",
)
return dxo
| NVFlare-main | nvflare/app_common/np/np_model_locator.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import DataKind, from_bytes
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.formatter import Formatter
from nvflare.app_common.app_constant import AppConstants
from nvflare.security.logging import secure_format_exception
class NPFormatter(Formatter):
def __init__(self) -> None:
super().__init__()
def format(self, fl_ctx: FLContext) -> str:
"""The format function gets validation shareable locations from the dictionary. It loads each shareable,
get the validation results and converts it into human-readable string.
Args:
fl_ctx (FLContext): FLContext object.
Returns:
str: Human readable validation results.
"""
# Get the val shareables
validation_shareables_dict = fl_ctx.get_prop(AppConstants.VALIDATION_RESULT, {})
# Result dictionary
res = {}
try:
# This is a 2d dictionary with each validation result at
# validation_shareables_dict[data_client][model_client]
for data_client in validation_shareables_dict.keys():
validation_dict = validation_shareables_dict[data_client]
if validation_dict:
res[data_client] = {}
for model_name in validation_dict.keys():
dxo_path = validation_dict[model_name]
# Load the shareable
with open(dxo_path, "rb") as f:
metric_dxo = from_bytes(f.read())
# Get metrics from shareable
if metric_dxo and metric_dxo.data_kind == DataKind.METRICS:
metrics = metric_dxo.data
res[data_client][model_name] = metrics
except Exception as e:
self.log_error(fl_ctx, f"Exception: {secure_format_exception(e)}")
return f"{res}"
| NVFlare-main | nvflare/app_common/np/np_formatter.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.security.logging import secure_format_exception
from .constants import NPConstants
def _get_run_dir(fl_ctx: FLContext):
engine = fl_ctx.get_engine()
if engine is None:
raise RuntimeError("engine is missing in fl_ctx.")
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
if job_id is None:
raise RuntimeError("job_id is missing in fl_ctx.")
run_dir = engine.get_workspace().get_run_dir(job_id)
return run_dir
class NPModelPersistor(ModelPersistor):
def __init__(self, model_dir="models", model_name="server.npy"):
super().__init__()
self.model_dir = model_dir
self.model_name = model_name
        # This is the default model that will be used if no local model is provided.
self.default_data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
run_dir = _get_run_dir(fl_ctx)
model_path = os.path.join(run_dir, self.model_dir, self.model_name)
try:
# try loading previous model
data = np.load(model_path)
except Exception as e:
self.log_info(
fl_ctx,
f"Unable to load model from {model_path}: {secure_format_exception(e)}. Using default data instead.",
fire_event=False,
)
data = self.default_data.copy()
model_learnable = make_model_learnable(weights={NPConstants.NUMPY_KEY: data}, meta_props={})
self.log_info(fl_ctx, f"Loaded initial model: {model_learnable[ModelLearnableKey.WEIGHTS]}")
return model_learnable
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
run_dir = _get_run_dir(fl_ctx)
model_root_dir = os.path.join(run_dir, self.model_dir)
if not os.path.exists(model_root_dir):
os.makedirs(model_root_dir)
model_path = os.path.join(model_root_dir, self.model_name)
np.save(model_path, model_learnable[ModelLearnableKey.WEIGHTS][NPConstants.NUMPY_KEY])
self.log_info(fl_ctx, f"Saved numpy model to: {model_path}")
| NVFlare-main | nvflare/app_common/np/np_model_persistor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import numpy as np
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.model import ModelLearnable
from nvflare.app_common.app_constant import AppConstants
from nvflare.security.logging import secure_format_exception
from .constants import NPConstants
class NPTrainer(Executor):
def __init__(
self,
delta=1,
sleep_time=0,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
model_name="best_numpy.npy",
model_dir="model",
):
# Init functions of components should be very minimal. Init
        # is called when the job JSON is read. A big init will cause JSON loading
        # to halt for a long time.
super().__init__()
if not (isinstance(delta, float) or isinstance(delta, int)):
raise TypeError("delta must be an instance of float or int.")
self._delta = delta
self._model_name = model_name
self._model_dir = model_dir
self._sleep_time = sleep_time
self._train_task_name = train_task_name
self._submit_model_task_name = submit_model_task_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
# if event_type == EventType.START_RUN:
# Create all major components here. This is a simple app that doesn't need any components.
# elif event_type == EventType.END_RUN:
# # Clean up resources (closing files, joining threads, removing dirs etc.)
pass
def _train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal):
# First we extract DXO from the shareable.
try:
incoming_dxo = from_shareable(shareable)
except Exception as e:
self.system_panic(
f"Unable to convert shareable to model definition. Exception {secure_format_exception(e)}", fl_ctx
)
return make_reply(ReturnCode.BAD_TASK_DATA)
# Information about workflow is retrieved from the shareable header.
current_round = shareable.get_header(AppConstants.CURRENT_ROUND, None)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS, None)
# Ensure that data is of type weights. Extract model data.
if incoming_dxo.data_kind != DataKind.WEIGHTS:
self.system_panic("Model DXO should be of kind DataKind.WEIGHTS.", fl_ctx)
return make_reply(ReturnCode.BAD_TASK_DATA)
np_data = incoming_dxo.data
# Display properties.
self.log_info(fl_ctx, f"Incoming data kind: {incoming_dxo.data_kind}")
self.log_info(fl_ctx, f"Model: \n{np_data}")
self.log_info(fl_ctx, f"Current Round: {current_round}")
self.log_info(fl_ctx, f"Total Rounds: {total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Doing some dummy training.
if np_data:
if NPConstants.NUMPY_KEY in np_data:
np_data[NPConstants.NUMPY_KEY] += self._delta
else:
self.log_error(fl_ctx, "numpy_key not found in model.")
return make_reply(ReturnCode.BAD_TASK_DATA)
else:
self.log_error(fl_ctx, "No model weights found in shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# We check abort_signal regularly to make sure
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save local numpy model
try:
self._save_local_model(fl_ctx, np_data)
except Exception as e:
self.log_error(fl_ctx, f"Exception in saving local model: {secure_format_exception(e)}.")
self.log_info(
fl_ctx,
f"Model after training: {np_data}",
)
# Checking abort signal again.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Prepare a DXO for our updated model. Create shareable and return
outgoing_dxo = DXO(data_kind=incoming_dxo.data_kind, data=np_data, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: 1})
return outgoing_dxo.to_shareable()
def _submit_model(self, fl_ctx: FLContext, abort_signal: Signal):
# Retrieve the local model saved during training.
np_data = None
try:
np_data = self._load_local_model(fl_ctx)
except Exception as e:
self.log_error(fl_ctx, f"Unable to load model: {secure_format_exception(e)}")
# Checking abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Create DXO and shareable from model data.
model_shareable = Shareable()
if np_data:
outgoing_dxo = DXO(data_kind=DataKind.WEIGHTS, data=np_data)
model_shareable = outgoing_dxo.to_shareable()
else:
# Set return code.
self.log_error(fl_ctx, "local model not found.")
model_shareable.set_return_code(ReturnCode.EXECUTION_RESULT_ERROR)
return model_shareable
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
        # Any long-running task should check abort_signal regularly; otherwise,
        # the abort-client command will not work.
count, interval = 0, 0.5
while count < self._sleep_time:
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
time.sleep(interval)
count += interval
self.log_info(fl_ctx, f"Task name: {task_name}")
try:
if task_name == self._train_task_name:
return self._train(shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
elif task_name == self._submit_model_task_name:
return self._submit_model(fl_ctx=fl_ctx, abort_signal=abort_signal)
else:
# If unknown task name, set RC accordingly.
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
self.log_exception(fl_ctx, f"Exception in NPTrainer execute: {secure_format_exception(e)}.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _load_local_model(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = engine.get_workspace().get_run_dir(job_id)
model_path = os.path.join(run_dir, self._model_dir)
model_load_path = os.path.join(model_path, self._model_name)
try:
np_data = np.load(model_load_path)
except Exception as e:
self.log_error(fl_ctx, f"Unable to load local model: {secure_format_exception(e)}")
return None
model = ModelLearnable()
model[NPConstants.NUMPY_KEY] = np_data
return model
def _save_local_model(self, fl_ctx: FLContext, model: dict):
# Save local model
engine = fl_ctx.get_engine()
job_id = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = engine.get_workspace().get_run_dir(job_id)
model_path = os.path.join(run_dir, self._model_dir)
if not os.path.exists(model_path):
os.makedirs(model_path)
model_save_path = os.path.join(model_path, self._model_name)
np.save(model_save_path, model[NPConstants.NUMPY_KEY])
self.log_info(fl_ctx, f"Saved numpy model to: {model_save_path}")
| NVFlare-main | nvflare/app_common/np/np_trainer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import numpy as np
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.security.logging import secure_format_exception
from .constants import NPConstants
class NPValidator(Executor):
def __init__(
self,
epsilon=1,
sleep_time=0,
validate_task_name=AppConstants.TASK_VALIDATION,
):
# Init functions of components should be very minimal. Init
        # is called when the job JSON is read. A big init will cause JSON loading
        # to halt for a long time.
super().__init__()
self.logger = logging.getLogger("NPValidator")
self._random_epsilon = epsilon
self._sleep_time = sleep_time
self._validate_task_name = validate_task_name
def handle_event(self, event_type: str, fl_ctx: FLContext):
# if event_type == EventType.START_RUN:
# Create all major components here. This is a simple app that doesn't need any components.
# elif event_type == EventType.END_RUN:
# # Clean up resources (closing files, joining threads, removing dirs etc.)
pass
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
        # Any long-running task should check abort_signal regularly;
        # otherwise, the abort-client command will not work.
count, interval = 0, 0.5
while count < self._sleep_time:
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
time.sleep(interval)
count += interval
if task_name == self._validate_task_name:
try:
# First we extract DXO from the shareable.
try:
model_dxo = from_shareable(shareable)
except Exception as e:
self.log_error(
fl_ctx, f"Unable to extract model dxo from shareable. Exception: {secure_format_exception(e)}"
)
return make_reply(ReturnCode.BAD_TASK_DATA)
# Get model from shareable. data_kind must be WEIGHTS.
if model_dxo.data and model_dxo.data_kind == DataKind.WEIGHTS:
model = model_dxo.data
else:
self.log_error(
fl_ctx, "Model DXO doesn't have data or is not of type DataKind.WEIGHTS. Unable to validate."
)
return make_reply(ReturnCode.BAD_TASK_DATA)
# Check if key exists in model
if NPConstants.NUMPY_KEY not in model:
self.log_error(fl_ctx, "numpy_key not in model. Unable to validate.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# The workflow provides MODEL_OWNER information in the shareable header.
model_name = shareable.get_header(AppConstants.MODEL_OWNER, "?")
# Print properties.
self.log_info(fl_ctx, f"Model: \n{model}")
self.log_info(fl_ctx, f"Task name: {task_name}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
self.log_info(fl_ctx, f"Validating model from {model_name}.")
# Check abort signal regularly.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Do some dummy validation.
random_epsilon = np.random.random()
self.log_info(fl_ctx, f"Adding random epsilon {random_epsilon} in validation.")
val_results = {}
np_data = model[NPConstants.NUMPY_KEY]
np_data = np.sum(np_data / np.max(np_data))
val_results["accuracy"] = np_data + random_epsilon
# Check abort signal regularly.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"Validation result: {val_results}")
# Create DXO for metrics and return shareable.
metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results)
return metric_dxo.to_shareable()
except Exception as e:
self.log_exception(fl_ctx, f"Exception in NPValidator execute: {secure_format_exception(e)}.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
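
# A minimal sketch of the dummy validation metric computed above, runnable on
# its own: normalize the weights by their max, sum them, and add a random
# epsilon. The sample array is hypothetical.
if __name__ == "__main__":
    np_data = np.array([1.0, 2.0, 4.0])
    random_epsilon = np.random.random()
    accuracy = float(np.sum(np_data / np.max(np_data)) + random_epsilon)
    print(f"dummy accuracy: {accuracy}")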
| NVFlare-main | nvflare/app_common/np/np_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.tracking.tracker_types import LogWriterName
class LogWriterForMetricsExchanger(FLComponent, ABC):
def __init__(self, metrics_exchanger_id: str):
"""Base class for log writer for MetricsExchanger.
Args:
            metrics_exchanger_id (str): id of the MetricsExchanger component used to send metrics.
"""
super().__init__()
self.metrics_exchanger_id = metrics_exchanger_id
self.sender = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
engine = fl_ctx.get_engine()
self.sender = engine.get_component(self.metrics_exchanger_id)
if self.sender is None:
self.task_panic("Cannot load MetricsExchanger!", fl_ctx=fl_ctx)
def log(self, key: str, value: Any, data_type: AnalyticsDataType, **kwargs):
self.sender.log(key=key, value=value, data_type=data_type, **kwargs)
@abstractmethod
def get_writer_name(self) -> LogWriterName:
pass
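
# A minimal subclass sketch (hypothetical, not part of NVFlare): a scalar-only
# writer that routes values through the MetricsExchanger and reports itself as
# the MLflow writer type. AnalyticsDataType.SCALAR and the global_step kwarg
# are assumptions about how the metric is tagged.
class ScalarLogWriter(LogWriterForMetricsExchanger):
    def log_scalar(self, key: str, value: float, step: int):
        # Delegates to log(), which forwards to the MetricsExchanger sender.
        self.log(key=key, value=value, data_type=AnalyticsDataType.SCALAR, global_step=step)

    def get_writer_name(self) -> LogWriterName:
        return LogWriterName.MLFLOW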
| NVFlare-main | nvflare/app_common/tracking/log_writer_me.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/tracking/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ExpTrackingException(Exception):
def __init__(self, message, **kwargs):
"""
Args:
message: The message or exception describing the error that occurred.
**kwargs: Additional key-value pairs
"""
message = str(message)
self.message = message
self.kwargs = kwargs
super().__init__(message)
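
# Usage sketch: extra keyword arguments stay on the exception for callers to
# inspect; only the message is passed to the base Exception.
if __name__ == "__main__":
    try:
        raise ExpTrackingException("failed to init tracking", site="site-1", job_id="123")
    except ExpTrackingException as e:
        print(e.message, e.kwargs)  # failed to init tracking {'site': 'site-1', 'job_id': '123'}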
| NVFlare-main | nvflare/app_common/tracking/track_exception.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.tracking.tracker_types import LogWriterName
from nvflare.app_common.widgets.streaming import ANALYTIC_EVENT_TYPE, AnalyticsSender
class LogWriter(FLComponent, ABC):
def __init__(self, event_type: str = ANALYTIC_EVENT_TYPE):
super().__init__()
self.sender = self.load_log_sender(event_type)
self.engine = None
def load_log_sender(self, event_type: str = ANALYTIC_EVENT_TYPE) -> AnalyticsSender:
return AnalyticsSender(event_type, self.get_writer_name())
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.ABOUT_TO_START_RUN:
self.sender.engine = fl_ctx.get_engine()
@abstractmethod
def get_writer_name(self) -> LogWriterName:
pass
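
# A minimal subclass sketch (hypothetical): the base class wires an
# AnalyticsSender to the engine on ABOUT_TO_START_RUN, so a concrete writer
# only needs to declare which tracking tool it represents.
class TBLogWriter(LogWriter):
    def get_writer_name(self) -> LogWriterName:
        return LogWriterName.TORCH_TB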
| NVFlare-main | nvflare/app_common/tracking/log_writer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
ANALYTIC_EVENT_TYPE = "analytix_log_stats"
class LogWriterName(Enum):
TORCH_TB = "TORCH_TENSORBOARD"
MLFLOW = "MLFLOW"
WANDB = "WEIGHTS_AND_BIASES"
class TrackConst(object):
TRACKER_KEY = "tracker_key"
TRACK_KEY = "track_key"
TRACK_VALUE = "track_value"
TAG_KEY = "tag_key"
TAGS_KEY = "tags_key"
EXP_TAGS_KEY = "tags_key"
GLOBAL_STEP_KEY = "global_step"
PATH_KEY = "path"
DATA_TYPE_KEY = "analytics_data_type"
KWARGS_KEY = "analytics_kwargs"
PROJECT_NAME = "project_name"
PROJECT_TAGS = "project_name"
EXPERIMENT_NAME = "experiment_name"
RUN_NAME = "run_name"
EXPERIMENT_TAGS = "experiment_tags"
INIT_CONFIG = "init_config"
RUN_TAGS = "run_tags"
SITE_KEY = "site"
JOB_ID_KEY = "job_id"
| NVFlare-main | nvflare/app_common/tracking/tracker_types.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/response_processors/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.client import Client
from nvflare.apis.dxo import DataKind, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.model import make_model_learnable
from nvflare.app_common.abstract.response_processor import ResponseProcessor
from nvflare.app_common.app_constant import AppConstants
class WeightMethod(object):
FIRST = "first"
CLIENT = "client"
class GlobalWeightsInitializer(ResponseProcessor):
def __init__(
self,
weights_prop_name: str = AppConstants.GLOBAL_MODEL,
weight_method: str = WeightMethod.FIRST,
client_name: str = None,
):
"""Set global model weights based on specified weight setting method.
Args:
weights_prop_name: name of the prop to be set into fl_ctx for the determined global weights
weight_method: the method to select final weights: one of "first", "client"
client_name: the name of the client to be used as the weight provider
If weight_method is "first", then use the weights reported from the first client;
If weight_method is "client", then only use the weights reported from the specified client.
"""
if weight_method not in [WeightMethod.FIRST, WeightMethod.CLIENT]:
raise ValueError(f"invalid weight_method '{weight_method}'")
if weight_method == WeightMethod.CLIENT and not client_name:
raise ValueError(f"client name not provided for weight method '{WeightMethod.CLIENT}'")
if weight_method == WeightMethod.CLIENT and not isinstance(client_name, str):
raise ValueError(
f"client name should be a single string for weight method '{WeightMethod.CLIENT}' but it is {client_name} "
)
ResponseProcessor.__init__(self)
self.weights_prop_name = weights_prop_name
self.weight_method = weight_method
self.client_name = client_name
self.final_weights = None
def create_task_data(self, task_name: str, fl_ctx: FLContext) -> Shareable:
"""Create the data for the task to be sent to clients to collect their weights
Args:
task_name: name of the task
fl_ctx: the FL context
Returns: task data
"""
# reset internal state in case this processor is used multiple times
self.final_weights = None
return Shareable()
def process_client_response(self, client: Client, task_name: str, response: Shareable, fl_ctx: FLContext) -> bool:
"""Process the weights submitted by a client.
Args:
client: the client that submitted the response
task_name: name of the task
response: submitted data from the client
fl_ctx: FLContext
Returns:
boolean to indicate if the client data is acceptable.
If not acceptable, the control flow will exit.
"""
if not isinstance(response, Shareable):
self.log_error(
fl_ctx,
f"bad response from client {client.name}: " f"response must be Shareable but got {type(response)}",
)
return False
try:
dxo = from_shareable(response)
except Exception:
self.log_exception(fl_ctx, f"bad response from client {client.name}: " f"it does not contain DXO")
return False
if dxo.data_kind != DataKind.WEIGHTS:
self.log_error(
fl_ctx,
f"bad response from client {client.name}: "
f"data_kind should be DataKind.WEIGHTS but got {dxo.data_kind}",
)
return False
weights = dxo.data
if not weights:
self.log_error(fl_ctx, f"No model weights found from client {client.name}")
return False
if not self.final_weights and (
self.weight_method == WeightMethod.FIRST
or (self.weight_method == WeightMethod.CLIENT and client.name == self.client_name)
):
self.final_weights = weights
return True
def final_process(self, fl_ctx: FLContext) -> bool:
"""Perform the final check on all the received weights from the clients.
Args:
fl_ctx: FLContext
Returns:
boolean indicating whether the final response processing is successful.
If not successful, the control flow will exit.
"""
if not self.final_weights:
self.log_error(fl_ctx, "no weights available from clients")
return False
# must set sticky to True so other controllers can get it!
fl_ctx.set_prop(self.weights_prop_name, make_model_learnable(self.final_weights, {}), private=True, sticky=True)
return True
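
# A standalone sketch of the selection rule implemented in
# process_client_response above (site names are hypothetical): with
# WeightMethod.FIRST the first non-empty response wins; with
# WeightMethod.CLIENT only the named client's response is taken.
if __name__ == "__main__":
    responses = [("site-1", {"w": 1}), ("site-2", {"w": 2})]
    weight_method, client_name = WeightMethod.CLIENT, "site-2"
    final_weights = None
    for name, weights in responses:
        if not final_weights and (
            weight_method == WeightMethod.FIRST
            or (weight_method == WeightMethod.CLIENT and name == client_name)
        ):
            final_weights = weights
    print(final_weights)  # {'w': 2}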
| NVFlare-main | nvflare/app_common/response_processors/global_weights_initializer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.init_final_component import InitFinalArgsComponent
class PSIWorkflow(InitFinalArgsComponent, ABC):
"""
    PSIWorkflow is an interface for different PSI algorithms,
    for example DDH-based PSI, homomorphic-encryption-based PSI, etc.
"""
@abstractmethod
def pre_process(self, abort_signal: Signal) -> bool:
pass
@abstractmethod
def run(self, abort_signal: Signal) -> bool:
pass
@abstractmethod
def post_process(self, abort_signal: Signal) -> bool:
pass
| NVFlare-main | nvflare/app_common/psi/psi_workflow_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.storage import StorageException
from nvflare.app_common.psi.psi_spec import PSIWriter
def _validate_directory(full_path: str):
if not os.path.isabs(full_path):
raise ValueError(f"path {full_path} must be an absolute path.")
parent_dir = os.path.dirname(full_path)
if os.path.exists(parent_dir) and not os.path.isdir(parent_dir):
raise ValueError(f"directory {parent_dir} exists but is not a directory.")
if not os.path.exists(parent_dir):
os.makedirs(parent_dir, exist_ok=False)
class FilePSIWriter(PSIWriter):
def __init__(self, output_path: str):
super().__init__()
if len(output_path) == 0:
raise ValueError(f"output_path {output_path} is empty string")
self.output_path = output_path
def save(
self,
intersection: List[str],
overwrite_existing,
fl_ctx: FLContext,
):
full_uri = self.get_output_path(fl_ctx)
_validate_directory(full_uri)
data_exists = os.path.isfile(full_uri)
if data_exists and not overwrite_existing:
raise StorageException("object {} already exists and overwrite_existing is False".format(full_uri))
self.log_info(fl_ctx, f"trying to save data to {full_uri}")
with open(full_uri, "w") as fp:
fp.write("\n".join(intersection))
self.log_info(fl_ctx, f"file {full_uri} saved")
def get_output_path(self, fl_ctx: FLContext) -> str:
job_dir = os.path.dirname(os.path.abspath(fl_ctx.get_prop(FLContextKey.APP_ROOT)))
self.log_info(fl_ctx, "job dir = " + job_dir)
return os.path.join(job_dir, fl_ctx.get_identity_name(), self.output_path)
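
# The intersection is persisted as plain text, one item per line. A sketch of
# the exact content that save() writes (items are hypothetical):
if __name__ == "__main__":
    intersection = ["user-1", "user-7", "user-42"]
    print("\n".join(intersection))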
| NVFlare-main | nvflare/app_common/psi/file_psi_writer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/psi/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
class PSIWriter(FLComponent, ABC):
"""Interface for saving PSI intersection."""
@abstractmethod
def save(self, intersection: List[str], overwrite_existing: bool, fl_ctx: FLContext):
"""Saves PSI intersection.
Args:
intersection: (List[str]) - Intersection to be saved
overwrite_existing: (bool) overwrite the existing one if true
fl_ctx: (FLContext)
"""
pass
| NVFlare-main | nvflare/app_common/psi/psi_writer_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from nvflare.apis.client import Client
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import PSIConst
from nvflare.app_common.psi.psi_workflow_spec import PSIWorkflow
from nvflare.app_common.utils.component_utils import check_component_type
from nvflare.app_common.workflows.error_handling_controller import ErrorHandlingController
class PSIController(ErrorHandlingController):
def __init__(self, psi_workflow_id: str):
super().__init__()
self.psi_workflow_id = psi_workflow_id
self.psi_workflow: Optional[PSIWorkflow] = None
self.fl_ctx = None
self.task_name = PSIConst.TASK
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
self.log_info(fl_ctx, f"{self.task_name} control flow started.")
if abort_signal.triggered:
return False
self.log_info(fl_ctx, "start pre workflow")
self.psi_workflow.pre_process(abort_signal)
if abort_signal.triggered:
return False
self.log_info(fl_ctx, "start workflow")
self.psi_workflow.run(abort_signal)
if abort_signal.triggered:
return False
self.log_info(fl_ctx, "start post workflow")
self.psi_workflow.post_process(abort_signal)
self.log_info(fl_ctx, f"task {self.task_name} control flow end.")
def start_controller(self, fl_ctx: FLContext):
self.fl_ctx = fl_ctx
psi_workflow = self.load_psi_workflow(fl_ctx)
self.psi_workflow = psi_workflow
def stop_controller(self, fl_ctx: FLContext):
self.psi_workflow.finalize(fl_ctx)
def process_result_of_unknown_task(
self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext
):
pass
def load_psi_workflow(self, fl_ctx: FLContext) -> PSIWorkflow:
engine = fl_ctx.get_engine()
psi_workflow: PSIWorkflow = engine.get_component(self.psi_workflow_id)
psi_workflow.initialize(fl_ctx, controller=self)
check_component_type(psi_workflow, PSIWorkflow)
return psi_workflow
| NVFlare-main | nvflare/app_common/psi/psi_controller.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import DataKind
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.task_handler import TaskHandler
from nvflare.app_common.executors.error_handling_executor import ErrorHandlingExecutor
from nvflare.app_common.utils.component_utils import check_component_type
class PSIExecutor(ErrorHandlingExecutor):
def __init__(self, psi_algo_id: str):
super().__init__()
self.psi_algo_id = psi_algo_id
def get_data_kind(self) -> str:
return DataKind.PSI
def get_task_handler(self, fl_ctx: FLContext) -> TaskHandler:
return self.load_task_handler(self.psi_algo_id, fl_ctx)
def load_task_handler(self, psi_algo: str, fl_ctx: FLContext) -> TaskHandler:
engine = fl_ctx.get_engine()
psi_task_handler = engine.get_component(psi_algo) if psi_algo else None
self.check_psi_algo(psi_task_handler, fl_ctx)
psi_task_handler.initialize(fl_ctx)
return psi_task_handler
def check_psi_algo(self, psi_task_handler: TaskHandler, fl_ctx):
if not psi_task_handler:
self.log_error(fl_ctx, f"PSI algorithm specified by {self.psi_algo_id} is not implemented")
raise NotImplementedError
check_component_type(psi_task_handler, TaskHandler)
| NVFlare-main | nvflare/app_common/psi/psi_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List, Optional
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.init_final_component import InitFinalComponent
from nvflare.app_common.psi.psi_writer_spec import PSIWriter
from nvflare.app_common.utils.component_utils import check_component_type
class PSI(InitFinalComponent, ABC):
"""The PSI interface is intended for the end-user interface to
get an intersection of items of clients specified without knowing::
1) the details of PSI algorithms
2) real client's own items to other FL clients and FL Servers
"""
def __init__(self, psi_writer_id: str):
"""
Args:
            psi_writer_id: id of a PSIWriter component; it is used to look up the PSIWriter from the engine.
                The PSIWriter is used to save the intersection results.
                For example, FilePSIWriter implements the PSIWriter interface and saves to the local disk.
"""
super().__init__()
self.psi_writer_id = psi_writer_id
self.psi_writer: Optional[PSIWriter] = None
self.fl_ctx = None
self.intersection: Optional[List[str]] = None
def initialize(self, fl_ctx: FLContext):
self.fl_ctx = fl_ctx
engine = fl_ctx.get_engine()
psi_writer: PSIWriter = engine.get_component(self.psi_writer_id)
check_component_type(psi_writer, PSIWriter)
self.psi_writer = psi_writer
@abstractmethod
def load_items(self) -> List[str]:
"""This method needs to be implemented to provide the list of items to PSI algorithm in order to
calculate intersection.
Returns: List of Items to be used for intersection calculation
"""
pass
def get_intersection(self) -> Optional[List[str]]:
"""This method will return the calculated intersection once PSI job is completed and successful.
Returns: Intersection result or None
"""
return self.intersection
def save(self, intersection: List[str]):
self.intersection = intersection
if self.psi_writer:
self.psi_writer.save(intersection=intersection, overwrite_existing=True, fl_ctx=self.fl_ctx)
def finalize(self, fl_ctx: FLContext):
pass
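
# A minimal end-user sketch (class name and items_path are hypothetical):
# implement load_items() to feed this site's items into the PSI computation;
# the PSI workflow calls save() with the resulting intersection.
class FileBasedPSI(PSI):
    def __init__(self, psi_writer_id: str, items_path: str):
        super().__init__(psi_writer_id)
        self.items_path = items_path

    def load_items(self) -> List[str]:
        # One item per line; blank lines are skipped.
        with open(self.items_path) as f:
            return [line.strip() for line in f if line.strip()]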
| NVFlare-main | nvflare/app_common/psi/psi_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/psi/dh_psi/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.psi.dh_psi.dh_psi_workflow import DhPSIWorkFlow
from nvflare.app_common.psi.psi_controller import PSIController
from nvflare.app_common.psi.psi_workflow_spec import PSIWorkflow
class DhPSIController(PSIController):
def __init__(self):
super().__init__("")
def load_psi_workflow(self, fl_ctx: FLContext) -> PSIWorkflow:
psi_workflow = DhPSIWorkFlow()
psi_workflow.initialize(fl_ctx, controller=self)
return psi_workflow
| NVFlare-main | nvflare/app_common/psi/dh_psi/dh_psi_controller.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, NamedTuple, Set
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import PSIConst
from nvflare.app_common.psi.psi_workflow_spec import PSIWorkflow
from nvflare.app_common.workflows.broadcast_operator import BroadcastAndWait
from nvflare.utils.decorators import measure_time
class SiteSize(NamedTuple):
name: str
size: int
class DhPSIWorkFlow(PSIWorkflow):
def __init__(self, bloom_filter_fpr: float = 1e-11):
super().__init__()
self.task_name = PSIConst.TASK
self.bloom_filter_fpr: float = bloom_filter_fpr
self.wait_time_after_min_received = 0
self.abort_signal = None
self.fl_ctx = None
self.controller = None
self.ordered_sites: List[SiteSize] = []
self.forward_processed: Dict[str, int] = {}
self.backward_processed: Dict[str, int] = {}
def initialize(self, fl_ctx: FLContext, **kwargs):
self.fl_ctx = fl_ctx
self.controller = kwargs["controller"]
def pre_process(self, abort_signal: Signal) -> bool:
        # ask clients to send back their item sizes
        # sort clients by item size in ascending order
self.log_info(self.fl_ctx, f"pre_process on task {self.task_name}")
if abort_signal.triggered:
return False
self.abort_signal = abort_signal
        self.prepare_sites(abort_signal)
        return True
    def run(self, abort_signal: Signal) -> bool:
if abort_signal.triggered:
return False
self.abort_signal = abort_signal
self.log_info(self.fl_ctx, f"order sites = {self.ordered_sites}")
intersect_site = self.forward_pass(self.ordered_sites, self.forward_processed)
self.log_info(
self.fl_ctx,
f"forward_processed sites {self.forward_processed}\n,"
f"intersect_sites={intersect_site}\n"
f"ordered sites = {self.ordered_sites}\n",
)
self.check_processed_sites(intersect_site, self.forward_processed)
self.backward_processed.update(self.backward_pass(self.ordered_sites, intersect_site))
self.log_info(
self.fl_ctx,
f"backward_processed sites {self.backward_processed}\n,"
f"intersect_sites={intersect_site}\n"
f"ordered sites = {self.ordered_sites}\n",
)
self.check_final_intersection_sizes(intersect_site)
        self.log_pass_time_taken()
        return True
def check_processed_sites(self, last_site: SiteSize, processed_sites: Dict[str, int]):
valid = all(value >= last_site.size for value in processed_sites.values())
if not valid:
raise RuntimeError(
f"Intersection calculation failed:\n"
f"processed sites :{processed_sites},\n"
f"last_site ={last_site} \n"
f"ordered sites = {self.ordered_sites} \n"
)
def check_final_intersection_sizes(self, intersect_site: SiteSize):
all_equal = all(value == intersect_site.size for value in self.backward_processed.values())
if not all_equal:
raise RuntimeError(
f"Intersection calculation failed: the intersection sizes from all sites must be equal.\n"
f"backward processed sites:{self.backward_processed},\n"
f"intersect sites ={intersect_site} \n"
f"ordered sites = {self.ordered_sites} \n"
)
else:
self.log_info(self.fl_ctx, "Intersection calculation succeed")
def log_pass_time_taken(self):
self.log_info(self.fl_ctx, f"'forward_pass' took {self.forward_pass.time_taken} ms.")
self.log_info(self.fl_ctx, f"'backward_pass' took {self.backward_pass.time_taken} ms.")
def post_process(self, abort_signal: Signal):
pass
def finalize(self, fl_ctx: FLContext):
pass
@staticmethod
def get_ordered_sites(results: Dict[str, DXO]):
def compare_fn(e):
return e.size
site_sizes = []
for site_name in results:
data = results[site_name].data
if PSIConst.ITEMS_SIZE in data:
size = data[PSIConst.ITEMS_SIZE]
else:
size = 0
if size > 0:
c = SiteSize(site_name, size)
site_sizes.append(c)
site_sizes.sort(key=compare_fn)
return site_sizes
@measure_time
def forward_pass(self, ordered_sites: List[SiteSize], processed: Dict[str, int]) -> SiteSize:
if self.abort_signal.triggered:
return ordered_sites[0]
total_sites = len(ordered_sites)
if total_sites <= 1:
return ordered_sites[0]
return self.parallel_forward_pass(ordered_sites, processed)
def pairwise_setup(self, ordered_sites: List[SiteSize]):
total_sites = len(ordered_sites)
n = int(total_sites / 2)
task_inputs = {}
for i in range(n):
s = ordered_sites[i]
c = ordered_sites[i + n]
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_SETUP
inputs[PSIConst.ITEMS_SIZE] = c.size
task_inputs[s.name] = inputs
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.multicasts_and_wait(
task_name=self.task_name, task_inputs=task_inputs, abort_signal=self.abort_signal
)
return {site_name: results[site_name].data[PSIConst.SETUP_MSG] for site_name in results}
def pairwise_requests(self, ordered_sites: List[SiteSize], setup_msgs: Dict[str, str]):
total_sites = len(ordered_sites)
n = int(total_sites / 2)
task_inputs = {}
for i in range(n):
s = ordered_sites[i]
c = ordered_sites[i + n]
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_REQUEST
inputs[PSIConst.SETUP_MSG] = setup_msgs[s.name]
task_inputs[c.name] = inputs
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.multicasts_and_wait(
task_name=self.task_name, task_inputs=task_inputs, abort_signal=self.abort_signal
)
return {site_name: results[site_name].data[PSIConst.REQUEST_MSG] for site_name in results}
def pairwise_responses(self, ordered_sites: List[SiteSize], request_msgs: Dict[str, str]):
total_sites = len(ordered_sites)
n = int(total_sites / 2)
task_inputs = {}
for i in range(n):
s = ordered_sites[i]
c = ordered_sites[i + n]
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_RESPONSE
inputs[PSIConst.REQUEST_MSG] = request_msgs[c.name]
task_inputs[s.name] = inputs
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.multicasts_and_wait(
task_name=self.task_name, task_inputs=task_inputs, abort_signal=self.abort_signal
)
return {site_name: results[site_name].data[PSIConst.RESPONSE_MSG] for site_name in results}
def pairwise_intersect(self, ordered_sites: List[SiteSize], response_msg: Dict[str, str]):
total_sites = len(ordered_sites)
n = int(total_sites / 2)
task_inputs = {}
for i in range(n):
s = ordered_sites[i]
c = ordered_sites[i + n]
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_INTERSECT
inputs[PSIConst.RESPONSE_MSG] = response_msg[s.name]
task_inputs[c.name] = inputs
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.multicasts_and_wait(
task_name=self.task_name, task_inputs=task_inputs, abort_signal=self.abort_signal
)
return {site_name: results[site_name].data[PSIConst.ITEMS_SIZE] for site_name in results}
def parallel_forward_pass(self, target_sites, processed: dict):
self.log_info(self.fl_ctx, f"target_sites: {target_sites}")
total_sites = len(target_sites)
if total_sites < 2:
final_site = target_sites[0]
processed.update({final_site.name: final_site.size})
return final_site
else:
setup_msgs = self.pairwise_setup(target_sites)
request_msgs = self.pairwise_requests(target_sites, setup_msgs)
response_msgs = self.pairwise_responses(target_sites, request_msgs)
it_sites = self.pairwise_intersect(target_sites, response_msgs)
processed.update(it_sites)
new_targets = [SiteSize(site.name, it_sites[site.name]) for site in target_sites if site.name in it_sites]
if total_sites % 2 == 1:
new_targets.append(target_sites[total_sites - 1])
return self.parallel_forward_pass(new_targets, processed)
@measure_time
def backward_pass(self, ordered_clients: list, intersect_site: SiteSize) -> dict:
processed = {}
if self.abort_signal.triggered:
return processed
total_clients = len(ordered_clients)
if total_clients <= 1:
return processed
status = self.parallel_backward_pass(ordered_clients, intersect_site)
time_taken = self.parallel_backward_pass.time_taken
self.log_info(self.fl_ctx, f"parallel_back_pass took {time_taken} (ms)")
return status
@measure_time
def parallel_backward_pass(self, ordered_clients: list, intersect_site: SiteSize):
# parallel version
other_sites = [site for site in ordered_clients if site.name != intersect_site.name]
other_sites = self.get_updated_site_sizes(other_sites)
s = intersect_site
other_site_sizes = set([site.size for site in other_sites])
setup_msgs: Dict[str, str] = self.prepare_setup_messages(s, other_site_sizes)
site_setup_msgs = {site.name: setup_msgs[str(site.size)] for site in other_sites}
request_msgs: Dict[str, str] = self.create_requests(site_setup_msgs)
response_msgs: Dict[str, str] = self.process_requests(s, request_msgs)
return self.calculate_intersections(response_msgs)
def calculate_intersections(self, response_msg) -> Dict[str, int]:
task_inputs = {}
for client_name in response_msg:
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_INTERSECT
inputs[PSIConst.RESPONSE_MSG] = response_msg[client_name]
task_inputs[client_name] = inputs
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.multicasts_and_wait(
task_name=self.task_name, task_inputs=task_inputs, abort_signal=self.abort_signal
)
intersects = {client_name: results[client_name].data[PSIConst.ITEMS_SIZE] for client_name in results}
self.log_info(self.fl_ctx, f"received intersections : {intersects} ")
return intersects
def process_requests(self, s: SiteSize, request_msgs: Dict[str, str]) -> Dict[str, str]:
task_inputs = Shareable()
task_inputs[PSIConst.TASK_KEY] = PSIConst.TASK_RESPONSE
task_inputs[PSIConst.REQUEST_MSG_SET] = request_msgs
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.broadcast_and_wait(
task_name=self.task_name, task_input=task_inputs, targets=[s.name], abort_signal=self.abort_signal
)
dxo = results[s.name]
response_msgs = dxo.data[PSIConst.RESPONSE_MSG]
return response_msgs
def create_requests(self, site_setup_msgs) -> Dict[str, str]:
task_inputs = {}
for client_name in site_setup_msgs:
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_REQUEST
inputs[PSIConst.SETUP_MSG] = site_setup_msgs[client_name]
task_inputs[client_name] = inputs
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.multicasts_and_wait(
task_name=self.task_name, task_inputs=task_inputs, abort_signal=self.abort_signal
)
request_msgs = {client_name: results[client_name].data[PSIConst.REQUEST_MSG] for client_name in results}
return request_msgs
def get_updated_site_sizes(self, ordered_sites):
updated_sites = []
for site in ordered_sites:
new_size = self.forward_processed.get(site.name, site.size)
updated_sites.append(SiteSize(site.name, new_size))
return updated_sites
def prepare_sites(self, abort_signal):
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_PREPARE
inputs[PSIConst.BLOOM_FILTER_FPR] = self.bloom_filter_fpr
targets = None
engine = self.fl_ctx.get_engine()
min_responses = len(engine.get_clients())
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.broadcast_and_wait(
task_name=self.task_name,
task_input=inputs,
targets=targets,
min_responses=min_responses,
abort_signal=abort_signal,
)
self.log_info(self.fl_ctx, f"{PSIConst.TASK_PREPARE} results = {results}")
if not results:
abort_signal.trigger("no items to perform PSI")
raise RuntimeError("There is no item to perform PSI calculation")
else:
self.ordered_sites = self.get_ordered_sites(results)
def prepare_setup_messages(self, s: SiteSize, other_site_sizes: Set[int]) -> Dict[str, str]:
inputs = Shareable()
inputs[PSIConst.TASK_KEY] = PSIConst.TASK_SETUP
inputs[PSIConst.ITEMS_SIZE_SET] = other_site_sizes
bop = BroadcastAndWait(self.fl_ctx, self.controller)
results = bop.broadcast_and_wait(
task_name=self.task_name, task_input=inputs, targets=[s.name], abort_signal=self.abort_signal
)
dxo = results[s.name]
return dxo.data[PSIConst.SETUP_MSG]
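
# A standalone sketch of the pairing scheme used by the pairwise_* methods
# above: with n = total_sites // 2, site i (variable s in the code) is paired
# with site i + n (variable c); an odd site count leaves the last site for the
# next round of parallel_forward_pass. Site names and sizes are hypothetical.
if __name__ == "__main__":
    sites = [SiteSize(f"site-{i}", 100 + i) for i in range(5)]
    n = len(sites) // 2
    pairs = [(sites[i].name, sites[i + n].name) for i in range(n)]
    leftover = sites[-1].name if len(sites) % 2 == 1 else None
    print(pairs)     # [('site-0', 'site-2'), ('site-1', 'site-3')]
    print(leftover)  # site-4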
| NVFlare-main | nvflare/app_common/psi/dh_psi/dh_psi_workflow.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.he.model_encryptor",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.he.model_encryptor import HEModelEncryptor
| NVFlare-main | nvflare/app_common/homomorphic_encryption/he_model_encryptor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_common/homomorphic_encryption/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.he.model_decryptor",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.he.model_decryptor import HEModelDecryptor
| NVFlare-main | nvflare/app_common/homomorphic_encryption/he_model_decryptor.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.he.intime_accumulate_model_aggregator",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.he.intime_accumulate_model_aggregator import HEInTimeAccumulateWeightedAggregator
| NVFlare-main | nvflare/app_common/homomorphic_encryption/he_intime_accumulate_model_aggregator.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.he.homomorphic_encrypt",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.he.homomorphic_encrypt import count_encrypted_layers, load_tenseal_context_from_workspace
| NVFlare-main | nvflare/app_common/homomorphic_encryption/homomorphic_encrypt.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn(
f"This module: {__file__} is deprecated. Please use nvflare.app_opt.he.model_shareable_generator",
category=FutureWarning,
stacklevel=2,
)
# flake8: noqa: F401
from nvflare.app_opt.he.model_shareable_generator import HEModelShareableGenerator
| NVFlare-main | nvflare/app_common/homomorphic_encryption/he_model_shareable_generator.py |