# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
from monai.fl.client import ClientAlgoStats
from monai.fl.utils.constants import ExtraItems, FlStatistics
from monai.utils.enums import DataStatsKeys, ImageStatsKeys
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.statistics_spec import Bin, DataType, Feature, Histogram, HistogramType, Statistics
from .utils import convert_dict_keys
class ClientAlgoStatistics(Statistics):
def __init__(self, client_algo_stats_id):
"""Statistics generator that gets data statistics from ClientAlgoStats.
Args:
client_algo_stats_id (str): id pointing to the client_stats object
Returns:
a Shareable with the computed local statistics`
"""
super().__init__()
self.client_algo_stats_id = client_algo_stats_id
self.client_name = None
self.client_algo_stats = None
self.stats = None
self.histograms = None
self.fl_ctx = None
self.req_num_of_bins = None
self.req_bin_ranges = None
self.feature_names = None
def initialize(self, fl_ctx: FLContext):
self.fl_ctx = fl_ctx
self.client_name = fl_ctx.get_identity_name()
engine = fl_ctx.get_engine()
self.client_algo_stats = engine.get_component(self.client_algo_stats_id)
if not isinstance(self.client_algo_stats, ClientAlgoStats):
raise TypeError(f"client_stats must be client_stats type. Got: {type(self.client_algo_stats)}")
self.client_algo_stats.initialize(
extra={
ExtraItems.CLIENT_NAME: fl_ctx.get_identity_name(),
ExtraItems.APP_ROOT: fl_ctx.get_prop(FLContextKey.APP_ROOT),
}
)
def pre_run(
self,
statistics: List[str],
num_of_bins: Optional[Dict[str, Optional[int]]],
bin_ranges: Optional[Dict[str, Optional[List[float]]]],
):
if num_of_bins:
self.req_num_of_bins = list(num_of_bins.values())
_feature_names = list(num_of_bins.keys())
else:
self.req_num_of_bins = []
_feature_names = None
if bin_ranges:
self.req_bin_ranges = list(bin_ranges.values())
else:
self.req_bin_ranges = []
requested_stats = {
FlStatistics.STATISTICS: statistics,
FlStatistics.HIST_BINS: self.req_num_of_bins,
FlStatistics.HIST_RANGE: self.req_bin_ranges,
FlStatistics.FEATURE_NAMES: _feature_names,
}
self.stats = self.client_algo_stats.get_data_stats(extra=requested_stats).statistics
# parse histograms
self.histograms = {}
for dataset_name in self.stats:
self.histograms[dataset_name] = {}
hist_list = self.stats[dataset_name][FlStatistics.DATA_STATS][DataStatsKeys.IMAGE_HISTOGRAM][
ImageStatsKeys.HISTOGRAM
]
# if only one histogram feature was given, use that to name each feature for all image channels.
# Else, use the given feature names
n_hists = len(hist_list)
if len(_feature_names) == 1:
fn = _feature_names[0]
if n_hists > 1 and fn != "*":
raise ValueError(
f"There are more returned histograms ({n_hists}) for dataset "
f"{dataset_name} than provided feature names. "
f"Please use '*' to define the histogram bins and range for all features "
f"or provide histograms bins and range for each feature."
)
if fn == "*":
fn = "Intensity"
if n_hists > 1:
self.feature_names = [f"{fn}-{i}" for i in range(n_hists)]
else:
self.feature_names = [fn]
else:
self.feature_names = _feature_names
if len(self.feature_names) != n_hists:
raise ValueError(
f"Given length of feature names {self.feature_names} ({len(self.feature_names)}) "
f"do not match returned histograms ({n_hists}) for dataset {dataset_name}!"
)
for _hist_fn, _histo in zip(self.feature_names, hist_list):
self.histograms[dataset_name][_hist_fn] = _histo
# convert dataset names to str to support FOBS
return convert_dict_keys(self.stats)
def features(self) -> Dict[str, List[Feature]]:
features = {}
for ds in self.stats:
# convert dataset names to str to support FOBS
features[str(ds)] = []
for feat_name in self.feature_names:
features[str(ds)].append(Feature(feat_name, DataType.FLOAT))
return features
def count(self, dataset_name: str, feature_name: str) -> int:
if dataset_name in self.stats:
return self.stats[dataset_name].get(FlStatistics.DATA_COUNT)
else:
self.log_warning(self.fl_ctx, f"No such dataset {dataset_name}")
return 0
def failure_count(self, dataset_name: str, feature_name: str) -> int:
if dataset_name in self.stats:
return self.stats[dataset_name].get(FlStatistics.FAIL_COUNT)
else:
self.log_warning(self.fl_ctx, f"No such dataset {dataset_name}")
return 0
def histogram(
self, dataset_name: str, feature_name: str, num_of_bins: int, global_min_value: float, global_max_value: float
) -> Histogram:
if dataset_name in self.stats:
if feature_name in self.histograms[dataset_name]:
histo = self.histograms[dataset_name][feature_name]
else:
self.log_warning(
self.fl_ctx,
f"Could not find a matching histogram for feature {feature_name} in dataset {dataset_name}.",
)
return Histogram(HistogramType.STANDARD, list())
else:
self.log_warning(self.fl_ctx, f"No such dataset {dataset_name}")
return Histogram(HistogramType.STANDARD, list())
bin_edges = histo["bin_edges"]
counts = histo["counts"]
num_of_bins = len(counts)
histogram_bins: List[Bin] = []
for j in range(num_of_bins):
low_value = bin_edges[j]
high_value = bin_edges[j + 1]
bin_sample_count = counts[j]
histogram_bins.append(Bin(low_value=low_value, high_value=high_value, sample_count=bin_sample_count))
return Histogram(HistogramType.STANDARD, histogram_bins)
| NVFlare-main | integration/monai/monai_nvflare/client_algo_statistics.py |
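# A minimal standalone sketch of the core of histogram() above: converting a MONAI-style
# histogram dict (keys "bin_edges" and "counts") into NVFlare Bin objects. The numeric
# values below are invented for illustration.
from nvflare.app_common.abstract.statistics_spec import Bin, Histogram, HistogramType

histo = {"bin_edges": [0.0, 0.5, 1.0], "counts": [12, 30]}  # hypothetical MONAI output
histogram_bins = [
    Bin(low_value=histo["bin_edges"][j], high_value=histo["bin_edges"][j + 1], sample_count=histo["counts"][j])
    for j in range(len(histo["counts"]))
]
histogram = Histogram(HistogramType.STANDARD, histogram_bins)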
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .client_algo_executor import ClientAlgoExecutor
from .client_algo_statistics import ClientAlgoStatistics
from .monai_bundle_persistor import MonaiBundlePersistor
from .monai_data_stats_persistor import MonaiDataStatsPersistor
| NVFlare-main | integration/monai/monai_nvflare/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
def convert_dict_keys(src_dict):
"""Convert dictionary to simple types"""
return json.loads(json.dumps(src_dict))
| NVFlare-main | integration/monai/monai_nvflare/utils.py |
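# Usage sketch for convert_dict_keys above: the json round-trip coerces non-string keys
# (e.g. int- or str-enum dataset names) into plain strings, so the result is FOBS-serializable.
# The sample dict is invented.
from monai_nvflare.utils import convert_dict_keys  # assumed import path

print(convert_dict_keys({1: {"count": 10}, "val": {"count": 4}}))
# -> {'1': {'count': 10}, 'val': {'count': 4}}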
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from monai.bundle import ConfigParser
from monai.bundle.config_item import ConfigItem
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import DefaultCheckpointFileName
from nvflare.app_opt.pt.file_model_persistor import PTFileModelPersistor
class MonaiBundlePersistor(PTFileModelPersistor):
def __init__(
self,
bundle_root: str,
config_train_filename: str = "configs/train.json",
network_def_key: str = "network_def",
exclude_vars=None,
global_model_file_name=DefaultCheckpointFileName.GLOBAL_MODEL,
best_global_model_file_name=DefaultCheckpointFileName.BEST_GLOBAL_MODEL,
source_ckpt_filename=None,
filter_id: str = None,
):
"""Persist pytorch-based from MONAI bundle configuration.
Args:
bundle_root (str): path of bundle.
config_train_filename (str, optional): bundle training config path relative to bundle_root;
defaults to "configs/train.json".
network_def_key (str, optional): key that defines the network inside the bundle; defaults to "network_def".
exclude_vars (str, optional): regex expression specifying weight vars to be excluded from training.
Defaults to None.
global_model_file_name (str, optional): file name for saving global model.
Defaults to DefaultCheckpointFileName.GLOBAL_MODEL.
best_global_model_file_name (str, optional): file name for saving best global model.
Defaults to DefaultCheckpointFileName.BEST_GLOBAL_MODEL.
source_ckpt_filename (str, optional): file name for source model checkpoint file relative to `bundle_root`.
Defaults to None.
filter_id: Optional string that defines a filter component that is applied to prepare the model to be saved,
e.g. for serialization of custom Python objects.
Raises:
ValueError: when source_ckpt_filename does not exist
"""
super().__init__(
model=None, # will be set in handle_event
exclude_vars=exclude_vars,
global_model_file_name=global_model_file_name,
best_global_model_file_name=best_global_model_file_name,
source_ckpt_file_full_name=None, # will be set in _parse_config
filter_id=filter_id,
)
self.bundle_root = bundle_root
self.config_train_filename = config_train_filename
self.network_def_key = network_def_key
self.source_ckpt_filename = source_ckpt_filename
self.train_parser = None
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
# get model from bundle network config
self._parse_config(fl_ctx)
self.model = self._get_model(fl_ctx)
# loading happens in superclass handle_event
super().handle_event(event=event, fl_ctx=fl_ctx)
def _parse_config(self, fl_ctx: FLContext):
self.train_parser = ConfigParser()
# Read bundle config files
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.bundle_root = os.path.join(app_root, self.bundle_root)
self.train_parser.read_config(os.path.join(self.bundle_root, self.config_train_filename))
if self.source_ckpt_filename:
self.source_ckpt_file_full_name = os.path.join(self.bundle_root, self.source_ckpt_filename)
def _get_model(self, fl_ctx: FLContext):
try:
# Get network config
network = self.train_parser.get_parsed_content(
self.network_def_key, default=ConfigItem(None, self.network_def_key)
)
if network is None:
raise ValueError(
f"Couldn't parse the network definition from {self.config_train_filename}. "
f"BUNDLE_ROOT was {self.bundle_root}."
)
return network
except Exception as e:
self.log_exception(fl_ctx, f"initialize exception: {e}")
| NVFlare-main | integration/monai/monai_nvflare/monai_bundle_persistor.py |
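# A sketch of the bundle parsing that _get_model() above relies on, assuming a typical
# MONAI bundle "configs/train.json" containing a "network_def" section such as
# {"network_def": {"_target_": "monai.networks.nets.UNet", "spatial_dims": 3, ...}}.
# ConfigParser instantiates the torch.nn.Module from that definition:
from monai.bundle import ConfigParser

parser = ConfigParser()
parser.read_config("configs/train.json")  # hypothetical bundle config path
network = parser.get_parsed_content("network_def")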
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monai.fl.client import ClientAlgo
from monai.fl.utils.constants import ExtraItems, FlStatistics, ModelType, WeightType
from monai.fl.utils.exchange_object import ExchangeObject
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants, ModelName, ValidateType
def exchangeobj_from_shareable(shareable: Shareable):
dxo = from_shareable(shareable)
eo = ExchangeObject(weights=dxo.data)
return eo
def weights_to_numpy(exchange_object: ExchangeObject):
if not exchange_object.is_valid_weights():
raise ValueError(f"global_model ExchangeObject is not valid: {exchange_object}")
weights = exchange_object.weights
for name in weights:
weights[name] = weights[name].detach().cpu().numpy()
exchange_object.weights = weights
return exchange_object
class ClientAlgoExecutor(Executor):
def __init__(
self,
client_algo_id,
stats_sender_id=None,
train_task=AppConstants.TASK_TRAIN,
submit_model_task=AppConstants.TASK_SUBMIT_MODEL,
validate_task=AppConstants.TASK_VALIDATION,
key_metric: str = "accuracy",
):
"""Key component to run client_algo on clients.
Args:
client_algo_id (str): id pointing to the client_algo object
stats_sender_id (str, optional): id pointing to the LogWriterForMetricsExchanger object
train_task (str, optional): label to dispatch train task. Defaults to AppConstants.TASK_TRAIN.
submit_model_task (str, optional): label to dispatch submit model task. Defaults to AppConstants.TASK_SUBMIT_MODEL.
validate_task (str, optional): label to dispatch validation task. Defaults to AppConstants.TASK_VALIDATION.
key_metric (str, optional): metric name reported to the server for model selection. Defaults to "accuracy".
"""
super().__init__()
self.client_algo_id = client_algo_id
self.stats_sender_id = stats_sender_id
self.client_algo = None
self.train_task = train_task
self.submit_model_task = submit_model_task
self.validate_task = validate_task
self.client_id = None
self.key_metric = key_metric
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.initialize(fl_ctx)
elif event_type == EventType.ABORT_TASK:
try:
self.log_info(fl_ctx, "Aborting ClientAlgo execution...")
if self.client_algo:
self.client_algo.abort(fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"client_algo abort exception: {e}")
elif event_type == EventType.END_RUN:
self.finalize(fl_ctx)
elif event_type == EventType.SWAP_OUT: # only used during simulation
self.finalize(fl_ctx)
def initialize(self, fl_ctx: FLContext):
try:
self.client_id = fl_ctx.get_identity_name()
engine = fl_ctx.get_engine()
stats_sender = engine.get_component(self.stats_sender_id) if self.stats_sender_id else None
self.client_algo = engine.get_component(self.client_algo_id)
if not isinstance(self.client_algo, ClientAlgo):
raise TypeError(f"client_algo must be client_algo type. Got: {type(self.client_algo)}")
self.client_algo.initialize(
extra={
ExtraItems.CLIENT_NAME: fl_ctx.get_identity_name(),
ExtraItems.APP_ROOT: fl_ctx.get_prop(FLContextKey.APP_ROOT),
ExtraItems.STATS_SENDER: stats_sender,
}
)
except Exception as e:
self.log_exception(fl_ctx, f"client_algo initialize exception: {e}")
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
try:
if task_name == self.train_task:
return self.train(shareable, fl_ctx, abort_signal)
elif task_name == self.submit_model_task:
return self.submit_model(shareable, fl_ctx)
elif task_name == self.validate_task:
return self.validate(shareable, fl_ctx, abort_signal)
else:
self.log_error(fl_ctx, f"Could not handle task: {task_name}")
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
# Task execution error, return EXECUTION_EXCEPTION Shareable
self.log_exception(fl_ctx, f"client_algo execute exception: {e}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_debug(fl_ctx, f"train abort signal: {abort_signal.triggered}")
shareable.set_header(AppConstants.VALIDATE_TYPE, ValidateType.BEFORE_TRAIN_VALIDATE)
test_report = self.client_algo.evaluate(exchangeobj_from_shareable(shareable))
test_key_metric = test_report.metrics.get(self.key_metric)
self.log_info(
fl_ctx, f"{self.client_id} reported key metric {self.key_metric}: {test_key_metric}"
) # only return key metric here
validate_result = DXO(
data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: test_key_metric}
).to_shareable()
self.client_algo.train(exchangeobj_from_shareable(shareable), extra={ExtraItems.ABORT: abort_signal})
local_weights_eo = self.client_algo.get_weights()
# check returned weights dict
if local_weights_eo.weights is None:
self.log_error(fl_ctx, "Returned exchange object doesn't contain weights.")
return make_reply(ReturnCode.ERROR)
# convert MONAI's WeightType to NVFlare's DataKind
if local_weights_eo.weight_type == WeightType.WEIGHTS:
data_kind = DataKind.WEIGHTS
elif local_weights_eo.weight_type == WeightType.WEIGHT_DIFF:
data_kind = DataKind.WEIGHT_DIFF
else:
self.log_error(
fl_ctx,
f"Returned `WeightType` not supported. Expected {WeightType.WEIGHTS} or {WeightType.WEIGHT_DIFF},"
f" but got {local_weights_eo.get_weight_type()}",
)
return make_reply(ReturnCode.ERROR)
# get the number of executed steps
statistics = local_weights_eo.statistics
executed_steps = statistics.get(FlStatistics.NUM_EXECUTED_ITERATIONS)
if executed_steps:
meta = {MetaKey.NUM_STEPS_CURRENT_ROUND: executed_steps}
else:
meta = None
# Get returned weights
local_weights_eo = weights_to_numpy(local_weights_eo)
train_result = DXO(data_kind=data_kind, data=local_weights_eo.weights, meta=meta).to_shareable()
# Note, optionally could also support returned optimizer state
# if the client_algo returned the valid BEFORE_TRAIN_VALIDATE result, set the INITIAL_METRICS in
# the train result, which can be used for best model selection.
if (
validate_result
and isinstance(validate_result, Shareable)
and validate_result.get_return_code() == ReturnCode.OK
):
try:
metrics_dxo = from_shareable(validate_result)
train_dxo = from_shareable(train_result)
train_dxo.meta[MetaKey.INITIAL_METRICS] = metrics_dxo.data.get(MetaKey.INITIAL_METRICS, 0)
return train_dxo.to_shareable()
except ValueError:
return train_result
else:
return train_result
def submit_model(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
model_name = shareable.get_header(AppConstants.SUBMIT_MODEL_NAME)
# select MONAI's ModelType based on NVFlare's model_name
if model_name == ModelName.BEST_MODEL:
model_type = ModelType.BEST_MODEL
elif model_name == ModelName.FINAL_MODEL:
model_type = ModelType.FINAL_MODEL
else:
self.log_error(
fl_ctx,
f"Requested `ModelName` not supported. Expected {ModelName.BEST_MODEL} or {ModelName.FINAL_MODEL},"
f" but got {model_name}",
)
return make_reply(ReturnCode.ERROR)
local_weights_eo = self.client_algo.get_weights(extra={ExtraItems.MODEL_TYPE: model_type})
if local_weights_eo.weights is not None:
local_weights_eo = weights_to_numpy(local_weights_eo)
return DXO(data_kind=DataKind.WEIGHTS, data=local_weights_eo.weights).to_shareable()
else:
return make_reply(ReturnCode.EMPTY_RESULT)
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self.log_debug(fl_ctx, f"validate abort_signal {abort_signal.triggered}")
shareable.set_header(AppConstants.VALIDATE_TYPE, ValidateType.MODEL_VALIDATE)
test_report = self.client_algo.evaluate(exchangeobj_from_shareable(shareable))
if test_report.metrics is not None:
return DXO(data_kind=DataKind.METRICS, data=test_report.metrics).to_shareable()
else:
return make_reply(ReturnCode.EMPTY_RESULT)
def finalize(self, fl_ctx: FLContext):
try:
if self.client_algo:
self.client_algo.finalize(fl_ctx)
except Exception as e:
self.log_exception(fl_ctx, f"client_algo finalize exception: {e}")
| NVFlare-main | integration/monai/monai_nvflare/client_algo_executor.py |
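# A small sketch of weights_to_numpy() above: torch tensors inside the ExchangeObject are
# detached and converted to numpy arrays in place. The weights dict is invented.
import torch
from monai.fl.utils.exchange_object import ExchangeObject
from monai_nvflare.client_algo_executor import weights_to_numpy  # assumed import path

eo = ExchangeObject(weights={"layer.weight": torch.ones(2, 2)})
eo = weights_to_numpy(eo)
print(type(eo.weights["layer.weight"]))  # <class 'numpy.ndarray'>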
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from collections.abc import Callable, Sequence
from typing import TYPE_CHECKING, Any
import torch
from monai.config import IgniteInfo
from monai.utils import is_scalar, min_version, optional_import
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.tracking.log_writer_me import LogWriterForMetricsExchanger
from nvflare.app_common.tracking.tracker_types import LogWriterName
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import(
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
)
ANALYTIC_EVENT_TYPE = "analytix_log_stats"
DEFAULT_TAG = "Loss"
class NVFlareStatsHandler(LogWriterForMetricsExchanger):
"""
NVFlareStatsHandler defines a set of Ignite Event-handlers for all the NVFlare ``LogWriterForMetricsExchanger`` logics.
It can be used for any Ignite Engine(trainer, validator and evaluator).
And it can support both epoch level and iteration level with pre-defined LogWriterForMetricsExchanger event sender.
The expected data source is Ignite ``engine.state.output`` and ``engine.state.metrics``.
Default behaviors:
- When EPOCH_COMPLETED, write each dictionary item in
``engine.state.metrics`` to TensorBoard.
- When ITERATION_COMPLETED, write each dictionary item in
``self.output_transform(engine.state.output)`` to TensorBoard.
"""
def __init__(
self,
iteration_log: bool | Callable[[Engine, int], bool] = True,
epoch_log: bool | Callable[[Engine, int], bool] = True,
output_transform: Callable = lambda x: x[0],
global_epoch_transform: Callable = lambda x: x,
state_attributes: Sequence[str] | None = None,
state_attributes_type: AnalyticsDataType | None = None,
tag_name: str = DEFAULT_TAG,
metrics_exchanger_id: str = None,
) -> None:
"""
Args:
iteration_log: whether to send data when iteration completed, default to `True`.
``iteration_log`` can be also a function and it will be interpreted as an event filter
(see https://pytorch.org/ignite/generated/ignite.engine.events.Events.html for details).
Event filter function accepts as input engine and event value (iteration) and should return True/False.
epoch_log: whether to send data when epoch completed, default to `True`.
``epoch_log`` can be also a function and it will be interpreted as an event filter.
See ``iteration_log`` argument for more details.
output_transform: a callable that is used to transform the
``ignite.engine.state.output`` into a scalar to plot, or a dictionary of {key: scalar}.
In the latter case, the output string will be formatted as key: value.
By default this value is sent every time an iteration completes.
The default behavior is to report the loss from output[0], since output is a decollated list
and the loss value is replicated for every item of the decollated list.
`engine.state` and `output_transform` inherit from the ignite concept:
https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
global_epoch_transform: a callable that is used to customize global epoch number.
For example, in evaluation, the evaluator engine might want to use trainer engines epoch number
when plotting epoch vs metric curves.
state_attributes: expected attributes from `engine.state`, if provided, will extract them
when epoch completed.
state_attributes_type: the type of the expected attributes from `engine.state`.
Only required when `state_attributes` is not None.
tag_name: when iteration output is a scalar, tag_name is used as the sending tag, defaults to ``'Loss'``.
metrics_exchanger_id (str): provided for LogWriter to get MetricsExchanger
"""
super().__init__(metrics_exchanger_id=metrics_exchanger_id)
self.iteration_log = iteration_log
self.epoch_log = epoch_log
self.output_transform = output_transform
self.global_epoch_transform = global_epoch_transform
self.state_attributes = state_attributes
self.state_attributes_type = state_attributes_type
self.tag_name = tag_name
def attach(self, engine: Engine) -> None:
"""
Register a set of Ignite Event-Handlers to a specified Ignite engine.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self.iteration_log and not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
event = Events.ITERATION_COMPLETED
if callable(self.iteration_log): # substitute event with new one using filter callable
event = event(event_filter=self.iteration_log)
engine.add_event_handler(event, self.iteration_completed)
if self.epoch_log and not engine.has_event_handler(self.epoch_completed, Events.EPOCH_COMPLETED):
event = Events.EPOCH_COMPLETED
if callable(self.epoch_log): # substitute event with new one using filter callable
event = event(event_filter=self.epoch_log)
engine.add_event_handler(event, self.epoch_completed)
def epoch_completed(self, engine: Engine) -> None:
"""
Handler for train or validation/evaluation epoch completed Event.
Write epoch level events, default values are from Ignite `engine.state.metrics` dict.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
self._default_epoch_sender(engine)
def iteration_completed(self, engine: Engine) -> None:
"""
Handler for train or validation/evaluation iteration completed Event.
Write iteration level events, default values are from Ignite `engine.state.output`.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
self._default_iteration_sender(engine)
def _send_stats(self, _engine: Engine, tag: str, value: Any, data_type: AnalyticsDataType, step: int) -> None:
"""
Write value.
Args:
_engine: Ignite Engine, unused argument.
tag: tag name used when sending the value.
value: value of the scalar data for current step.
data_type: the AnalyticsDataType of the value.
step: index of current step.
"""
self.log(key=tag, value=value, data_type=data_type, global_step=step)
def _default_epoch_sender(self, engine: Engine) -> None:
"""
Execute epoch level event write operation.
Defaults to sending the values from the Ignite `engine.state.metrics` dict and
the values of the specified attributes of `engine.state`.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
current_epoch = self.global_epoch_transform(engine.state.epoch)
summary_dict = engine.state.metrics
for name, value in summary_dict.items():
print(f"\n\t{name}", type(value), value)
self._send_stats(engine, name, value, AnalyticsDataType.SCALAR, current_epoch)
if self.state_attributes is not None:
for attr in self.state_attributes:
self._send_stats(
engine, attr, getattr(engine.state, attr, None), self.state_attributes_type, current_epoch
)
def _default_iteration_sender(self, engine: Engine) -> None:
"""
Execute iteration level event write operation based on Ignite `engine.state.output` data.
Extract the values from `self.output_transform(engine.state.output)`.
Since `engine.state.output` is a decollated list and we replicated the loss value for every item
of the decollated list, the default behavior is to track the loss from `output[0]`.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
loss = self.output_transform(engine.state.output)
if loss is None:
return # do nothing if output is empty
if isinstance(loss, dict):
for name in sorted(loss):
value = loss[name]
if not is_scalar(value):
warnings.warn(
"ignoring non-scalar output in NVFlareStatsHandler,"
" make sure `output_transform(engine.state.output)` returns"
" a scalar or dictionary of key and scalar pairs to avoid this warning."
" {}:{}".format(name, type(value))
)
continue # not plot multi dimensional output
self._send_stats(
_engine=engine,
tag=name,
data_type=AnalyticsDataType.SCALAR,
value=value.item() if isinstance(value, torch.Tensor) else value,
step=engine.state.iteration,
)
elif is_scalar(loss): # not printing multi dimensional output
self._send_stats(
_engine=engine,
tag=self.tag_name,
data_type=AnalyticsDataType.SCALAR,
value=loss.item() if isinstance(loss, torch.Tensor) else loss,
step=engine.state.iteration,
)
else:
warnings.warn(
"ignoring non-scalar output in NVFlareStatsHandler,"
" make sure `output_transform(engine.state.output)` returns"
" a scalar or a dictionary of key and scalar pairs to avoid this warning."
" {}".format(type(loss))
)
def get_writer_name(self) -> LogWriterName:
"""Not used, just for abstractmethod"""
return LogWriterName.MLFLOW
| NVFlare-main | integration/monai/monai_nvflare/nvflare_stats_handler.py |
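# A sketch of the iteration_log/epoch_log event-filter mechanism used by attach() above:
# an Ignite event filter receives (engine, event_index) and returns True/False, so passing
# a callable lets the handler fire on a subset of events, e.g. every 10th iteration.
# Constructing the handler itself is left commented because it needs a metrics_exchanger_id
# wired up by the NVFlare job configuration.
def log_every_tenth_iteration(engine, event_idx):
    return event_idx % 10 == 0

# handler = NVFlareStatsHandler(iteration_log=log_every_tenth_iteration,
#                               metrics_exchanger_id="metrics_exchanger")  # assumed component id
# handler.attach(trainer_engine)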
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pandas as pd
def data_args():
parser = argparse.ArgumentParser(description="Combine a list of jsonl files")
parser.add_argument("--file_list", nargs="+", required=True, help="Path to input file list")
parser.add_argument("--output_path", type=str, required=True, help="Path to output file")
args = parser.parse_args()
return args
def main():
args = data_args()
# load training data
file_list = args.file_list
data_combined = pd.DataFrame()
for file in file_list:
data = pd.read_json(file, lines=True)
data_combined = pd.concat([data_combined, data])
# randomize the order of the data
data_combined = data_combined.sample(frac=1, random_state=0).reset_index(drop=True)
# save the combined data
output_path = args.output_path
with open(output_path, "w") as f:
f.write(data_combined.to_json(orient="records", lines=True))
if __name__ == "__main__":
main()
| NVFlare-main | integration/nemo/examples/supervised_fine_tuning/utils/combine_jsonl.py |
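# Example invocation of the script above (paths are placeholders):
#   python combine_jsonl.py --file_list site-1.jsonl site-2.jsonl --output_path combined.jsonl
# The core is the following pandas jsonl round-trip with a fixed shuffle seed:
import pandas as pd

df = pd.concat(pd.read_json(f, lines=True) for f in ["site-1.jsonl", "site-2.jsonl"])
df = df.sample(frac=1, random_state=0).reset_index(drop=True)
df.to_json("combined.jsonl", orient="records", lines=True)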
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import shutil
def load_config(config_file):
with open(config_file, "r") as f:
return json.load(f)
def save_config(config_file, config):
with open(config_file, "w") as f:
json.dump(config, f, indent=2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--job_folder",
type=str,
help="Target job folder containing config files.",
)
parser.add_argument("--template_folder", type=str, help="Template job folder", default="jobs/templates")
parser.add_argument("--num_clients", type=int, help="Number of client app folders to generate.", default=1)
parser.add_argument("--devices", type=int, help="Number of GPU devices per client.", default=1)
parser.add_argument(
"--validation_ds_files",
nargs="+",
help="Validation files, one per client.",
)
parser.add_argument(
"--train_ds_files",
nargs="+",
help="Training files files, one per client.",
default="data/FinancialPhraseBank-v1.0_split/site-",
)
args = parser.parse_args()
assert (
args.num_clients == len(args.validation_ds_files) == len(args.train_ds_files)
), "Number of clients should match number of validation and training files."
# create client app folders
for i in range(args.num_clients):
app_folder = os.path.join(args.job_folder, f"app{i+1}")
client_cfg_file = os.path.join(app_folder, "config", "config_fed_client.json")
shutil.copytree(os.path.join(args.template_folder, "client"), app_folder, dirs_exist_ok=True)
# remove unused client config
if isinstance(args.devices, int) and args.devices == 1:
os.remove(os.path.join(app_folder, "config", "config_fed_client_multiprocess.json"))
elif isinstance(args.devices, int) and args.devices > 1:
shutil.move(os.path.join(app_folder, "config", "config_fed_client_multiprocess.json"), client_cfg_file)
else:
raise ValueError(f"Number client devices should be positive integer but was {args.devices}")
# modify client configs
client_cfg = load_config(client_cfg_file)
client_cfg["train_ds_files"] = args.train_ds_files[i]
client_cfg["validation_ds_files"] = args.validation_ds_files[i]
if args.devices > 1:
client_cfg["devices"] = args.devices
save_config(client_cfg_file, client_cfg)
# modify server config
app_folder = os.path.join(args.job_folder, "server")
shutil.copytree(os.path.join(args.template_folder, "server"), app_folder, dirs_exist_ok=True)
server_cfg_file = os.path.join(app_folder, "config", "config_fed_server.json")
server_cfg = load_config(server_cfg_file)
server_cfg["min_clients"] = args.num_clients
save_config(server_cfg_file, server_cfg)
# modify meta.json
meta_cfg_file = os.path.join(args.job_folder, "meta.json")
shutil.copyfile(os.path.join(args.template_folder, "meta.json"), meta_cfg_file)
meta_cfg = load_config(meta_cfg_file)
meta_cfg["name"] = os.path.basename(args.job_folder)
meta_cfg["deploy_map"] = {"server": ["server"]}
for i in range(args.num_clients):
meta_cfg["deploy_map"][f"app{i+1}"] = [f"site-{i+1}"]
save_config(meta_cfg_file, meta_cfg)
print(f"Created configs for {args.num_clients} clients")
if __name__ == "__main__":
main()
| NVFlare-main | integration/nemo/examples/supervised_fine_tuning/utils/create_configs.py |
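# For --num_clients 2, the meta.json written above ends up with a deploy_map like the
# following (the job name is the basename of --job_folder; shown here with a made-up name):
meta_cfg_example = {
    "name": "my_sft_job",
    "deploy_map": {"server": ["server"], "app1": ["site-1"], "app2": ["site-2"]},
}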
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import numpy as np
import pandas as pd
import pyarrow.parquet as pq
def data_args():
parser = argparse.ArgumentParser(description="Preprocess data to train and validation files in jsonl format")
parser.add_argument("--training_file", type=str, required=True, help="Path to training set")
parser.add_argument("--validation_file", type=str, help="Path to validation set, if given, append to training data")
parser.add_argument("--validation_ratio", type=float, default=0.1, help="Ratio of validation set, defult to 10%")
parser.add_argument("--testing_ratio", type=float, default=0.1, help="Ratio of testing set, defult to 10%")
parser.add_argument("--output_dir", type=str, required=True, help="Path to output folder")
args = parser.parse_args()
return args
def split_to_jsonl(data, output_dir, validation_ratio, testing_ratio):
print("Preprocessing data to NeMo_SFT jsonl format...")
output_path_tra = os.path.join(output_dir, "training.jsonl")
output_path_val = os.path.join(output_dir, "validation.jsonl")
output_path_tst = os.path.join(output_dir, "testing.jsonl")
data_ct = len(data)
val_threshold = int(data_ct * validation_ratio)
test_threshold = int(data_ct * testing_ratio)
with open(output_path_val, "w") as g, open(output_path_tst, "w") as h, open(output_path_tra, "w") as i:
for index, item in data.iterrows():
input = item["input"].strip()
if input != "":
# Randomize input and instruction order.
input_first = np.random.randint(0, 2) == 0
if input_first:
instruction = item["instruction"].strip()
assert instruction != ""
input = f"{input}\n\n{instruction}"
output = item["output"]
else:
instruction = item["instruction"].strip()
assert instruction != ""
input = f"{instruction}\n\n{input}"
output = item["output"]
else:
input = item["instruction"]
output = item["output"]
# write to jsonl file according to index
if index < val_threshold:
g.write(json.dumps({"input": input, "output": output}) + "\n")
elif index < val_threshold + test_threshold:
h.write(json.dumps({"input": input, "output": output}) + "\n")
else:
i.write(json.dumps({"input": input, "output": output}) + "\n")
print(f"{index+1} out of {data_ct} Data was successfully preprocessed and saved.")
def main():
args = data_args()
# load training data
path_to_train = args.training_file
ds = pq.read_table(path_to_train)
train = ds.to_pandas()
# load validation data if provided and append to training data
if args.validation_file:
path_to_val = args.validation_file
ds = pq.read_table(path_to_val)
val = ds.to_pandas()
train = pd.concat([train, val])
# randomize the order of the data
data_full = train.sample(frac=1, random_state=0).reset_index(drop=True)
# split data into training, validation and testing
val_ratio = args.validation_ratio
test_ratio = args.testing_ratio
output_dir = args.output_dir
split_to_jsonl(data_full, output_dir, val_ratio, test_ratio)
if __name__ == "__main__":
main()
| NVFlare-main | integration/nemo/examples/supervised_fine_tuning/utils/preprocess_alpaca.py |
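# Each output line written by split_to_jsonl() above is a single JSON object of the form
# {"input": ..., "output": ...}; for a non-empty "input" column the instruction is
# concatenated before or after it at random. A schematic record (values invented):
import json

print(json.dumps({"input": "Some context.\n\nSummarize the text above.", "output": "A short summary."}))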
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import numpy as np
import pandas as pd
def data_args():
parser = argparse.ArgumentParser(description="Preprocess data to train and validation files in jsonl format")
parser.add_argument("--training_file", type=str, required=True, help="Path to training set")
parser.add_argument("--validation_file", type=str, help="Path to validation set, if given, append to training data")
parser.add_argument("--validation_ratio", type=float, default=0.1, help="Ratio of validation set, defult to 10%")
parser.add_argument("--testing_ratio", type=float, default=0.1, help="Ratio of testing set, defult to 10%")
parser.add_argument("--output_dir", type=str, required=True, help="Path to output folder")
args = parser.parse_args()
return args
def split_to_jsonl(data, output_dir, validation_ratio, testing_ratio):
print("Preprocessing data to NeMo_SFT jsonl format...")
output_path_tra = os.path.join(output_dir, "training.jsonl")
output_path_val = os.path.join(output_dir, "validation.jsonl")
output_path_tst = os.path.join(output_dir, "testing.jsonl")
data_ct = len(data)
val_threshold = int(data_ct * validation_ratio)
test_threshold = int(data_ct * testing_ratio)
with open(output_path_val, "w") as g, open(output_path_tst, "w") as h, open(output_path_tra, "w") as i:
for index, item in data.iterrows():
context = item["context"].strip()
if context != "":
# Randomize context and instruction order.
context_first = np.random.randint(0, 2) == 0
if context_first:
instruction = item["instruction"].strip()
assert instruction != ""
input = f"{context}\n\n{instruction}"
output = item["response"]
else:
instruction = item["instruction"].strip()
assert instruction != ""
input = f"{instruction}\n\n{context}"
output = item["response"]
else:
input = item["instruction"]
output = item["response"]
# write to jsonl file according to index
if index < val_threshold:
g.write(json.dumps({"input": input, "output": output}) + "\n")
elif index < val_threshold + test_threshold:
h.write(json.dumps({"input": input, "output": output}) + "\n")
else:
i.write(json.dumps({"input": input, "output": output}) + "\n")
print(f"{index+1} out of {data_ct} Data was successfully preprocessed and saved.")
def main():
args = data_args()
# load training data
path_to_train = args.training_file
train = pd.read_json(path_to_train, lines=True)
# load validation data if provided and append to training data
if args.validation_file:
path_to_val = args.validation_file
val = pd.read_json(path_to_val, lines=True)
train = pd.concat([train, val])
# randomize the order of the data
data_full = train.sample(frac=1, random_state=0).reset_index(drop=True)
# split data into training, validation and testing
val_ratio = args.validation_ratio
test_ratio = args.testing_ratio
output_dir = args.output_dir
split_to_jsonl(data_full, output_dir, val_ratio, test_ratio)
if __name__ == "__main__":
main()
| NVFlare-main | integration/nemo/examples/supervised_fine_tuning/utils/preprocess_dolly.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pandas as pd
import pyarrow.parquet as pq
def data_args():
parser = argparse.ArgumentParser(description="Preprocess data to train and validation files in jsonl format")
parser.add_argument("--training_file", type=str, required=True, help="Path to training set")
parser.add_argument("--validation_file", type=str, help="Path to validation set, if given, append to training data")
parser.add_argument("--validation_ratio", type=float, default=0.1, help="Ratio of validation set, defult to 10%")
parser.add_argument("--testing_ratio", type=float, default=0.1, help="Ratio of testing set, defult to 10%")
parser.add_argument("--output_dir", type=str, required=True, help="Path to output folder")
args = parser.parse_args()
return args
def get_data_for_sft(data):
data_assistant = data[(data.role == "assistant") & (data["rank"] == 0.0)].copy()
data_prompter = data[(data.role == "prompter")].copy()
data_prompter = data_prompter.set_index("message_id")
data_assistant["output"] = data_assistant["text"].values
inputs = []
parent_ids = []
for index, item in data_assistant.iterrows():
input = data_prompter.loc[item.parent_id]
inputs.append(input.text)
parent_ids.append(input.parent_id)
data_assistant["instruction"] = inputs
data_assistant["parent_id"] = parent_ids
data_assistant = data_assistant[data_assistant.lang == "en"]
data_assistant = data_assistant[["instruction", "output"]]
return data_assistant
def split_to_jsonl(data, output_dir, validation_ratio, testing_ratio):
print("Preprocessing data to NeMo_SFT jsonl format...")
output_path_tra = os.path.join(output_dir, "training.jsonl")
output_path_val = os.path.join(output_dir, "validation.jsonl")
output_path_tst = os.path.join(output_dir, "testing.jsonl")
data_ct = len(data)
val_threshold = int(data_ct * validation_ratio)
test_threshold = int(data_ct * testing_ratio)
with open(output_path_val, "w") as g, open(output_path_tst, "w") as h, open(output_path_tra, "w") as i:
for index, item in data.iterrows():
input = item["instruction"]
output = item["output"]
# write to jsonl file according to index
if index < val_threshold:
g.write(json.dumps({"input": input, "output": output}) + "\n")
elif index < val_threshold + test_threshold:
h.write(json.dumps({"input": input, "output": output}) + "\n")
else:
i.write(json.dumps({"input": input, "output": output}) + "\n")
print(f"{index+1} out of {data_ct} Data was successfully preprocessed and saved.")
def main():
args = data_args()
# load training data
path_to_train = args.training_file
ds = pq.read_table(path_to_train)
data = ds.to_pandas()
train = get_data_for_sft(data)
# load validation data if provided and append to training data
if args.validation_file:
path_to_val = args.validation_file
ds = pq.read_table(path_to_val)
data = ds.to_pandas()
val = get_data_for_sft(data)
train = pd.concat([train, val])
# randomize the order of the data
data_full = train.sample(frac=1, random_state=0).reset_index(drop=True)
# split data into training, validation and testing
val_ratio = args.validation_ratio
test_ratio = args.testing_ratio
output_dir = args.output_dir
split_to_jsonl(data_full, output_dir, val_ratio, test_ratio)
if __name__ == "__main__":
main()
| NVFlare-main | integration/nemo/examples/supervised_fine_tuning/utils/preprocess_oasst1.py |
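# A tiny sketch of the parent lookup in get_data_for_sft() above: each rank-0 assistant row
# is joined to its prompter parent via message_id/parent_id to form an (instruction, output)
# pair. The two rows below are invented.
import pandas as pd
from preprocess_oasst1 import get_data_for_sft  # assumed local import

rows = pd.DataFrame(
    [
        {"message_id": "p1", "parent_id": None, "role": "prompter", "rank": None, "lang": "en", "text": "What is FL?"},
        {"message_id": "a1", "parent_id": "p1", "role": "assistant", "rank": 0.0, "lang": "en", "text": "Federated learning is ..."},
    ]
)
print(get_data_for_sft(rows))  # one row: instruction="What is FL?", output="Federated learning is ..."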
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import shutil
def load_config(config_file):
with open(config_file, "r") as f:
return json.load(f)
def save_config(config_file, config):
with open(config_file, "w") as f:
json.dump(config, f, indent=2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--job_folder",
type=str,
help="Folder containing job config files in JSON format.",
)
parser.add_argument(
"--template_folder", type=str, help="Template job folder.", default="jobs/templates"
)
parser.add_argument("--num_clients", type=int, help="Number of client app folders to generate.", default=3)
parser.add_argument("--aggregation_epochs", type=int, help="Local number of aggregation epochs.", default=1)
parser.add_argument("--num_rounds", type=int, help="Number of FL rounds.", default=1)
parser.add_argument("--devices", type=int, help="Number of GPU devices per client.", default=1)
parser.add_argument(
"--root_dir", type=str, help="Root folder containing the example with data and models.", default=os.getcwd()
)
parser.add_argument(
"--val_ds_files",
type=str,
help="Validation files.",
default="data/FinancialPhraseBank-v1.0/financial_phrase_bank_val.jsonl",
)
parser.add_argument(
"--train_ds_files_prefix",
type=str,
help="Training files prefix.",
default="data/FinancialPhraseBank-v1.0_split/site-",
)
args = parser.parse_args()
# create client app folders
for i in range(args.num_clients):
app_folder = os.path.join(args.job_folder, f"app{i+1}")
client_cfg_file = os.path.join(app_folder, "config", "config_fed_client.json")
shutil.copytree(os.path.join(args.template_folder, "client"), app_folder, dirs_exist_ok=True)
# remove unused client config
if isinstance(args.devices, int) and args.devices == 1:
os.remove(os.path.join(app_folder, "config", "config_fed_client_multiprocess.json"))
elif isinstance(args.devices, int) and args.devices > 1:
shutil.move(os.path.join(app_folder, "config", "config_fed_client_multiprocess.json"), client_cfg_file)
else:
raise ValueError(f"Number client devices should be positive integer but was {args.devices}")
# modify client configs
client_cfg = load_config(client_cfg_file)
client_cfg["ROOT_DIR"] = args.root_dir
client_cfg["train_ds_files"] = os.path.join(args.root_dir, f"{args.train_ds_files_prefix}{i + 1}.jsonl")
client_cfg["val_ds_files"] = os.path.join(args.root_dir, args.val_ds_files)
client_cfg["aggregation_epochs"] = args.aggregation_epochs
if args.devices > 1:
client_cfg["devices"] = args.devices
save_config(client_cfg_file, client_cfg)
# modify server config
app_folder = os.path.join(args.job_folder, "server")
shutil.copytree(os.path.join(args.template_folder, "server"), app_folder, dirs_exist_ok=True)
server_cfg_file = os.path.join(app_folder, "config", "config_fed_server.json")
server_cfg = load_config(server_cfg_file)
server_cfg["min_clients"] = args.num_clients
server_cfg["num_rounds"] = args.num_rounds
if args.devices > 1:
server_cfg["hidden_size"] = 6144 # use for 20B GPT model
save_config(server_cfg_file, server_cfg)
# modify meta.json
meta_cfg_file = os.path.join(args.job_folder, "meta.json")
shutil.copyfile(os.path.join(args.template_folder, "meta.json"), meta_cfg_file)
meta_cfg = load_config(meta_cfg_file)
meta_cfg["name"] = os.path.basename(args.job_folder)
meta_cfg["deploy_map"] = {"server": ["server"]}
for i in range(args.num_clients):
meta_cfg["deploy_map"][f"app{i+1}"] = [f"site-{i+1}"]
save_config(meta_cfg_file, meta_cfg)
print(f"Created configs for {args.num_clients} clients and set ROOT_DIR to {args.root_dir}")
if __name__ == "__main__":
main()
| NVFlare-main | integration/nemo/examples/prompt_learning/create_configs.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import pandas as pd
def split_data(data_path, out_dir, num_clients, site_name_prefix, seed):
# use pandas to read jsonl format
train_data = pd.read_json(data_path, lines=True)
assert len(train_data) > 0, f"No data loaded from {data_path}"
print(f"Loaded training data with {len(train_data)} entries")
# shuffle the data
train_data = train_data.sample(frac=1, random_state=seed)
train_data_splits = np.array_split(train_data, num_clients)
for idx, split in enumerate(train_data_splits):
df = pd.DataFrame(split)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_file = os.path.join(out_dir, f"{site_name_prefix}{idx+1}.jsonl")
df.to_json(out_file, orient="records", lines=True)
print(f"Save split {idx+1} of {len(train_data_splits)} with {len(split)} entries to {out_file}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate data split for dataset")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--out_dir", type=str, help="Path to output directory", default=".")
parser.add_argument("--num_clients", type=int, help="Total number of clients", default=3)
parser.add_argument("--random_seed", type=int, help="Random seed", default=0)
parser.add_argument("--site_name_prefix", type=str, help="Site name prefix", default="site-")
args = parser.parse_args()
split_data(
data_path=args.data_path,
out_dir=args.out_dir,
num_clients=args.num_clients,
site_name_prefix=args.site_name_prefix,
seed=args.random_seed,
)
| NVFlare-main | integration/nemo/examples/prompt_learning/data/split_financial_phrase_data.py |
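# Sketch of the split behavior above: np.array_split distributes rows as evenly as
# possible, so e.g. 10 rows across 3 clients yields splits of sizes 4, 3 and 3.
import numpy as np
import pandas as pd

df = pd.DataFrame({"sentence": [f"s{i}" for i in range(10)]})
splits = np.array_split(df, 3)
print([len(s) for s in splits])  # [4, 3, 3]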
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import nemo
import numpy as np
import pytorch_lightning as pl
import torch
from nemo.collections.nlp.modules.common import VirtualPromptStyle
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from omegaconf import OmegaConf
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
from nvflare.fuel.utils.network_utils import get_open_ports
from .callbacks import RestoreOptimizers
from .constants import NemoDataKind
from .fed_megatron_gpt_prompt_learning_model import FedMegatronGPTPromptLearningModel
from .utils import compute_model_diff, load_weights
print("NEMO version", nemo.__version__)
# configure logging at the root logging level
logging.getLogger().setLevel(logging.INFO)
def set_datafile_paths(files, app_root):
new_files = []
for f in files:
f = os.path.join(app_root, f)
if not os.path.isfile(f):
raise ValueError(f"No such file {f}!")
new_files.append(f)
return new_files
class PromptLearner(Learner):
def __init__(
self,
config_path: str = None,
train_ds_files: str = "financial_phrase_bank_train.jsonl",
val_ds_files: str = "financial_phrase_bank_val.jsonl",
task_templates_file: str = None,
gpt_file_name: str = "megatron_gpt_345m.nemo",
nemo_path: str = "multitask_p_tuned_gpt.nemo",
exp_name: str = "prompt_learning",
existing_tasks: str = None,
new_tasks: str = "taskname",
aggregation_epochs: int = 1,
master_addr: str = "localhost",
master_port: int = None,
devices: int = 1,
virtual_prompt_style=VirtualPromptStyle.P_TUNING,
key_metric: str = "global_model_val_loss",
negate_key_metric: bool = True,
):
"""Support prompt learning with NeMo
Args:
config_path: NeMo model config file
train_ds_files: Training dataset files.
val_ds_files: Validation dataset files.
task_templates_file: Task template file
gpt_file_name: Pre-trained nemo model file.
nemo_path: Where to store the locally p-tuned model.
exp_name: Name of current experiment.
existing_tasks: Existing task names.
new_tasks: New task name.
aggregation_epochs: the number of training epochs for a round.
master_addr: Master node (rank 0)'s address, should be either the IP address or the hostname of node 0.
master_port: Master node (rank 0)'s free port.
devices: number of devices for the cluster environment.
virtual_prompt_style: Style of prompt learning method. Defaults to p-tuning (`VirtualPromptStyle.P_TUNING`).
key_metric: Key metric for global model selection. Defaults to `"global_model_val_loss"`.
negate_key_metric: Whether to negate the key metric. Should be used if the key metric is a loss. Defaults to `True`.
Returns:
a Shareable with the updated local model after running `train()`
or the validation metric when calling `validate()`.
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.config_path = config_path
self.train_ds_files = train_ds_files
self.val_ds_files = val_ds_files
self.task_templates_file = task_templates_file
self.gpt_file_name = gpt_file_name
self.nemo_path = nemo_path
self.exp_name = exp_name
self.existing_tasks = existing_tasks
self.new_tasks = new_tasks
self.aggregation_epochs = aggregation_epochs
self.master_addr = master_addr
self.master_port = master_port
self.devices = devices
self.virtual_prompt_style = virtual_prompt_style
self.key_metric = key_metric
self.negate_key_metric = negate_key_metric
self.app_root = None
self.client_id = None
self.config = None
self.trainer = None
self.model = None
self.is_configured = False
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def set_configs(self, configs):
if not isinstance(configs, dict):
raise ValueError(f"Exptected configs to be of type dict but received type {type(configs)}")
# Received primitive dicts from server; convert back to OmegaConf
if NemoDataKind.NEMO_CONFIG in configs:
self.config = OmegaConf.create(configs[NemoDataKind.NEMO_CONFIG])
else:
raise ValueError(f"Received configs did not contain nemo configs! Received keys: {list(configs.keys())}")
if NemoDataKind.TASK_TEMPLATES in configs:
self.config.model.task_templates = OmegaConf.create(configs[NemoDataKind.TASK_TEMPLATES])
else:
raise ValueError(f"Received configs did not contain task templates! Received keys: {list(configs.keys())}")
def initialize(self, parts: dict, fl_ctx: FLContext):
"""
Build model, training & validation data sets
"""
# when the run starts, this is where the actual settings get initialized for trainer
self.log_info(fl_ctx, "Initializing the Learner...")
self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.client_id = fl_ctx.get_identity_name()
if self.devices > 1:
# distributed environment is set by PTMultiProcessExecutor
if "MASTER_ADDR" not in os.environ or "MASTER_PORT" not in os.environ:
raise ValueError(
f"Distributed environment not set up correctly for {self.devices} devices. "
f"Did you use `PTMultiProcessExecutor`?"
)
else:
            # Set up cluster environment parameters.
            # Use the torch elastic cluster environment so `create_process_externally` is True;
            # the launcher is then None and will not try to spawn new processes,
            # which avoids the misconfiguration error raised in interactive sessions.
os.environ["MASTER_ADDR"] = self.master_addr
os.environ["MASTER_PORT"] = str(self.master_port) if self.master_port else str(get_open_ports(1)[0])
os.environ["LOCAL_RANK"] = "0"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
self.log_info(
fl_ctx,
f"Running with distributed environment: LOCAL_RANK: {os.environ['LOCAL_RANK']}, "
f"RANK: {os.environ['RANK']}, WORLD_SIZE {os.environ['WORLD_SIZE']}, "
f"MASTER_ADDR: {os.environ['MASTER_ADDR']}, and MASTER_PORT: {os.environ['MASTER_PORT']}",
)
def _check_new_tasks(self):
template_tasks = [t.get("taskname") for t in self.config.model.task_templates]
missing_tasks = []
for _new_task in self.new_tasks:
if _new_task not in template_tasks:
missing_tasks.append(_new_task)
        if missing_tasks:
raise ValueError(f"New tasks {missing_tasks} not specified in task templates {template_tasks}!")
def _configure(self, fl_ctx: FLContext):
self.log_info(fl_ctx, "Configuring the Learner...")
# Load model configuration
if self.config_path is not None:
if self.config is not None:
self.log_warning(fl_ctx, "Attempting to overwrite config received from server...")
self.config_path = os.path.join(self.app_root, self.config_path)
self.log_info(fl_ctx, f"Load model configuration from {self.config_path}")
self.config = OmegaConf.load(self.config_path)
if self.config is None:
raise ValueError("No configuration was received or loaded!")
# Load task templates
if self.task_templates_file is not None:
if self.config.model.task_templates is not None:
self.log_warning(fl_ctx, "Attempting to overwrite task templates received from server...")
self.task_templates_file = os.path.join(self.app_root, self.task_templates_file)
self.log_info(fl_ctx, f"Load task templates from {self.task_templates_file}")
self.config.model.task_templates = OmegaConf.load(self.task_templates_file)
if self.config.model.task_templates is None:
raise ValueError("No task templates were received or loaded!")
# Specify existing tasks
if not self.existing_tasks:
self.config.model.existing_tasks = []
else:
self.config.model.existing_tasks = self.existing_tasks
# Set tasks to learn
if not isinstance(self.new_tasks, list):
self.new_tasks = [self.new_tasks]
self.config.model.new_tasks = self.new_tasks
# check if all new tasks are in the task templates
self._check_new_tasks()
# Configure training sets
if not isinstance(self.train_ds_files, list):
self.train_ds_files = [self.train_ds_files]
if not isinstance(self.val_ds_files, list):
self.val_ds_files = [self.val_ds_files]
self.config.model.data.train_ds = set_datafile_paths(self.train_ds_files, self.app_root)
self.config.model.data.validation_ds = set_datafile_paths(self.val_ds_files, self.app_root)
# Set GPT model path on prompt learning config
self.config.model.language_model_path = self.gpt_file_name
# We can also set where we want the final prompt tuned model to be saved by setting `model.nemo_path`.
self.config.model.nemo_path = os.path.join(self.app_root, self.nemo_path)
# Setting P-Tuning Specific Params
self.config.model.virtual_prompt_style = self.virtual_prompt_style
# Configure in yaml file
self.log_info(
fl_ctx,
f"Training with global_batch_size {self.config.model.global_batch_size}"
f" and micro_batch_size {self.config.model.micro_batch_size}",
)
# for PyTorch Native AMP set precision=16 (use value from config yaml)
self.config.trainer.accelerator = "gpu" if torch.cuda.is_available() else "cpu"
self.config.model.tensor_model_parallel_size = self.devices
self.config.trainer.devices = self.devices
self.config.trainer.max_epochs = -1 # Needed to continue fit() in next round
strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True)
plugins = [TorchElasticEnvironment()]
# Add TensorBoard logger
self.config.trainer.logger = True
self.config.trainer.default_root_dir = self.app_root
self.trainer = pl.Trainer(
plugins=plugins, strategy=strategy, callbacks=[RestoreOptimizers()], **self.config.trainer
)
self.config.model.precision = self.config.trainer.precision
# Set name of the experiment
self.config.name = self.exp_name
self.log_info(fl_ctx, f"Model config - {OmegaConf.to_yaml(self.config.model)}")
self.log_info(fl_ctx, f"Trainer config - {OmegaConf.to_yaml(self.config.trainer)}")
# The only thing left to do is load up the model and begin p-tuning!
self.model = FedMegatronGPTPromptLearningModel(cfg=self.config.model, trainer=self.trainer)
self.model.init_prompt_encoder()
self.is_configured = True
self.log_info(
fl_ctx, f"Initialized model {type(self.model)} and prompt encoder {type(self.model.prompt_encoder)}"
)
def finalize(self, fl_ctx: FLContext):
# collect threads, close files here
pass
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
if not self.is_configured:
self._configure(fl_ctx)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
if current_round > 0:
self.trainer.num_sanity_val_steps = 0 # Turn off sanity validation steps in 2nd round of FL
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
if not self.model.prompt_encoder:
raise ValueError("Prompt encoder is not available!")
n_loaded = load_weights(self.model, global_weights, device=self.device)
self.log_info(fl_ctx, f"Loaded {n_loaded} of {len(global_weights)} weights")
self.log_info(fl_ctx, f"Start training in round {current_round}")
self.trainer.fit_loop.max_epochs = self.trainer.current_epoch + self.aggregation_epochs
self.model.log_global = False
self.trainer.fit(self.model)
model_diff = compute_model_diff(self.model, global_weights)
self.log_info(
fl_ctx, f"Computed {len(model_diff)} weight differences for global model of length {len(global_weights)}"
)
# Get local steps from data loader
epoch_len = len(self.model._train_dl)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
if not self.is_configured:
self._configure(fl_ctx)
if not self.model.prompt_encoder:
raise ValueError("Prompt encoder is not available!")
n_loaded = load_weights(self.model, global_weights, device=self.device)
self.log_info(fl_ctx, f"Loaded {n_loaded} of {len(global_weights)} weights")
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # perform validation before local training
self.model.log_global = True # enable logging the global metric
global_metrics = self.trainer.validate(self.model)
metric = global_metrics[0].get(self.key_metric, np.nan)
self.log_info(fl_ctx, f"Global_model {self.key_metric}: {metric}")
if self.negate_key_metric:
metric = -1.0 * metric
# use negative validation loss as validation metric
return DXO(data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: metric}, meta={}).to_shareable()
else:
return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | integration/nemo/nemo_nvflare/prompt_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.executors.learner_executor import LearnerExecutor
from .constants import NemoConstants, NemoDataKind
class NemoLearnerExecutor(LearnerExecutor):
def __init__(
self,
learner_id,
train_task=AppConstants.TASK_TRAIN,
submit_model_task=AppConstants.TASK_SUBMIT_MODEL,
validate_task=AppConstants.TASK_VALIDATION,
share_config_task=NemoConstants.TASK_SHARE_CONFIG,
):
"""Key component to run learner on clients.
Args:
learner_id (str): id of the learner object
train_task (str, optional): task name for train. Defaults to AppConstants.TASK_TRAIN.
submit_model_task (str, optional): task name for submit model. Defaults to AppConstants.TASK_SUBMIT_MODEL.
validate_task (str, optional): task name for validation. Defaults to AppConstants.TASK_VALIDATION.
share_config_task (str, optional): share config task name.
"""
super().__init__(
learner_id=learner_id,
train_task=train_task,
submit_model_task=submit_model_task,
validate_task=validate_task,
)
self.share_config_task = share_config_task
self.is_initialized = False
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if not self.is_initialized:
self.is_initialized = True
self.initialize(fl_ctx)
if task_name == self.share_config_task:
self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
try:
return self._set_learner_configs(shareable, fl_ctx, abort_signal)
except Exception as e:
self.log_error(fl_ctx, f"Setting config failed with exception {e}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return super().execute(task_name=task_name, shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
def _set_learner_configs(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
dxo = from_shareable(shareable)
if dxo.data_kind != NemoDataKind.CONFIGS:
raise ValueError(f"Expected DXO data to be of kind NemoDataKind.CONFIGS but got {dxo.data_kind}")
if not dxo.data:
raise ValueError("Received config data is empty!")
self.learner.set_configs(configs=dxo.data)
self.log_info(fl_ctx, f"Received config with {len(dxo.data)} entries from server.")
return make_reply(ReturnCode.OK)
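# Dispatch sketch for NemoLearnerExecutor.execute (task names are the defaults above):
# a "share_config" task is handled locally by `_set_learner_configs`, while the
# train/validate/submit_model tasks fall through to LearnerExecutor.execute.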
| NVFlare-main | integration/nemo/nemo_nvflare/learner_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NemoConstants(object):
TASK_SHARE_CONFIG = "share_config"
class NemoDataKind(object):
CONFIGS = "nemo_configs"
NEMO_CONFIG = "nemo_config"
TASK_TEMPLATES = "nemo_task_templates"
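if __name__ == "__main__":
    # Minimal sketch of the config payload these constants key into; the values
    # below are placeholders, not a real NeMo config or task template.
    configs = {
        NemoDataKind.NEMO_CONFIG: {"trainer": {"devices": 1}},
        NemoDataKind.TASK_TEMPLATES: [{"taskname": "sentiment"}],
    }
    assert set(configs) <= {NemoDataKind.NEMO_CONFIG, NemoDataKind.TASK_TEMPLATES}
    print(f"Payload of kind '{NemoDataKind.CONFIGS}' with {len(configs)} entries")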
| NVFlare-main | integration/nemo/nemo_nvflare/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import nemo
import torch
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.parts.nlp_overrides import (
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.utils.exp_manager import exp_manager
from omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
from nvflare.fuel.utils.network_utils import get_open_ports
from .callbacks import RestoreOptimizers
from .constants import NemoDataKind
from .utils_sft import compute_model_diff, load_weights
print("NEMO version", nemo.__version__)
# configure logging at the root logging level
logging.getLogger().setLevel(logging.INFO)
def _modify_config(gpt_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original gpt pre-training config (gpt_cfg) with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(gpt_cfg, True)
OmegaConf.resolve(cfg)
with open_dict(gpt_cfg):
gpt_cfg.megatron_amp_O2 = cfg.model.get("megatron_amp_O2", False)
gpt_cfg.micro_batch_size = cfg.model.data.train_ds.micro_batch_size
gpt_cfg.global_batch_size = cfg.model.data.train_ds.global_batch_size
gpt_cfg.sequence_parallel = cfg.model.get("sequence_parallel", False)
gpt_cfg.activations_checkpoint_granularity = cfg.model.get("activations_checkpoint_granularity", None)
gpt_cfg.activations_checkpoint_num_layers = cfg.model.get("activations_checkpoint_num_layers", None)
gpt_cfg.activations_checkpoint_method = cfg.model.get("activations_checkpoint_method", None)
gpt_cfg.data = cfg.model.data
gpt_cfg.optim = cfg.model.optim
gpt_cfg.precision = cfg.trainer.precision
gpt_cfg.answer_only_loss = cfg.model.answer_only_loss
gpt_cfg.restore_from_path = cfg.model.restore_from_path
gpt_cfg.resume_from_checkpoint = cfg.model.resume_from_checkpoint
gpt_cfg.save_nemo_on_validation_end = cfg.model.save_nemo_on_validation_end
gpt_cfg.gradient_as_bucket_view = cfg.model.gradient_as_bucket_view
gpt_cfg.hidden_dropout = cfg.model.get("hidden_dropout", 0.0)
gpt_cfg.attention_dropout = cfg.model.get("attention_dropout", 0.0)
gpt_cfg.ffn_dropout = cfg.model.ffn_dropout
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(gpt_cfg)
gpt_cfg.cfg = gpt_cfg
return gpt_cfg
def load_from_nemo(cls, cfg, trainer, gpt_cfg, modify_config_fn):
gpt_cfg = modify_config_fn(gpt_cfg, cfg, add_cfg_to_tree=False)
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.model.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.model.restore_from_path
model = cls.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
override_config_path=gpt_cfg,
save_restore_connector=save_restore_connector,
)
return model
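# Flow sketch: `_modify_config` overlays the fine-tuning settings (batch sizes,
# data, optimizer, dropout, ...) onto the pretrained model's config, and
# `load_from_nemo` then restores the base .nemo checkpoint under that
# overridden config.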
class SFTLearner(Learner):
def __init__(
self,
config_path: str = None,
train_ds_files: str = "financial_phrase_bank_train.jsonl",
validation_ds_files: str = "financial_phrase_bank_val.jsonl",
base_model_file_path: str = "megatron_gpt_345m.nemo",
sft_model_file_path: str = "megatron_gpt_345m_sft.nemo",
aggregation_epochs: int = 1,
master_addr: str = "localhost",
master_port: int = None,
devices: int = 1,
key_metric: str = "val_loss",
):
"""Support SFT (Supervised Fine-Tuning) learning with NeMo
Args:
config_path: NeMo model config file
train_ds_files: Training dataset files.
validation_ds_files: Validation dataset files.
base_model_file_path: Pre-trained nemo model file.
sft_model_file_path: Where to store the local SFT model.
aggregation_epochs: the number of training epochs for a round.
            master_addr: Master node (rank 0)'s address; either the IP address or the hostname of node 0.
master_port: Master node (rank 0)'s free port.
            devices: number of devices for the cluster environment.
            key_metric: Key metric for global model selection. Defaults to `"val_loss"`.
Returns:
a Shareable with the updated local model after running `train()`
or the validation metric when calling `validate()`.
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.config_path = config_path
self.train_ds_files = train_ds_files
self.validation_ds_files = validation_ds_files
self.base_model_file_path = base_model_file_path
self.sft_model_file_path = sft_model_file_path
self.aggregation_epochs = aggregation_epochs
self.master_addr = master_addr
self.master_port = master_port
self.devices = devices
self.key_metric = key_metric
self.app_root = None
self.client_id = None
self.config = None
self.trainer = None
self.model = None
self.is_configured = False
self.steps_per_round = None
self.scaler = None
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def set_configs(self, configs):
if not isinstance(configs, dict):
raise ValueError(f"Exptected configs to be of type dict but received type {type(configs)}")
# Received primitive dicts from server; convert back to OmegaConf
if NemoDataKind.NEMO_CONFIG in configs:
self.config = OmegaConf.create(configs[NemoDataKind.NEMO_CONFIG])
else:
raise ValueError(f"Received configs did not contain nemo configs! Received keys: {list(configs.keys())}")
def initialize(self, parts: dict, fl_ctx: FLContext):
"""
Build model, training & validation data sets
"""
# when the run starts, this is where the actual settings get initialized for trainer
self.log_info(fl_ctx, "Initializing the Learner...")
self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.client_id = fl_ctx.get_identity_name()
if self.devices > 1:
# distributed environment is set by PTMultiProcessExecutor
if "MASTER_ADDR" not in os.environment or "MASTER_PORT" not in os.environment:
raise ValueError(
f"Distributed environment not set up correctly for {self.devices} devices. "
f"Did you use `PTMultiProcessExecutor`?"
)
else:
            # Set up cluster environment parameters.
            # Use the torch elastic cluster environment so `create_process_externally` is True;
            # the launcher is then None and will not try to spawn new processes,
            # which avoids the misconfiguration error raised in interactive sessions.
os.environ["MASTER_ADDR"] = self.master_addr
os.environ["MASTER_PORT"] = str(self.master_port) if self.master_port else str(get_open_ports(1)[0])
os.environ["LOCAL_RANK"] = "0"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
self.log_info(
fl_ctx,
f"Running with distributed environment: LOCAL_RANK: {os.environ['LOCAL_RANK']}, "
f"RANK: {os.environ['RANK']}, WORLD_SIZE {os.environ['WORLD_SIZE']}, "
f"MASTER_ADDR: {os.environ['MASTER_ADDR']}, and MASTER_PORT: {os.environ['MASTER_PORT']}",
)
def _configure(self, fl_ctx: FLContext):
self.log_info(fl_ctx, "Configuring the Learner...")
# Load model configuration
if self.config_path is not None:
if self.config is not None:
self.log_warning(fl_ctx, "Attempting to overwrite config received from server...")
self.config_path = os.path.join(self.app_root, self.config_path)
self.log_info(fl_ctx, f"Load model configuration from {self.config_path}")
self.config = OmegaConf.load(self.config_path)
if self.config is None:
raise ValueError("No configuration was received or loaded!")
# Configure training sets
if not os.path.isfile(self.train_ds_files):
raise ValueError(f"Training data file not found: {self.train_ds_files}!")
if not os.path.isfile(self.validation_ds_files):
raise ValueError(f"Validation data file not found: {self.validation_ds_files}!")
self.config.model.data.train_ds.file_names = [self.train_ds_files]
self.config.model.data.validation_ds.file_names = [self.validation_ds_files]
# Set the base model path for further SFT
self.config.model.restore_from_path = self.base_model_file_path
# We can also set where we want the final SFT tuned model to be saved by setting `model.nemo_path`.
self.config.model.nemo_path = os.path.join(self.app_root, self.sft_model_file_path)
# Configure in yaml file
self.log_info(
fl_ctx,
f"Training with global_batch_size {self.config.model.global_batch_size}"
f" and micro_batch_size {self.config.model.micro_batch_size}",
)
# for PyTorch Native AMP set precision=16 (use value from config yaml)
self.config.trainer.accelerator = "gpu" if torch.cuda.is_available() else "cpu"
self.config.model.tensor_model_parallel_size = self.devices
self.config.trainer.devices = self.devices
# self.config.trainer.max_epochs = -1 # Needed to continue fit() in next round
megatron_amp_o2 = self.config.model.get("megatron_amp_O2", False)
with_distributed_adam = self.config.model.optim.get("name", "fused_adam") == "distributed_fused_adam"
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=self.config.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if self.config.trainer.precision in [16, "bf16"]:
if self.config.trainer.precision == 16:
self.scaler = GradScaler(
init_scale=self.config.model.get("native_amp_init_scale", 2**32),
growth_interval=self.config.model.get("native_amp_growth_interval", 1000),
hysteresis=self.config.model.get("hysteresis", 2),
)
if megatron_amp_o2 and not with_distributed_adam:
plugins.append(
MegatronHalfPrecisionPlugin(
precision=self.config.trainer.precision, device="cuda", scaler=self.scaler
)
)
else:
plugins.append(
PipelineMixedPrecisionPlugin(
precision=self.config.trainer.precision, device="cuda", scaler=self.scaler
)
)
# Add TensorBoard logger
self.config.exp_manager.explicit_log_dir = self.app_root
self.trainer = Trainer(
plugins=plugins, strategy=strategy, callbacks=[RestoreOptimizers()], **self.config.trainer
)
exp_manager(self.trainer, self.config.exp_manager)
self.log_info(fl_ctx, f"Model config - {OmegaConf.to_yaml(self.config.model)}")
self.log_info(fl_ctx, f"Trainer config - {OmegaConf.to_yaml(self.config.trainer)}")
# Load pretrained model from nemo
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(self.config.model.restore_from_path):
save_restore_connector.model_extracted_dir = self.config.model.restore_from_path
gpt_cfg = MegatronGPTSFTModel.restore_from(
restore_path=self.config.model.restore_from_path,
trainer=self.trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
self.model = load_from_nemo(
MegatronGPTSFTModel, self.config, self.trainer, gpt_cfg, modify_config_fn=_modify_config
)
self.is_configured = True
self.log_info(fl_ctx, f"Initialized model {type(self.model)}")
def finalize(self, fl_ctx: FLContext):
# collect threads, close files here
pass
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
if not self.is_configured:
self._configure(fl_ctx)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
if current_round == 0:
self.steps_per_round = self.trainer.fit_loop.max_steps
if current_round > 0:
            self.trainer.num_sanity_val_steps = 0  # Turn off sanity validation steps in 2nd round of FL
self.trainer.fit_loop.max_epochs += self.aggregation_epochs
self.trainer.fit_loop.max_steps += self.steps_per_round
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
n_loaded = load_weights(self.model, global_weights, device=self.device)
self.log_info(fl_ctx, f"Loaded {n_loaded} of {len(global_weights)} weights")
self.log_info(fl_ctx, f"Start training in round {current_round}")
self.log_info(fl_ctx, f"Current max_steps {self.trainer.fit_loop.max_steps}")
self.log_info(fl_ctx, f"Current max_epochs {self.trainer.fit_loop.max_epochs}")
self.model.log_global = False
self.trainer.fit(self.model)
model_diff = compute_model_diff(self.model, global_weights)
self.log_info(
fl_ctx, f"Computed {len(model_diff)} weight differences for global model of length {len(global_weights)}"
)
# Get local steps from data loader
epoch_len = len(self.model._train_dl)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
if not self.is_configured:
self._configure(fl_ctx)
n_loaded = load_weights(self.model, global_weights, device=self.device)
self.log_info(fl_ctx, f"Loaded {n_loaded} of {len(global_weights)} weights")
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # perform validation before local training
self.model.log_global = True # enable logging the global metric
global_metrics = self.trainer.validate(self.model)
metric = global_metrics[0].get(self.key_metric)
self.log_info(fl_ctx, f"Global_model {self.key_metric}: {metric}")
# use validation loss as validation metric
return DXO(data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: metric}, meta={}).to_shareable()
else:
return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | integration/nemo/nemo_nvflare/sft_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .callbacks import RestoreOptimizers
from .config_sharer import ConfigSharer
from .config_sharer_sft import ConfigSharerSFT
from .fed_megatron_gpt_prompt_learning_model import FedMegatronGPTPromptLearningModel
from .learner_executor import NemoLearnerExecutor
from .prompt_encoder import ServerPromptEncoder
from .prompt_learner import PromptLearner
from .server_sft_model import ServerSFTModel
from .sft_learner import SFTLearner
from .share_config import ShareConfig
from .share_config_sft import ShareConfigSFT
| NVFlare-main | integration/nemo/nemo_nvflare/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from nvflare.app_common.workflows.broadcast_and_process import BroadcastAndProcess
from .config_sharer_sft import ConfigSharerSFT
from .constants import NemoConstants
class ShareConfigSFT(BroadcastAndProcess):
def __init__(
self,
config_path: str = "config/megatron_gpt_sft.yaml",
task_name: str = NemoConstants.TASK_SHARE_CONFIG,
min_responses_required: int = 0,
wait_time_after_min_received: int = 0,
task_timeout: int = 0,
clients: Union[List[str], None] = None,
):
"""A controller for sharing the NeMo config files with the clients.
Args:
config_path: NeMo model config file
task_name: name of the task to be sent to clients to share configs
min_responses_required: min number of responses required. 0 means all clients.
wait_time_after_min_received: how long (secs) to wait after min responses are received
task_timeout: max amount of time to wait for the task to end. 0 means never time out.
clients: names of the clients to send config. Defaults to `None`.
If `None`, the task will be sent to all clients.
                If a list of client names is given, the config will only be sent to the listed clients.
"""
if clients is not None:
if not isinstance(clients, list):
raise ValueError(f"Expected list of client names but received {clients}")
BroadcastAndProcess.__init__(
self,
processor=ConfigSharerSFT(config_path=config_path),
task_name=task_name,
min_responses_required=min_responses_required,
wait_time_after_min_received=wait_time_after_min_received,
timeout=task_timeout,
clients=clients,
)
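# Hypothetical server job-config snippet wiring this controller as a workflow
# (ids and paths below are illustrative, not taken from a shipped example):
#
#   {"id": "share_config", "path": "nemo_nvflare.ShareConfigSFT",
#    "args": {"config_path": "config/megatron_gpt_sft.yaml"}}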
| NVFlare-main | integration/nemo/nemo_nvflare/share_config_sft.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from omegaconf import OmegaConf
from nvflare.apis.client import Client
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.response_processor import ResponseProcessor
from .constants import NemoDataKind
class ConfigSharer(ResponseProcessor):
def __init__(
self,
config_path: str = "config/megatron_gpt_prompt_learning_config.yaml",
task_templates_file: str = "config/task_templates.json",
):
"""Share the NeMo config files with the clients.
Args:
config_path: NeMo model config file
task_templates_file: Task template file
"""
super().__init__()
self.config_path = config_path
self.task_templates_file = task_templates_file
def create_task_data(self, task_name: str, fl_ctx: FLContext) -> Shareable:
"""Create the data for the task to be sent to clients
Args:
task_name: name of the task
fl_ctx: the FL context
Returns: task data
"""
# get app root
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
# Load model configuration to initialize training NeMo environment
self.config_path = os.path.join(app_root, self.config_path)
config = OmegaConf.load(self.config_path)
self.log_info(fl_ctx, f"Load model configuration from {self.config_path}")
# Load task templates
self.task_templates_file = os.path.join(app_root, self.task_templates_file)
self.log_info(fl_ctx, f"Load task templates from {self.task_templates_file}")
task_templates = OmegaConf.load(self.task_templates_file)
configs = {
NemoDataKind.NEMO_CONFIG: OmegaConf.to_container(config),
NemoDataKind.TASK_TEMPLATES: OmegaConf.to_container(task_templates),
}
# convert omega conf to primitive dict
dxo = DXO(data=configs, data_kind=NemoDataKind.CONFIGS)
return dxo.to_shareable()
def process_client_response(self, client: Client, task_name: str, response: Shareable, fl_ctx: FLContext) -> bool:
"""Process the weights submitted by a client.
Args:
client: the client that submitted the response
task_name: name of the task
response: submitted data from the client
fl_ctx: FLContext
Returns:
boolean to indicate if the client data is acceptable.
If not acceptable, the control flow will exit.
"""
# We only check for client errors here
if not isinstance(response, Shareable):
self.log_error(
fl_ctx,
f"bad response from client {client.name}: " f"response must be Shareable but got {type(response)}",
)
return False
if response.get_return_code() != ReturnCode.OK:
self.log_exception(
fl_ctx, f"bad response from client {client.name}: Got return code {response.get_return_code()}"
)
return False
return True
def final_process(self, fl_ctx: FLContext) -> bool:
"""Perform the final check. Do nothing.
Args:
fl_ctx: FLContext
Returns:
boolean indicating whether the final response processing is successful.
If not successful, the control flow will exit.
"""
# no final processing required for this task
return True
| NVFlare-main | integration/nemo/nemo_nvflare/config_sharer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
def load_weights(model, global_weights, device="cpu"):
"""Helper function to load global weights to local model"""
local_var_dict = model.state_dict()
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
n_loaded = 0
for var_name in global_weights:
if var_name not in local_var_dict:
continue
weights = torch.as_tensor(global_weights[var_name], device=device)
try:
# update the local dict
            local_var_dict[var_name] = torch.reshape(weights, local_var_dict[var_name].shape)
n_loaded += 1
except BaseException as e:
raise ValueError(f"Convert weight from {var_name} failed!") from e
model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded! Received weight dict is {global_weights}")
return n_loaded
def compute_model_diff(model, global_weights):
"""Helper function to compute the weight difference with respect to global weights"""
local_var_dict = model.state_dict()
# compute delta model, global model has the primary key set
model_diff = {}
n_diff = 0
for var_name in global_weights:
if var_name not in local_var_dict:
continue
model_diff[var_name] = np.subtract(
local_var_dict[var_name].cpu().numpy(), global_weights[var_name], dtype=np.float32
)
n_diff += 1
if np.any(np.isnan(model_diff[var_name])):
raise ValueError(f"{var_name} weights became NaN!")
if n_diff == 0:
raise ValueError("No weight differences computed!")
return model_diff
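if __name__ == "__main__":
    # Round-trip sketch with a toy model (illustrative only): loading a model's
    # own weights back should yield an all-zero weight difference.
    toy = torch.nn.Linear(2, 2)
    global_weights = {k: v.cpu().numpy() for k, v in toy.state_dict().items()}
    n = load_weights(toy, global_weights)
    diffs = compute_model_diff(toy, global_weights)
    max_abs = max(float(np.abs(d).max()) for d in diffs.values())
    print(f"loaded {n} tensors; max |diff| = {max_abs}")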
| NVFlare-main | integration/nemo/nemo_nvflare/utils_sft.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
from nemo.collections.nlp.models.language_modeling.megatron_gpt_sft_model import MegatronGPTSFTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
# configure logging at the root logging level
logging.getLogger().setLevel(logging.INFO)
def _modify_config(gpt_cfg, cfg, add_cfg_to_tree=False):
"""
This function modifies the original gpt pre-training config (gpt_cfg) with attributes from the finetuning config (cfg).
The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`.
"""
OmegaConf.set_struct(gpt_cfg, True)
OmegaConf.resolve(cfg)
with open_dict(gpt_cfg):
gpt_cfg.megatron_amp_O2 = cfg.model.get("megatron_amp_O2", False)
gpt_cfg.micro_batch_size = cfg.model.data.train_ds.micro_batch_size
gpt_cfg.global_batch_size = cfg.model.data.train_ds.global_batch_size
gpt_cfg.sequence_parallel = cfg.model.get("sequence_parallel", False)
gpt_cfg.activations_checkpoint_granularity = cfg.model.get("activations_checkpoint_granularity", None)
gpt_cfg.activations_checkpoint_num_layers = cfg.model.get("activations_checkpoint_num_layers", None)
gpt_cfg.activations_checkpoint_method = cfg.model.get("activations_checkpoint_method", None)
gpt_cfg.data = cfg.model.data
gpt_cfg.optim = cfg.model.optim
gpt_cfg.precision = cfg.trainer.precision
gpt_cfg.answer_only_loss = cfg.model.answer_only_loss
gpt_cfg.restore_from_path = cfg.model.restore_from_path
gpt_cfg.resume_from_checkpoint = cfg.model.resume_from_checkpoint
gpt_cfg.save_nemo_on_validation_end = cfg.model.save_nemo_on_validation_end
gpt_cfg.gradient_as_bucket_view = cfg.model.gradient_as_bucket_view
gpt_cfg.hidden_dropout = cfg.model.get("hidden_dropout", 0.0)
gpt_cfg.attention_dropout = cfg.model.get("attention_dropout", 0.0)
gpt_cfg.ffn_dropout = cfg.model.ffn_dropout
# This is needed when modifying a hparam file directly to load `.ckpt` files.
# This is not needed to modify the cfg in `.nemo` files.
if add_cfg_to_tree:
OmegaConf.resolve(gpt_cfg)
gpt_cfg.cfg = gpt_cfg
return gpt_cfg
def load_from_nemo(cls, cfg, trainer, gpt_cfg, modify_config_fn):
gpt_cfg = modify_config_fn(gpt_cfg, cfg, add_cfg_to_tree=False)
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(cfg.model.restore_from_path):
save_restore_connector.model_extracted_dir = cfg.model.restore_from_path
model = cls.restore_from(
restore_path=cfg.model.restore_from_path,
trainer=trainer,
override_config_path=gpt_cfg,
save_restore_connector=save_restore_connector,
)
return model
class ServerSFTModel(torch.nn.Module, FLComponent):
def __init__(
self,
config_path: str = "config/megatron_gpt_prompt_learning_config.yaml",
base_model_file_path: str = "megatron_gpt_345m.nemo",
):
"""
Initializes the NeMo model on the server.
Args:
config_path: NeMo model config file
base_model_file_path: Pre-trained nemo model file
"""
self.config_path = config_path
self.base_model_file_path = base_model_file_path
self.config = None
FLComponent.__init__(self)
torch.nn.Module.__init__(self)
def _initialize(self, fl_ctx: FLContext):
# get app root
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
# Load model configuration to initialize training NeMo environment
self.config = OmegaConf.load(os.path.join(app_root, self.config_path))
self.config.model.restore_from_path = self.base_model_file_path
# Trainer initialization, global model for persistence only, does not use GPU
strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True)
plugins = []
trainer = Trainer(plugins=plugins, strategy=strategy, accelerator="cpu")
# Load pretrained model
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(self.base_model_file_path):
save_restore_connector.model_extracted_dir = self.base_model_file_path
gpt_cfg = MegatronGPTSFTModel.restore_from(
restore_path=self.config.model.restore_from_path,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
self.model = load_from_nemo(MegatronGPTSFTModel, self.config, trainer, gpt_cfg, modify_config_fn=_modify_config)
self.log_info(fl_ctx, "Initialized global model")
def state_dict(self):
return self.model.state_dict()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._initialize(fl_ctx)
| NVFlare-main | integration/nemo/nemo_nvflare/server_sft_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
from apex.transformer import parallel_state
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
class FedMegatronGPTPromptLearningModel(MegatronGPTPromptLearningModel):
"""
Federated Learning Model class for prompt-tuning or p-tuning a pretrained Megatron GPT model.
Adapted from https://github.com/NVIDIA/NeMo/blob/v1.17.0/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py
Prompt Tuning initializes virtual prompt embeddings directly from a copy of
certain token embeddings from the pretrained GPT model's vocabulary
and directly tunes these embedding weights. The token embeddings used in
initialization are specified by the user in the config file. The model can
    be prompt-tuned for multiple tasks at once. Virtual prompts are stored in a
prompt table and can be added or deleted without disrupting virtual prompts
for other tasks.
P-tuning initializes an LSTM encoder model that generates virtual prompt
embeddings for every task. Each task shares the same encoder. After p-tuning
is complete, the learned virtual prompts can be saved to the prompt table
using add_ptuned_prompts_to_prompt_table(). Thus, if a user wants to add a
new virtual prompt via p-tuning, they do not need to retrain on all previous
tasks. This gives p-tuning the same task flexibility as prompt-tuning.
Args:
cfg: NeMo model configuration file
trainer: PyTorch Lighting Trainer
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer)
self.is_initialized = False
self.log_global = False
def setup(self, stage=None):
"""Customize the prompt encoder setup"""
if stage == "predict" and self.first_stage_of_pipeline():
self.freeze_existing_word_embeddings()
return
self.setup_test_data()
if stage == "test":
return
if self.first_stage_of_pipeline():
# Differently from setup() in the super class,
# here we don't initialize the prompt encoder as that
# would overwrite the global weights from the server
self.freeze_existing_word_embeddings()
# Only initialize the datasets once
if not self.is_initialized:
self.setup_training_data()
self.setup_validation_data()
self.is_initialized = True
def validation_epoch_end(self, outputs):
"""Use same logic as in `MegatronGPTPromptLearningModel` but change the logging tag name"""
if self.log_global: # log the global model
log_name = "global_model_val_loss"
else:
log_name = "val_loss"
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss
averaged_loss = torch.stack(outputs).mean()
else:
averaged_loss = torch.tensor(0.0).cuda()
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(averaged_loss, get_last_rank())
self.log(log_name, averaged_loss, prog_bar=True, rank_zero_only=True, sync_dist=True)
logging.info(f"{log_name}: {averaged_loss}")
gbs = self.cfg.global_batch_size
mbs = self.cfg.micro_batch_size
self._reconfigure_batch_sizes(gbs, mbs)
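# Naming sketch: with `log_global = True` the validation loss is logged as
# "global_model_val_loss" (the default key metric PromptLearner uses for global
# model selection); with the default False it is logged as plain "val_loss".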
| NVFlare-main | integration/nemo/nemo_nvflare/fed_megatron_gpt_prompt_learning_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
def load_weights(model, global_weights, device="cpu"):
"""Helper function to load global weights to local model"""
local_var_dict = model.state_dict()
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
n_loaded = 0
for var_name in global_weights:
_var_name_split = var_name.split(".")
encoder_key = _var_name_split[0]
local_var_name = ".".join(_var_name_split[1:])
if local_var_name not in local_var_dict[encoder_key]:
continue
weights = torch.as_tensor(global_weights[var_name], device=device)
try:
# update the local dict
            local_var_dict[encoder_key][local_var_name] = torch.reshape(
                weights, local_var_dict[encoder_key][local_var_name].shape
            )
n_loaded += 1
except Exception as e:
raise ValueError(f"Convert weight from {var_name} failed!") from e
model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded! Received weight dict is {global_weights}")
return n_loaded
def compute_model_diff(model, global_weights):
"""Helper function to compute the weight difference with respect to global weights"""
local_var_dict = model.state_dict()
# compute delta model, global model has the primary key set
model_diff = {}
n_diff = 0
for var_name in global_weights:
_var_name_split = var_name.split(".")
encoder_key = _var_name_split[0]
local_var_name = ".".join(_var_name_split[1:])
if local_var_name not in local_var_dict[encoder_key]:
continue
model_diff[var_name] = np.subtract(
local_var_dict[encoder_key][local_var_name].cpu().numpy(), global_weights[var_name], dtype=np.float32
)
n_diff += 1
if np.any(np.isnan(model_diff[var_name])):
raise ValueError(f"{var_name} weights became NaN!")
if n_diff == 0:
raise ValueError("No weight differences computed!")
return model_diff
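if __name__ == "__main__":
    # Sketch of the nested-key convention these helpers assume: the global dict
    # uses flat "encoder.param" names, while the local model's state_dict() is
    # nested as {"encoder": {"param": tensor}}. The stub below is hypothetical
    # and only stands in for the NeMo prompt encoder.
    class _StubPromptModel:
        def __init__(self):
            self._sd = {"prompt_encoder": {"weight": torch.zeros(2, 2)}}

        def state_dict(self):
            return self._sd

        def load_state_dict(self, sd):
            self._sd = sd

    stub = _StubPromptModel()
    flat_weights = {"prompt_encoder.weight": np.zeros((2, 2), dtype=np.float32)}
    print("loaded:", load_weights(stub, flat_weights))
    print("diff keys:", list(compute_model_diff(stub, flat_weights)))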
| NVFlare-main | integration/nemo/nemo_nvflare/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from copy import deepcopy
import pytorch_lightning as pl
from pytorch_lightning import Callback
class RestoreOptimizers(Callback):
"""Callback to restore the optimizer and learning rate scheduler states at each round of FL"""
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.optimizer_states = []
self.scaler_states = []
self.lr_scheduler_states = []
    def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule"):
        if not self.optimizer_states:
            # nothing to restore in the first FL round
            return
        trainer.strategy.load_optimizer_state_dict({"optimizer_states": self.optimizer_states})
        self.logger.info("optimizer states restored.")
if len(self.scaler_states) > 0:
trainer.scaler.load_state_dict(self.scaler_states[0])
self.logger.info("scaler states restored.")
if len(self.lr_scheduler_states) > 0:
for config, lr_scheduler_state in zip(trainer.lr_scheduler_configs, self.lr_scheduler_states):
config.scheduler.load_state_dict(lr_scheduler_state)
self.logger.info("LR scheduler states restored.")
def on_fit_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule"):
self.optimizer_states = [deepcopy(opt.state_dict()) for opt in trainer.optimizers]
self.scaler_states = [deepcopy(trainer.scaler.state_dict())]
self.lr_scheduler_states = [deepcopy(config.scheduler.state_dict()) for config in trainer.lr_scheduler_configs]
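# Usage sketch (illustrative; `module` stands for any LightningModule): pass the
# callback to the trainer so each FL round resumes optimizer, scaler, and
# LR-scheduler state from the previous local `fit()` call, e.g.
#
#   trainer = pl.Trainer(max_epochs=1, callbacks=[RestoreOptimizers()])
#   trainer.fit(module)                # round 1: states captured in on_fit_end
#   trainer.fit_loop.max_epochs += 1
#   trainer.fit(module)                # round 2: states restored in on_fit_start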
| NVFlare-main | integration/nemo/nemo_nvflare/callbacks.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from nvflare.app_common.workflows.broadcast_and_process import BroadcastAndProcess
from .config_sharer import ConfigSharer
from .constants import NemoConstants
class ShareConfig(BroadcastAndProcess):
def __init__(
self,
config_path: str = "config/megatron_gpt_prompt_learning_config.yaml",
task_templates_file: str = "config/task_templates.json",
task_name: str = NemoConstants.TASK_SHARE_CONFIG,
min_responses_required: int = 0,
wait_time_after_min_received: int = 0,
task_timeout: int = 0,
clients: Union[List[str], None] = None,
):
"""A controller for sharing the NeMo config files with the clients.
Args:
config_path: NeMo model config file
task_templates_file: Task template file
task_name: name of the task to be sent to clients to share configs
min_responses_required: min number of responses required. 0 means all clients.
wait_time_after_min_received: how long (secs) to wait after min responses are received
task_timeout: max amount of time to wait for the task to end. 0 means never time out.
clients: names of the clients to send config. Defaults to `None`.
If `None`, the task will be sent to all clients.
                If a list of client names is given, the config will only be sent to the listed clients.
"""
if clients is not None:
if not isinstance(clients, list):
raise ValueError(f"Expected list of client names but received {clients}")
BroadcastAndProcess.__init__(
self,
processor=ConfigSharer(config_path=config_path, task_templates_file=task_templates_file),
task_name=task_name,
min_responses_required=min_responses_required,
wait_time_after_min_received=wait_time_after_min_received,
timeout=task_timeout,
clients=clients,
)
| NVFlare-main | integration/nemo/nemo_nvflare/share_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytorch_lightning as pl
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.modules.common.prompt_encoder import PromptEncoder, PromptEncoderType
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from omegaconf import OmegaConf
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
# configure logging at the root logging level
logging.getLogger().setLevel(logging.INFO)
class ServerPromptEncoder(PromptEncoder, FLComponent):
def __init__(
self,
total_virtual_tokens: int = 10,
hidden_size: int = 1024,
taskname: str = "taskname",
config_path: str = "config/megatron_gpt_prompt_learning_config.yaml",
gpt_file_name: str = "megatron_gpt_345m.nemo",
devices: int = 1,
):
"""
Initializes the PromptEncoder module on the server.
Args:
total_virtual_tokens: the total number of virtual tokens
hidden_size: hidden dimension
taskname: prompt learning task name.
config_path: NeMo model config file
gpt_file_name: Pre-trained nemo model file.
            devices: number of devices for the cluster environment.
"""
self.total_virtual_tokens = total_virtual_tokens
self.hidden_size = hidden_size
self.taskname = taskname
self.config_path = config_path
self.gpt_file_name = gpt_file_name
self.devices = devices
self.config = None
FLComponent.__init__(self)
def _init_environment(self):
# setup cluster environment parameters
os.environ["LOCAL_RANK"] = "0"
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = str(self.devices)
strategy = NLPDDPStrategy(find_unused_parameters=False, no_ddp_communication_hook=True)
plugins = [TorchElasticEnvironment()]
trainer = pl.Trainer(plugins=plugins, strategy=strategy)
# only needed to initialize the cluster environment
_model = MegatronGPTPromptLearningModel(cfg=self.config.model, trainer=trainer)
def _initialize(self, fl_ctx: FLContext):
# get app root
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
# Load model configuration to initialize training NeMo environment
self.config = OmegaConf.load(os.path.join(app_root, self.config_path))
self.config.trainer.devices = self.devices
self.config.model.language_model_path = os.path.join(app_root, self.gpt_file_name)
# Using defaults from `init_prompt_encoder` in `MegatronBasePromptLearningModel`
_encoder_type = PromptEncoderType(self.config.model.p_tuning.get("encoder_type", "mlp").lower())
if _encoder_type == PromptEncoderType.TPMLP:
self._init_environment()
PromptEncoder.__init__(
self,
encoder_type=_encoder_type,
total_virtual_tokens=self.total_virtual_tokens,
token_dim=self.hidden_size,
hidden_size=self.config.model.p_tuning.get("encoder_hidden", self.hidden_size // 2),
lstm_dropout=self.config.model.p_tuning.get("dropout", 0.0),
num_layers=self.config.model.p_tuning.get("num_layers", 2),
init_std=self.config.model.p_tuning.get("init_std", 0.023),
taskname=self.taskname,
)
self.log_info(fl_ctx, f"Initialized prompt encoder type {_encoder_type}")
def state_dict(self):
_nemo_state_dict = PromptEncoder.state_dict(self)
# Turn nested dict into single level dict supported by ModelPersistor and Aggregator
state_dict = {}
for encoder_key, prompt_state_dict in _nemo_state_dict.items():
for k, v in prompt_state_dict.items():
state_dict[f"{encoder_key}.{k}"] = v
return state_dict
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self._initialize(fl_ctx)
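# Added illustration (names are made up): state_dict() above flattens the
# PromptEncoder's nested {encoder_key: {param_name: tensor}} mapping into the
# single-level layout expected by NVFlare's ModelPersistor and Aggregator:
#   {"prompt_encoder": {"mlp.0.weight": w}} -> {"prompt_encoder.mlp.0.weight": w}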
| NVFlare-main | integration/nemo/nemo_nvflare/prompt_encoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from omegaconf import OmegaConf
from nvflare.apis.client import Client
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.response_processor import ResponseProcessor
from .constants import NemoDataKind
class ConfigSharerSFT(ResponseProcessor):
def __init__(
self,
config_path: str = "config/megatron_gpt_sft.yaml",
):
"""Share the NeMo config files with the clients.
Args:
config_path: NeMo model config file
"""
super().__init__()
self.config_path = config_path
def create_task_data(self, task_name: str, fl_ctx: FLContext) -> Shareable:
"""Create the data for the task to be sent to clients
Args:
task_name: name of the task
fl_ctx: the FL context
Returns: task data
"""
# get app root
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
# Load model configuration to initialize training NeMo environment
self.config_path = os.path.join(app_root, self.config_path)
config = OmegaConf.load(self.config_path)
self.log_info(fl_ctx, f"Load model configuration from {self.config_path}")
        # convert the OmegaConf config to a primitive dict so it can be shared in a DXO
        configs = {
            NemoDataKind.NEMO_CONFIG: OmegaConf.to_container(config),
        }
        dxo = DXO(data=configs, data_kind=NemoDataKind.CONFIGS)
return dxo.to_shareable()
def process_client_response(self, client: Client, task_name: str, response: Shareable, fl_ctx: FLContext) -> bool:
"""Process the weights submitted by a client.
Args:
client: the client that submitted the response
task_name: name of the task
response: submitted data from the client
fl_ctx: FLContext
Returns:
boolean to indicate if the client data is acceptable.
If not acceptable, the control flow will exit.
"""
# We only check for client errors here
if not isinstance(response, Shareable):
self.log_error(
fl_ctx,
f"bad response from client {client.name}: " f"response must be Shareable but got {type(response)}",
)
return False
if response.get_return_code() != ReturnCode.OK:
self.log_exception(
fl_ctx, f"bad response from client {client.name}: Got return code {response.get_return_code()}"
)
return False
return True
def final_process(self, fl_ctx: FLContext) -> bool:
"""Perform the final check. Do nothing.
Args:
fl_ctx: FLContext
Returns:
boolean indicating whether the final response processing is successful.
If not successful, the control flow will exit.
"""
# no final processing required for this task
return True
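# Hypothetical usage sketch (added; the component id and config values are
# illustrative): a ResponseProcessor such as ConfigSharerSFT is typically driven
# by a broadcast-style controller on the server, which sends create_task_data()
# to every client and routes each reply through process_client_response().
#   {"id": "config_sharer_sft",
#    "path": "nemo_nvflare.config_sharer_sft.ConfigSharerSFT",
#    "args": {"config_path": "config/megatron_gpt_sft.yaml"}}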
| NVFlare-main | integration/nemo/nemo_nvflare/config_sharer_sft.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pty
import shlex
import subprocess
import sys
import time
from io import BytesIO
import pytest
from tests.integration_test.src import ProvisionSiteLauncher
from tests.integration_test.src.constants import PREFLIGHT_CHECK_SCRIPT
TEST_CASES = [
{"project_yaml": "data/projects/dummy.yml", "admin_name": "[email protected]", "is_dummy_overseer": True},
{
"project_yaml": "data/projects/ha_2_servers_2_clients.yml",
"admin_name": "[email protected]",
"is_dummy_overseer": False,
},
]
SERVER_OUTPUT_PASSED = (
"-----------------------------------------------------------------------------------------------------------------------------------\n"
"| Checks | Problems | How to fix |\n"
"|---------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check overseer running | PASSED | N/A |\n"
"|---------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check grpc port binding | PASSED | N/A |\n"
"|---------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check admin port binding | PASSED | N/A |\n"
"|---------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check snapshot storage writable | PASSED | N/A |\n"
"|---------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check job storage writable | PASSED | N/A |\n"
"|---------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check dry run | PASSED | N/A |\n"
"-----------------------------------------------------------------------------------------------------------------------------------"
)
OVERSEER_OUTPUT_PASSED = (
"-------------------------------------------------------------------------------------------------------------------------------\n"
"| Checks | Problems | How to fix |\n"
"|-----------------------------------------------------------------------------------------------------------------------------|\n"
"| Check overseer port binding | PASSED | N/A |\n"
"|-----------------------------------------------------------------------------------------------------------------------------|\n"
"| Check dry run | PASSED | N/A |\n"
"-------------------------------------------------------------------------------------------------------------------------------"
)
CLIENT_OUTPUT_PASSED = (
"--------------------------------------------------------------------------------------------------------------------------------------------------\n"
"| Checks | Problems | How to fix |\n"
"|------------------------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check overseer running | PASSED | N/A |\n"
"|------------------------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check service provider list available | PASSED | N/A |\n"
"|------------------------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check primary SP's socket server available | PASSED | N/A |\n"
"|------------------------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check primary SP's GRPC server available | PASSED | N/A |\n"
"|------------------------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check non-primary SP's socket server available | PASSED | N/A |\n"
"|------------------------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check non-primary SP's GRPC server available | PASSED | N/A |\n"
"|------------------------------------------------------------------------------------------------------------------------------------------------|\n"
"| Check dry run | PASSED | N/A |\n"
"--------------------------------------------------------------------------------------------------------------------------------------------------"
)
# TODO: this is a hack to filter out the GRPC message
# "Other threads are currently calling into gRPC, skipping fork() handlers"
GRPC_ERROR_MSG = "Other threads are currently calling into gRPC, skipping fork() handlers"
SERVER_START_TIME = 15
def _filter_output(output):
lines = []
for line in output.decode("utf-8").splitlines():
if GRPC_ERROR_MSG in line:
continue
elif "Checking Package" in line:
continue
elif "killing dry run process" in line:
continue
elif "killed dry run process" in line:
continue
elif not line:
continue
lines.append(line)
return lines
def _run_preflight_check_command_in_subprocess(package_path: str):
command = f"{sys.executable} -m {PREFLIGHT_CHECK_SCRIPT} -p {package_path}"
print(f"Executing command {command} in subprocess")
output = subprocess.check_output(shlex.split(command))
return output
def _run_preflight_check_command_in_pseudo_terminal(package_path: str):
command = f"{sys.executable} -m {PREFLIGHT_CHECK_SCRIPT} -p {package_path}"
print(f"Executing command {command} in pty")
with BytesIO() as output:
def read(fd):
data = os.read(fd, 1024 * 1024 * 1024)
output.write(data)
return data
pty.spawn(shlex.split(command), read)
return output.getvalue()
def _run_preflight_check_command(package_path: str, method: str = "subprocess"):
if method == "subprocess":
return _run_preflight_check_command_in_subprocess(package_path)
else:
return _run_preflight_check_command_in_pseudo_terminal(package_path)
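# For reference (added; the exact module path is supplied by
# PREFLIGHT_CHECK_SCRIPT in src.constants, so the one below is illustrative),
# the assembled command looks like:
#   /usr/bin/python -m nvflare.tool.preflight_check -p /workspace/site-1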
@pytest.fixture(
params=TEST_CASES,
)
def setup_system(request):
test_config = request.param
project_yaml_path = test_config["project_yaml"]
is_dummy_overseer = test_config["is_dummy_overseer"]
admin_name = test_config["admin_name"]
if not os.path.isfile(project_yaml_path):
raise RuntimeError(f"Missing project_yaml at {project_yaml_path}.")
site_launcher = ProvisionSiteLauncher(project_yaml=project_yaml_path)
workspace_root = site_launcher.prepare_workspace()
print(f"Workspace root is {workspace_root}")
admin_folder_root = os.path.abspath(os.path.join(workspace_root, admin_name))
return site_launcher, is_dummy_overseer, admin_folder_root
@pytest.mark.xdist_group(name="preflight_tests_group")
class TestPreflightCheck:
def test_run_check_on_overseer(self, setup_system):
site_launcher, is_dummy_overseer, _ = setup_system
try:
# preflight-check on overseer
if is_dummy_overseer:
return
output = _run_preflight_check_command(package_path=site_launcher.overseer_properties.root_dir)
assert _filter_output(output) == OVERSEER_OUTPUT_PASSED.splitlines()
finally:
site_launcher.cleanup()
def test_run_check_on_server_after_overseer_start(self, setup_system):
site_launcher, is_dummy_overseer, _ = setup_system
try:
if not is_dummy_overseer:
site_launcher.start_overseer()
# preflight-check on server
for server_name, server_props in site_launcher.server_properties.items():
output = _run_preflight_check_command(package_path=server_props.root_dir)
assert _filter_output(output) == SERVER_OUTPUT_PASSED.splitlines()
finally:
site_launcher.stop_all_sites()
site_launcher.cleanup()
def test_run_check_on_server_before_overseer_start(self, setup_system):
site_launcher, is_dummy_overseer, _ = setup_system
try:
# preflight-check on server
for server_name, server_props in site_launcher.server_properties.items():
output = _run_preflight_check_command(package_path=server_props.root_dir)
if is_dummy_overseer:
assert _filter_output(output) == SERVER_OUTPUT_PASSED.splitlines()
else:
assert _filter_output(output) != SERVER_OUTPUT_PASSED.splitlines()
finally:
site_launcher.stop_all_sites()
site_launcher.cleanup()
def test_run_check_on_client(self, setup_system):
site_launcher, is_dummy_overseer, _ = setup_system
try:
if not is_dummy_overseer:
site_launcher.start_overseer()
site_launcher.start_servers()
time.sleep(SERVER_START_TIME)
# preflight-check on clients
for client_name, client_props in site_launcher.client_properties.items():
output = _run_preflight_check_command(package_path=client_props.root_dir)
assert _filter_output(output) == CLIENT_OUTPUT_PASSED.splitlines()
finally:
site_launcher.stop_all_sites()
site_launcher.cleanup()
def test_run_check_on_admin_console(self, setup_system):
site_launcher, is_dummy_overseer, admin_folder_root = setup_system
try:
if not is_dummy_overseer:
site_launcher.start_overseer()
site_launcher.start_servers()
time.sleep(SERVER_START_TIME)
# preflight-check on admin console
output = _run_preflight_check_command(package_path=admin_folder_root)
assert _filter_output(output) == CLIENT_OUTPUT_PASSED.splitlines()
finally:
site_launcher.stop_all_sites()
site_launcher.cleanup()
| NVFlare-main | tests/integration_test/preflight_check_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/integration_test/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import pytest
from tests.integration_test.src import OALauncher
@pytest.mark.xdist_group(name="overseer_tests_group")
class TestOverseer:
def test_overseer_server_down_and_up(self):
oa_launcher = OALauncher()
try:
oa_launcher.start_overseer()
time.sleep(1)
server_agent_list = oa_launcher.start_servers(2)
client_agent_list = oa_launcher.start_clients(4)
time.sleep(10)
psp = oa_launcher.get_primary_sp(client_agent_list[0])
assert psp.name == "server00"
oa_launcher.pause_server(server_agent_list[0])
time.sleep(20)
psp = oa_launcher.get_primary_sp(client_agent_list[0])
assert psp.name == "server01"
oa_launcher.resume_server(server_agent_list[0])
psp = oa_launcher.get_primary_sp(client_agent_list[0])
assert psp.name == "server01"
time.sleep(10)
psp = oa_launcher.get_primary_sp(client_agent_list[0])
assert psp.name == "server01"
finally:
oa_launcher.stop_clients()
oa_launcher.stop_servers()
oa_launcher.stop_overseer()
def test_overseer_client_down_and_up(self):
oa_launcher = OALauncher()
try:
oa_launcher.start_overseer()
time.sleep(10)
_ = oa_launcher.start_servers(1)
client_agent_list = oa_launcher.start_clients(1)
time.sleep(10)
psp = oa_launcher.get_primary_sp(client_agent_list[0])
assert psp.name == "server00"
oa_launcher.pause_client(client_agent_list[0])
time.sleep(10)
psp = oa_launcher.get_primary_sp(client_agent_list[0])
assert psp.name == "server00"
oa_launcher.resume_client(client_agent_list[0])
time.sleep(10)
psp = oa_launcher.get_primary_sp(client_agent_list[0])
assert psp.name == "server00"
finally:
oa_launcher.stop_clients()
oa_launcher.stop_servers()
oa_launcher.stop_overseer()
def test_overseer_overseer_down_and_up(self):
oa_launcher = OALauncher()
try:
oa_launcher.start_overseer()
time.sleep(10)
_ = oa_launcher.start_servers(1)
client_agent_list = oa_launcher.start_clients(4)
time.sleep(10)
for client_agent in client_agent_list:
psp = oa_launcher.get_primary_sp(client_agent)
assert psp.name == "server00"
oa_launcher.stop_overseer()
time.sleep(10)
oa_launcher.start_overseer()
time.sleep(10)
for client_agent in client_agent_list:
psp = oa_launcher.get_primary_sp(client_agent)
assert psp.name == "server00"
finally:
oa_launcher.stop_clients()
oa_launcher.stop_servers()
oa_launcher.stop_overseer()
| NVFlare-main | tests/integration_test/overseer_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import sys
import tempfile
import time
import pytest
from tests.integration_test.src import (
NVFTestDriver,
NVFTestError,
POCSiteLauncher,
ProvisionSiteLauncher,
cleanup_path,
read_yaml,
run_command_in_subprocess,
)
def get_module_class_from_full_path(full_path):
tokens = full_path.split(".")
cls_name = tokens[-1]
mod_name = ".".join(tokens[: len(tokens) - 1])
return mod_name, cls_name
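# Example (added for illustration with a made-up path): "my_pkg.validators.MyValidator"
# splits into mod_name="my_pkg.validators" and cls_name="MyValidator"; the test
# below imports the module with importlib and looks up the class via getattr.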
def get_test_config(test_config_yaml: str):
print(f"Test config from: {test_config_yaml}")
test_config = read_yaml(test_config_yaml)
test_config["single_app_as_job"] = test_config.get("single_app_as_job", False)
test_config["cleanup"] = test_config.get("cleanup", True)
test_config["ha"] = test_config.get("ha", False)
for x in ["cleanup", "single_app_as_job"]:
if x not in test_config:
raise NVFTestError(f"Test config: {test_config_yaml} missing required attributes {x}.")
print(f"\t{x}: {test_config[x]}")
if test_config["single_app_as_job"]:
if "apps_root_dir" not in test_config:
raise NVFTestError(f"Test config: {test_config_yaml} missing apps_root_dir.")
print(f"\tapps_root_dir: {test_config['apps_root_dir']}")
else:
if "jobs_root_dir" not in test_config:
raise NVFTestError(f"Test config: {test_config_yaml} missing jobs_root_dir.")
print(f"\tjobs_root_dir: {test_config['jobs_root_dir']}")
if test_config["ha"]:
if "project_yaml" not in test_config:
raise NVFTestError(f"Test config: {test_config_yaml} missing project_yaml.")
else:
for x in ["n_servers", "n_clients"]:
if x not in test_config:
raise NVFTestError(f"Test config: {test_config_yaml} missing required attributes {x}.")
return test_config
framework = os.environ.get("NVFLARE_TEST_FRAMEWORK")
test_configs_yaml = "auto_test_configs.yml" if framework == "auto" else "test_configs.yml"
test_configs = read_yaml(test_configs_yaml)
if framework not in test_configs["test_configs"]:
print(f"Framework/test {framework} is not supported, using default numpy.")
framework = "numpy"
print(f"Testing framework {framework}")
test_configs = test_configs["test_configs"][framework]
@pytest.fixture(
scope="class",
params=test_configs,
)
def setup_and_teardown_system(request):
yaml_path = os.path.join(os.path.dirname(__file__), request.param)
test_config = get_test_config(yaml_path)
cleanup = test_config["cleanup"]
ha = test_config["ha"]
poll_period = test_config.get("poll_period", 5)
additional_python_paths = test_config.get("additional_python_paths", [])
for additional_python_path in additional_python_paths:
sys.path.append(os.path.abspath(additional_python_path))
test_temp_dir = tempfile.mkdtemp()
test_driver = None
site_launcher = None
try:
if ha:
project_yaml_path = test_config.get("project_yaml")
if not os.path.isfile(project_yaml_path):
raise NVFTestError(f"Missing project_yaml at {project_yaml_path}.")
site_launcher = ProvisionSiteLauncher(project_yaml=project_yaml_path)
poc = False
super_user_name = "[email protected]"
else:
n_servers = int(test_config["n_servers"])
if n_servers != 1:
raise NVFTestError("POC mode can only use one server. For more servers, use HA with provisioned mode.")
n_clients = int(test_config["n_clients"])
site_launcher = POCSiteLauncher(n_servers=n_servers, n_clients=n_clients)
poc = False # POC now uses SSL as well so this needs to be False
super_user_name = "[email protected]"
workspace_root = site_launcher.prepare_workspace()
print(f"Workspace root is {workspace_root}")
print(f"sys.path start is: {sys.path}")
if ha:
site_launcher.start_overseer()
site_launcher.start_servers()
site_launcher.start_clients()
# testing cases
test_cases = []
jobs_root_dir = test_config["jobs_root_dir"]
for x in test_config["tests"]:
test_cases.append(
(
x["test_name"],
x.get("validators"),
x.get("setup", []),
x.get("teardown", []),
x.get("event_sequence", []),
x.get("reset_job_info", True),
),
)
download_root_dir = os.path.join(test_temp_dir, "download_result")
os.mkdir(download_root_dir)
test_driver = NVFTestDriver(
site_launcher=site_launcher, download_root_dir=download_root_dir, poll_period=poll_period
)
test_driver.initialize_super_user(
workspace_root_dir=workspace_root, upload_root_dir=jobs_root_dir, poc=poc, super_user_name=super_user_name
)
if ha:
test_driver.initialize_admin_users(
workspace_root_dir=workspace_root,
upload_root_dir=jobs_root_dir,
poc=poc,
admin_user_names=site_launcher.admin_user_names,
)
test_driver.ensure_clients_started(num_clients=len(site_launcher.client_properties.keys()), timeout=2000)
yield ha, test_cases, site_launcher, test_driver
finally:
if test_driver:
test_driver.finalize()
if site_launcher:
site_launcher.stop_all_sites()
if cleanup:
if site_launcher:
site_launcher.cleanup()
cleanup_path(test_temp_dir)
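        # restore sys.path; the "or None" guard matters because with zero
        # additional paths the slice would be sys.path[:0] (an empty list)
        # instead of the original, unmodified list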
sys.path = sys.path[: -len(additional_python_paths) or None]
print(f"sys.path finish is: {sys.path}")
@pytest.mark.xdist_group(name="system_tests_group")
class TestSystem:
def test_run_job_complete(self, setup_and_teardown_system):
ha, test_cases, site_launcher, test_driver = setup_and_teardown_system
print(f"Server status: {test_driver.server_status()}.")
print(f"Client status: {test_driver.client_status()}")
test_validate_results = []
for test_data in test_cases:
test_name, validators, setup, teardown, event_sequence, reset_job_info = test_data
print(f"Running test {test_name}")
start_time = time.time()
for command in setup:
print(f"Running setup command: {command}")
process = run_command_in_subprocess(command)
process.wait()
test_driver.run_event_sequence(event_sequence)
job_result = None
if test_driver.job_id is not None:
job_result = test_driver.get_job_result(test_driver.job_id)
# Get the job validator
if validators:
validate_result = True
for validator in validators:
validator_module = validator["path"]
validator_args = validator.get("args", {})
# Create validator instance
module_name, class_name = get_module_class_from_full_path(validator_module)
job_validator_cls = getattr(importlib.import_module(module_name), class_name)
job_validator = job_validator_cls(**validator_args)
job_validate_res = job_validator.validate_results(
job_result=job_result,
client_props=list(site_launcher.client_properties.values()),
)
print(f"Test {test_name}, Validator {job_validator.__class__.__name__}, Result: {job_validate_res}")
if not job_validate_res:
validate_result = False
break
else:
print("No validators provided so results set to No Validators.")
validate_result = "No Validators"
test_validate_results.append((test_name, validate_result))
print(f"Finished running test '{test_name}' in {time.time() - start_time} seconds.")
for command in teardown:
print(f"Running teardown command: {command}")
process = run_command_in_subprocess(command)
process.wait()
test_driver.reset_test_info(reset_job_info=reset_job_info)
print("\n\n\n\n\n")
_print_validate_result(validate_result=test_validate_results)
def _print_validate_result(validate_result: list):
test_name_length = 10
result_length = 20
failure = False
for test_name, result in validate_result:
test_name_length = max(test_name_length, len(test_name))
result_length = max(result_length, len(str(result)))
if not result:
failure = True
print("=" * (test_name_length + result_length + 7))
print("| {arg:<{width}s} |".format(arg="Test validate results", width=test_name_length + result_length + 3))
print("|" + "-" * (test_name_length + result_length + 5) + "|")
print(
"| {test_name:<{width1}s} | {result:<{width2}s} |".format(
test_name="Test Name",
result="Validate Result",
width1=test_name_length,
width2=result_length,
)
)
print("|" + "-" * (test_name_length + result_length + 5) + "|")
for test_name, result in validate_result:
print(
"| {test_name:<{width1}s} | {result:<{width2}s} |".format(
test_name=test_name,
result=str(result),
width1=test_name_length,
width2=result_length,
)
)
print("|" + "-" * (test_name_length + result_length + 5) + "|")
print("| {arg:<{width}s} |".format(arg=f"Final result: {not failure}", width=test_name_length + result_length + 3))
print("=" * (test_name_length + result_length + 7))
assert not failure
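# Illustrative output (added; column widths adapt to the longest test name and
# result string, so the exact padding varies):
# =====================================
# | Test validate results             |
# |-----------------------------------|
# | Test Name  | Validate Result      |
# |-----------------------------------|
# | sag_test   | True                 |
# |-----------------------------------|
# | Final result: True                |
# =====================================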
| NVFlare-main | tests/integration_test/system_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import yaml
from tests.integration_test.src import generate_test_config_yaml_for_example, read_yaml
from tests.integration_test.src.example import Example
def _construct_example_from_registry(registry: dict, examples_root: str) -> Example:
example_root = os.path.join(examples_root, registry["root"])
jobs_folder_in_example = registry.get("jobs_folder_in_example", "jobs")
requirements_in_example = registry.get("requirements", "requirements.txt")
additional_python_path = registry.get("additional_python_path")
if additional_python_path is not None and not os.path.isabs(additional_python_path):
additional_python_path = os.path.join(examples_root, additional_python_path)
return Example(
root=example_root,
jobs_folder_in_example=jobs_folder_in_example,
requirements=requirements_in_example,
additional_python_path=additional_python_path,
prepare_data_script=registry.get("prepare_data_script"),
)
def main():
parser = ArgumentParser("Generate all test configs")
parser.add_argument(
"--example_test_registry",
default="example_registry.yml",
type=str,
help="a yaml file that specifies information needed to generate integration test's config for examples",
)
args = parser.parse_args()
all_output_yamls = []
# generate individual test yaml files
example_list = read_yaml(args.example_test_registry)
examples_root = example_list["examples_root"]
for example_registry in example_list["examples"]:
if "root" not in example_registry:
print(f"Missing root attribute in registry: {example_registry}")
continue
try:
example = _construct_example_from_registry(example_registry, examples_root)
output_yamls = generate_test_config_yaml_for_example(example=example)
all_output_yamls.extend(output_yamls)
except FileNotFoundError as e:
print(f"Skip invalid example entry ({example_registry}): {e}")
continue
# generate overall test config yaml
test_config = {"test_configs": {"auto": all_output_yamls}}
with open("auto_test_configs.yml", "w") as yaml_file:
yaml.dump(test_config, yaml_file, default_flow_style=False)
if __name__ == "__main__":
main()
| NVFlare-main | tests/integration_test/generate_all_test_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from tests.integration_test.src.utils import simplify_job
def main():
parser = ArgumentParser("Simplify job")
parser.add_argument("--job", required=True, type=str)
parser.add_argument("--postfix", type=str, default="_copy")
args = parser.parse_args()
simplify_job(job_folder_path=args.job, postfix=args.postfix)
if __name__ == "__main__":
main()
| NVFlare-main | tests/integration_test/convert_to_test_job.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class Net(tf.keras.Model):
def __init__(self):
super().__init__()
self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
self.dense1 = tf.keras.layers.Dense(128, activation="relu")
self.dropout = tf.keras.layers.Dropout(0.2)
self.dense2 = tf.keras.layers.Dense(10)
def call(self, x):
x = self.flatten(x)
x = self.dense1(x)
x = self.dropout(x)
x = self.dense2(x)
return x
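

if __name__ == "__main__":
    # Added smoke test (not part of the original file): run the network on a
    # dummy MNIST-shaped batch and check the logits shape.
    net = Net()
    logits = net(tf.zeros((1, 28, 28)))
    print(logits.shape)  # expected: (1, 10)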
| NVFlare-main | tests/integration_test/data/apps/cyclic/custom/tf2_net.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tensorflow as tf
from tf2_net import Net
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants
from nvflare.fuel.utils import fobs
class TF2ModelPersistor(ModelPersistor):
def __init__(self, save_name="tf2_model.fobs"):
super().__init__()
self.save_name = save_name
def _initialize(self, fl_ctx: FLContext):
# get save path from FLContext
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
env = None
run_args = fl_ctx.get_prop(FLContextKey.ARGS)
if run_args:
env_config_file_name = os.path.join(app_root, run_args.env)
if os.path.exists(env_config_file_name):
try:
with open(env_config_file_name) as file:
env = json.load(file)
except:
self.system_panic(
reason="error opening env config file {}".format(env_config_file_name), fl_ctx=fl_ctx
)
return
if env is not None:
if env.get("APP_CKPT_DIR", None):
fl_ctx.set_prop(AppConstants.LOG_DIR, env["APP_CKPT_DIR"], private=True, sticky=True)
if env.get("APP_CKPT") is not None:
fl_ctx.set_prop(
AppConstants.CKPT_PRELOAD_PATH,
env["APP_CKPT"],
private=True,
sticky=True,
)
log_dir = fl_ctx.get_prop(AppConstants.LOG_DIR)
if log_dir:
self.log_dir = os.path.join(app_root, log_dir)
else:
self.log_dir = app_root
self._fobs_save_path = os.path.join(self.log_dir, self.save_name)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
fl_ctx.sync_sticky()
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
"""Initializes and loads the Model.
Args:
fl_ctx: FLContext
Returns:
Model object
"""
if os.path.exists(self._fobs_save_path):
self.logger.info("Loading server weights")
with open(self._fobs_save_path, "rb") as f:
model_learnable = fobs.load(f)
else:
self.logger.info("Initializing server model")
network = Net()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
network.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
_ = network(tf.keras.Input(shape=(28, 28)))
var_dict = {network.get_layer(index=key).name: value for key, value in enumerate(network.get_weights())}
model_learnable = make_model_learnable(var_dict, dict())
return model_learnable
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
self._initialize(fl_ctx)
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
"""Saves model.
Args:
model_learnable: ModelLearnable object
fl_ctx: FLContext
"""
model_learnable_info = {k: str(type(v)) for k, v in model_learnable.items()}
self.logger.info(f"Saving aggregated server weights: \n {model_learnable_info}")
with open(self._fobs_save_path, "wb") as f:
fobs.dump(model_learnable, f)
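# Added note: fobs (imported above from nvflare.fuel.utils) is NVFlare's own
# object serializer; fobs.dump/fobs.load form the write/read pair behind the
# save_model/load_model methods, persisting the ModelLearnable without pickle.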
| NVFlare-main | tests/integration_test/data/apps/cyclic/custom/tf2_model_persistor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/integration_test/data/apps/cyclic/custom/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tf2_net import Net
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
class SimpleTrainer(Executor):
def __init__(self, epochs_per_round):
super().__init__()
self.epochs_per_round = epochs_per_round
self.train_images, self.train_labels = None, None
self.test_images, self.test_labels = None, None
self.model = None
self.var_list = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.setup(fl_ctx)
def setup(self, fl_ctx: FLContext):
(self.train_images, self.train_labels), (
self.test_images,
self.test_labels,
) = tf.keras.datasets.mnist.load_data()
self.train_images, self.test_images = (
self.train_images / 255.0,
self.test_images / 255.0,
)
# simulate separate datasets for each client by dividing MNIST dataset in half
client_name = fl_ctx.get_identity_name()
if client_name == "site-1":
self.train_images = self.train_images[: len(self.train_images) // 2]
self.train_labels = self.train_labels[: len(self.train_labels) // 2]
self.test_images = self.test_images[: len(self.test_images) // 2]
self.test_labels = self.test_labels[: len(self.test_labels) // 2]
elif client_name == "site-2":
self.train_images = self.train_images[len(self.train_images) // 2 :]
self.train_labels = self.train_labels[len(self.train_labels) // 2 :]
self.test_images = self.test_images[len(self.test_images) // 2 :]
self.test_labels = self.test_labels[len(self.test_labels) // 2 :]
model = Net()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
_ = model(tf.keras.Input(shape=(28, 28)))
self.var_list = [model.get_layer(index=index).name for index in range(len(model.get_weights()))]
self.model = model
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""
This function is an extended function from the super class.
As a supervised learning based trainer, the train function will run
evaluate and train engines based on model weights from `shareable`.
After finishing training, a new `Shareable` object will be submitted
to server for aggregation.
Args:
task_name: dispatched task
shareable: the `Shareable` object received from server.
fl_ctx: the `FLContext` object received from server.
abort_signal: if triggered, the training will be aborted.
Returns:
a new `Shareable` object to be submitted to server for aggregation.
"""
# retrieve model weights download from server's shareable
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
if task_name != "train":
return make_reply(ReturnCode.TASK_UNKNOWN)
dxo = from_shareable(shareable)
model_weights = dxo.data
# use previous round's client weights to replace excluded layers from server
prev_weights = {
self.model.get_layer(index=key).name: value for key, value in enumerate(self.model.get_weights())
}
ordered_model_weights = {key: model_weights.get(key) for key in prev_weights}
for key in self.var_list:
value = ordered_model_weights.get(key)
if np.all(value == 0):
ordered_model_weights[key] = prev_weights[key]
# update local model weights with received weights
self.model.set_weights(list(ordered_model_weights.values()))
# adjust LR or other training time info as needed
# such as callback in the fit function
self.model.fit(
self.train_images,
self.train_labels,
epochs=self.epochs_per_round,
validation_data=(self.test_images, self.test_labels),
)
# report updated weights in shareable
weights = {self.model.get_layer(index=key).name: value for key, value in enumerate(self.model.get_weights())}
dxo = DXO(data_kind=DataKind.WEIGHTS, data=weights)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
new_shareable = dxo.to_shareable()
return new_shareable
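# Added note: the all-zero check in execute() assumes the server zero-fills any
# layer it excludes from aggregation; when a returned array is entirely zero,
# the trainer keeps its previous local weights for that layer instead of
# overwriting the layer with zeros.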
| NVFlare-main | tests/integration_test/data/apps/cyclic/custom/trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.np.np_trainer import NPTrainer
class SlowTrainer(NPTrainer):
def _train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal):
time.sleep(10.0)
return super()._train(shareable, fl_ctx, abort_signal)
| NVFlare-main | tests/integration_test/data/apps/slow_job/custom/slow_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNetwork(nn.Module):
def __init__(self):
super(SimpleNetwork, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
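

if __name__ == "__main__":
    # Added shape check (not part of the original file), verifying the conv/pool
    # arithmetic behind fc1's 16 * 5 * 5 input size for 32x32 CIFAR-10 images:
    # 32 -> conv(5) -> 28 -> pool(2) -> 14 -> conv(5) -> 10 -> pool(2) -> 5.
    net = SimpleNetwork()
    out = net(torch.zeros(1, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([1, 10])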
| NVFlare-main | tests/integration_test/data/apps/pt_use_path/custom/simple_network.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import torch.cuda
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class PTModelLocator(ModelLocator):
def __init__(self, exclude_vars=None):
super(PTModelLocator, self).__init__()
self.model = SimpleNetwork()
self.exclude_vars = exclude_vars
def get_model_names(self, fl_ctx: FLContext) -> List[str]:
return [PTConstants.PTServerName]
def locate_model(self, model_name, fl_ctx: FLContext) -> Union[DXO, None]:
if model_name == PTConstants.PTServerName:
try:
server_run_dir = fl_ctx.get_engine().get_workspace().get_app_dir(fl_ctx.get_job_id())
model_path = os.path.join(server_run_dir, PTConstants.PTFileModelName)
if not os.path.exists(model_path):
return None
# Load the torch model
device = "cuda" if torch.cuda.is_available() else "cpu"
data = torch.load(model_path, map_location=device)
# Set up the persistence manager.
if self.model:
default_train_conf = {"train": {"model": type(self.model).__name__}}
else:
default_train_conf = None
# Use persistence manager to get learnable
persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=default_train_conf)
ml = persistence_manager.to_model_learnable(exclude_vars=None)
# Create dxo and return
return model_learnable_to_dxo(ml)
except Exception as e:
self.log_error(fl_ctx, f"Error in retrieving {model_name}: {e}.", fire_event=False)
return None
else:
self.log_error(fl_ctx, f"PTModelLocator doesn't recognize name: {model_name}", fire_event=False)
return None
| NVFlare-main | tests/integration_test/data/apps/pt_use_path/custom/pt_model_locator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import torch
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from torch import nn
from torch.optim import SGD
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.model import make_model_learnable, model_learnable_to_dxo
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class Cifar10Trainer(Executor):
def __init__(
self,
data_path,
lr=0.01,
epochs=5,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
exclude_vars=None,
):
"""CIFAR10 Trainer handles train and submit_model tasks. During train_task, it trains a
simple network on CIFAR10 dataset. For submit_model task, it sends the locally trained model
(if present) to the server.
Args:
lr (float, optional): Learning rate. Defaults to 0.01
epochs (int, optional): Epochs. Defaults to 5
train_task_name (str, optional): Task name for train task. Defaults to "train".
submit_model_task_name (str, optional): Task name for submit model. Defaults to "submit_model".
exclude_vars (list): List of variables to exclude during model loading.
"""
super(Cifar10Trainer, self).__init__()
self._lr = lr
self._epochs = epochs
self._train_task_name = train_task_name
self._submit_model_task_name = submit_model_task_name
self._exclude_vars = exclude_vars
# Training setup
self.model = SimpleNetwork()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.loss = nn.CrossEntropyLoss()
self.optimizer = SGD(self.model.parameters(), lr=lr, momentum=0.9)
# Create CIFAR10 dataset for training.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self._train_dataset = CIFAR10(root=data_path, transform=transforms, train=True)
self._train_loader = DataLoader(self._train_dataset, batch_size=4, shuffle=True)
self._n_iterations = len(self._train_loader)
# Set up the persistence manager to save PT model.
# The default training configuration is used by persistence manager
# in case no initial model is found.
self._default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self._default_train_conf
)
def local_train(self, fl_ctx, weights, abort_signal):
# Set the model weights
self.model.load_state_dict(state_dict=weights)
# Basic training
self.model.train()
for epoch in range(self._epochs):
running_loss = 0.0
for i, batch in enumerate(self._train_loader):
if abort_signal.triggered:
# If abort_signal is triggered, we simply return.
# The outside function will check it again and decide steps to take.
return
images, labels = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
predictions = self.model(images)
cost = self.loss(predictions, labels)
cost.backward()
self.optimizer.step()
running_loss += cost.cpu().detach().numpy() / images.size()[0]
if i % 3000 == 0:
self.log_info(
fl_ctx, f"Epoch: {epoch}/{self._epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}"
)
running_loss = 0.0
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
try:
if task_name == self._train_task_name:
# Get model weights
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
self.local_train(fl_ctx, torch_weights, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self.save_local_model(fl_ctx)
# Get the new state dict and send as weights
new_weights = self.model.state_dict()
new_weights = {k: v.cpu().numpy() for k, v in new_weights.items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS,
data=new_weights,
meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self._n_iterations},
)
return outgoing_dxo.to_shareable()
elif task_name == self._submit_model_task_name:
# Load local model
ml = self.load_local_model(fl_ctx)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
except:
self.log_exception(fl_ctx, "Exception in simple trainer.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, "models")
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
def load_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, "models")
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(
data=torch.load(model_path), default_train_conf=self._default_train_conf
)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self._exclude_vars)
return ml
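# Added note: MetaKey.NUM_STEPS_CURRENT_ROUND in the outgoing DXO reports the
# number of local iterations this round; weighted aggregators on the server can
# use it to weight this client's contribution by its amount of training.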
| NVFlare-main | tests/integration_test/data/apps/pt_use_path/custom/cifar10trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from simple_network import SimpleNetwork
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class Cifar10Validator(Executor):
def __init__(self, data_path, validate_task_name=AppConstants.TASK_VALIDATION):
super(Cifar10Validator, self).__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self.test_data = CIFAR10(root=data_path, train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
| NVFlare-main | tests/integration_test/data/apps/pt_use_path/custom/cifar10validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PTConstants:
PTServerName = "server"
PTFileModelName = "FL_global_model.pt"
PTLocalModelName = "local_model.pt"
| NVFlare-main | tests/integration_test/data/apps/pt_use_path/custom/pt_constants.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/integration_test/data/apps/sag_exception/custom/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
class ExceptionTrainer(Executor):
def __init__(
self,
sleep_time=0,
):
        # Init functions of components should be very minimal. __init__ is
        # called when the job config JSON is read; a heavy init would stall
        # config loading for a long time.
super().__init__()
self._sleep_time = sleep_time
def handle_event(self, event_type: str, fl_ctx: FLContext):
# if event_type == EventType.START_RUN:
# # Create all major components here.
# pass
# elif event_type == EventType.END_RUN:
# # Clean up resources (closing files, joining threads, removing dirs etc)
# pass
pass
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
# Any kind of tasks waiting should check abort_signal regularly
count, interval = 0, 0.5
while count < self._sleep_time:
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
time.sleep(interval)
count += interval
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
| NVFlare-main | tests/integration_test/data/apps/sag_exception/custom/exception_trainer.py |
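The execute method above sleeps in half-second slices and re-checks abort_signal on each slice, so an abort takes effect quickly instead of waiting out one long sleep. A self-contained sketch of the same pattern, with a hypothetical stand-in for nvflare.apis.signal.Signal:

import time

class FakeSignal:
    # Hypothetical simplification of nvflare.apis.signal.Signal.
    def __init__(self):
        self.triggered = False

def interruptible_sleep(total_seconds, signal, interval=0.5):
    """Sleep in small slices, returning early if the signal fires."""
    waited = 0.0
    while waited < total_seconds:
        if signal.triggered:
            return False  # aborted before the full wait elapsed
        time.sleep(interval)
        waited += interval
    return True  # completed the full wait

sig = FakeSignal()
print(interruptible_sleep(1.0, sig))  # True: ran to completion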
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNetwork(nn.Module):
def __init__(self):
        super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| NVFlare-main | tests/integration_test/data/apps/pt_init_client/custom/simple_network.py |
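As a quick sanity check (not part of the app), the network maps a batch of CIFAR-10-shaped inputs (3x32x32) to 10 logits per sample; the 16 * 5 * 5 input size of fc1 follows from two conv+pool stages shrinking 32x32 down to 5x5 with 16 channels. Assuming simple_network.py above is importable from the working directory:

import torch

from simple_network import SimpleNetwork

net = SimpleNetwork()
x = torch.randn(4, 3, 32, 32)  # batch of 4 CIFAR-10-sized images
out = net(x)
print(out.shape)  # torch.Size([4, 10]) -- one logit per class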
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import torch.cuda
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class PTModelLocator(ModelLocator):
def __init__(self):
super().__init__()
self.model = SimpleNetwork()
def get_model_names(self, fl_ctx: FLContext) -> List[str]:
return [PTConstants.PTServerName]
def locate_model(self, model_name, fl_ctx: FLContext) -> Union[DXO, None]:
if model_name == PTConstants.PTServerName:
try:
server_run_dir = fl_ctx.get_engine().get_workspace().get_app_dir(fl_ctx.get_job_id())
model_path = os.path.join(server_run_dir, PTConstants.PTFileModelName)
if not os.path.exists(model_path):
return None
# Load the torch model
device = "cuda" if torch.cuda.is_available() else "cpu"
data = torch.load(model_path, map_location=device)
# Set up the persistence manager.
if self.model:
default_train_conf = {"train": {"model": type(self.model).__name__}}
else:
default_train_conf = None
# Use persistence manager to get learnable
persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=default_train_conf)
ml = persistence_manager.to_model_learnable(exclude_vars=None)
# Create dxo and return
return model_learnable_to_dxo(ml)
except Exception as e:
self.log_error(fl_ctx, f"Error in retrieving {model_name}: {e}.", fire_event=False)
return None
else:
self.log_error(fl_ctx, f"PTModelLocator doesn't recognize name: {model_name}", fire_event=False)
return None
| NVFlare-main | tests/integration_test/data/apps/pt_init_client/custom/pt_model_locator.py |
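locate_model loads the server checkpoint with map_location so a model saved on a GPU machine can be read on a CPU-only host. A self-contained sketch of that save/load round trip (the tiny module and temp path are illustrative):

import os
import tempfile

import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # illustrative stand-in for the server model
path = os.path.join(tempfile.mkdtemp(), "FL_global_model.pt")
torch.save(model.state_dict(), path)

# map_location remaps saved storages to the available device at load time.
device = "cuda" if torch.cuda.is_available() else "cpu"
data = torch.load(path, map_location=device)
print(sorted(data.keys()))  # ['bias', 'weight']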
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import torch
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from torch import nn
from torch.optim import SGD
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.model import make_model_learnable, model_learnable_to_dxo
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class Cifar10Trainer(Executor):
def __init__(
self,
data_path="~/data",
lr=0.01,
epochs=5,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
exclude_vars=None,
pre_train_task_name=AppConstants.TASK_GET_WEIGHTS,
):
"""Cifar10 Trainer handles train and submit_model tasks. During train_task, it trains a
simple network on CIFAR10 dataset. For submit_model task, it sends the locally trained model
(if present) to the server.
Args:
lr (float, optional): Learning rate. Defaults to 0.01
epochs (int, optional): Epochs. Defaults to 5
train_task_name (str, optional): Task name for train task. Defaults to "train".
submit_model_task_name (str, optional): Task name for submit model. Defaults to "submit_model".
exclude_vars (list): List of variables to exclude during model loading.
pre_train_task_name: Task name for pre train task, i.e., sending initial model weights.
"""
super().__init__()
self._lr = lr
self._epochs = epochs
self._train_task_name = train_task_name
self._pre_train_task_name = pre_train_task_name
self._submit_model_task_name = submit_model_task_name
self._exclude_vars = exclude_vars
# Training setup
self.model = SimpleNetwork()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.loss = nn.CrossEntropyLoss()
self.optimizer = SGD(self.model.parameters(), lr=lr, momentum=0.9)
# Create Cifar10 dataset for training.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self._train_dataset = CIFAR10(root=data_path, transform=transforms, download=True, train=True)
self._train_loader = DataLoader(self._train_dataset, batch_size=4, shuffle=True)
self._n_iterations = len(self._train_loader)
# Setup the persistence manager to save PT model.
# The default training configuration is used by persistence manager
# in case no initial model is found.
self._default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self._default_train_conf
)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
try:
if task_name == self._pre_train_task_name:
# Get the new state dict and send as weights
return self._get_model_weights()
elif task_name == self._train_task_name:
# Get model weights
try:
dxo = from_shareable(shareable)
                except Exception:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
                if dxo.data_kind != DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
self._local_train(fl_ctx, torch_weights, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self._save_local_model(fl_ctx)
# Get the new state dict and send as weights
return self._get_model_weights()
elif task_name == self._submit_model_task_name:
# Load local model
ml = self._load_local_model(fl_ctx)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
self.log_exception(fl_ctx, f"Exception in simple trainer: {e}.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _get_model_weights(self) -> Shareable:
# Get the new state dict and send as weights
weights = {k: v.cpu().numpy() for k, v in self.model.state_dict().items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS, data=weights, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self._n_iterations}
)
return outgoing_dxo.to_shareable()
def _local_train(self, fl_ctx, weights, abort_signal):
# Set the model weights
self.model.load_state_dict(state_dict=weights)
# Basic training
self.model.train()
for epoch in range(self._epochs):
running_loss = 0.0
for i, batch in enumerate(self._train_loader):
if abort_signal.triggered:
# If abort_signal is triggered, we simply return.
# The outside function will check it again and decide steps to take.
return
images, labels = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
predictions = self.model(images)
cost = self.loss(predictions, labels)
cost.backward()
self.optimizer.step()
running_loss += cost.cpu().detach().numpy() / images.size()[0]
if i % 3000 == 0:
self.log_info(
fl_ctx, f"Epoch: {epoch}/{self._epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}"
)
running_loss = 0.0
def _save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
def _load_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(
data=torch.load(model_path), default_train_conf=self._default_train_conf
)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self._exclude_vars)
return ml
| NVFlare-main | tests/integration_test/data/apps/pt_init_client/custom/cifar10trainer.py |
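_get_model_weights ships the state dict as numpy arrays inside a DXO, and the train branch of execute converts the arrays back to tensors before load_state_dict. The round trip in isolation, on an illustrative stand-in module:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # illustrative stand-in

# Outgoing: detach to CPU numpy arrays, as in _get_model_weights.
weights = {k: v.cpu().numpy() for k, v in model.state_dict().items()}

# Incoming: back to tensors, then load, as in the train branch of execute.
torch_weights = {k: torch.as_tensor(v) for k, v in weights.items()}
model.load_state_dict(torch_weights)
print("round trip ok")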
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from simple_network import SimpleNetwork
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class Cifar10Validator(Executor):
def __init__(self, data_path="~/data", validate_task_name=AppConstants.TASK_VALIDATION):
super().__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
test_data = CIFAR10(root=data_path, train=False, transform=transforms)
self._test_loader = DataLoader(test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
                except Exception:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
                if dxo.data_kind != DataKind.WEIGHTS:
                    self.log_error(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self._validate(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
            except Exception:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def _validate(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
            for images, labels in self._test_loader:
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
| NVFlare-main | tests/integration_test/data/apps/pt_init_client/custom/cifar10validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PTConstants:
PTServerName = "server"
PTFileModelName = "FL_global_model.pt"
PTLocalModelName = "local_model.pt"
PTModelsDir = "models"
| NVFlare-main | tests/integration_test/data/apps/pt_init_client/custom/pt_constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNetwork(nn.Module):
def __init__(self):
        super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| NVFlare-main | tests/integration_test/data/apps/pt_use_name/custom/simple_network.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import torch.cuda
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class PTModelLocator(ModelLocator):
def __init__(self, exclude_vars=None):
        super().__init__()
self.model = SimpleNetwork()
self.exclude_vars = exclude_vars
def get_model_names(self, fl_ctx: FLContext) -> List[str]:
return [PTConstants.PTServerName]
def locate_model(self, model_name, fl_ctx: FLContext) -> Union[DXO, None]:
if model_name == PTConstants.PTServerName:
try:
server_run_dir = fl_ctx.get_engine().get_workspace().get_app_dir(fl_ctx.get_job_id())
model_path = os.path.join(server_run_dir, PTConstants.PTFileModelName)
if not os.path.exists(model_path):
return None
# Load the torch model
device = "cuda" if torch.cuda.is_available() else "cpu"
data = torch.load(model_path, map_location=device)
# Set up the persistence manager.
if self.model:
default_train_conf = {"train": {"model": type(self.model).__name__}}
else:
default_train_conf = None
# Use persistence manager to get learnable
persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=default_train_conf)
                ml = persistence_manager.to_model_learnable(exclude_vars=self.exclude_vars)
# Create dxo and return
return model_learnable_to_dxo(ml)
except Exception as e:
self.log_error(fl_ctx, f"Error in retrieving {model_name}: {e}.", fire_event=False)
return None
else:
self.log_error(fl_ctx, f"PTModelLocator doesn't recognize name: {model_name}", fire_event=False)
return None
| NVFlare-main | tests/integration_test/data/apps/pt_use_name/custom/pt_model_locator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import torch
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from torch import nn
from torch.optim import SGD
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.model import make_model_learnable, model_learnable_to_dxo
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class Cifar10Trainer(Executor):
def __init__(
self,
data_path,
lr=0.01,
epochs=5,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
exclude_vars=None,
):
"""CIFAR10 Trainer handles train and submit_model tasks. During train_task, it trains a
simple network on CIFAR10 dataset. For submit_model task, it sends the locally trained model
(if present) to the server.
Args:
lr (float, optional): Learning rate. Defaults to 0.01
epochs (int, optional): Epochs. Defaults to 5
train_task_name (str, optional): Task name for train task. Defaults to "train".
submit_model_task_name (str, optional): Task name for submit model. Defaults to "submit_model".
exclude_vars (list): List of variables to exclude during model loading.
"""
        super().__init__()
self._lr = lr
self._epochs = epochs
self._train_task_name = train_task_name
self._submit_model_task_name = submit_model_task_name
self._exclude_vars = exclude_vars
# Training setup
self.model = SimpleNetwork()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.loss = nn.CrossEntropyLoss()
self.optimizer = SGD(self.model.parameters(), lr=lr, momentum=0.9)
# Create CIFAR10 dataset for training.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self._train_dataset = CIFAR10(root=data_path, transform=transforms, train=True)
self._train_loader = DataLoader(self._train_dataset, batch_size=4, shuffle=True)
self._n_iterations = len(self._train_loader)
# Set up the persistence manager to save PT model.
# The default training configuration is used by persistence manager
# in case no initial model is found.
self._default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self._default_train_conf
)
def local_train(self, fl_ctx, weights, abort_signal):
# Set the model weights
self.model.load_state_dict(state_dict=weights)
# Basic training
self.model.train()
for epoch in range(self._epochs):
running_loss = 0.0
for i, batch in enumerate(self._train_loader):
if abort_signal.triggered:
# If abort_signal is triggered, we simply return.
# The outside function will check it again and decide steps to take.
return
images, labels = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
predictions = self.model(images)
cost = self.loss(predictions, labels)
cost.backward()
self.optimizer.step()
running_loss += cost.cpu().detach().numpy() / images.size()[0]
if i % 3000 == 0:
self.log_info(
fl_ctx, f"Epoch: {epoch}/{self._epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}"
)
running_loss = 0.0
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
try:
if task_name == self._train_task_name:
# Get model weights
try:
dxo = from_shareable(shareable)
                except Exception:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
                if dxo.data_kind != DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
self.local_train(fl_ctx, torch_weights, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self.save_local_model(fl_ctx)
# Get the new state dict and send as weights
new_weights = self.model.state_dict()
new_weights = {k: v.cpu().numpy() for k, v in new_weights.items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS,
data=new_weights,
meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self._n_iterations},
)
return outgoing_dxo.to_shareable()
elif task_name == self._submit_model_task_name:
# Load local model
ml = self.load_local_model(fl_ctx)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
        except Exception:
self.log_exception(fl_ctx, "Exception in simple trainer.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, "models")
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
def load_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, "models")
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(
data=torch.load(model_path), default_train_conf=self._default_train_conf
)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self._exclude_vars)
return ml
| NVFlare-main | tests/integration_test/data/apps/pt_use_name/custom/cifar10trainer.py |
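save_local_model and load_local_model persist whatever dict the persistence manager produces using plain torch.save and torch.load. Stripped of the NVFlare wrappers, the underlying round trip looks roughly like this (the dict layout and paths are illustrative, not the persistence manager's actual format):

import os
import tempfile

import torch
import torch.nn as nn

models_dir = os.path.join(tempfile.mkdtemp(), "models")
os.makedirs(models_dir, exist_ok=True)
model_path = os.path.join(models_dir, "local_model.pt")

model = nn.Linear(4, 2)  # illustrative stand-in
# Persist a plain dict, mirroring the spirit of to_persistence_dict().
torch.save({"model": model.state_dict()}, model_path)

restored = torch.load(model_path)
model.load_state_dict(restored["model"])
print("local model restored")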
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from simple_network import SimpleNetwork
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class Cifar10Validator(Executor):
def __init__(self, data_path, validate_task_name=AppConstants.TASK_VALIDATION):
        super().__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self.test_data = CIFAR10(root=data_path, train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
                except Exception:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
                if dxo.data_kind != DataKind.WEIGHTS:
                    self.log_error(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
            except Exception:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
            for images, labels in self.test_loader:
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
| NVFlare-main | tests/integration_test/data/apps/pt_use_name/custom/cifar10validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PTConstants:
PTServerName = "server"
PTFileModelName = "FL_global_model.pt"
PTLocalModelName = "local_model.pt"
| NVFlare-main | tests/integration_test/data/apps/pt_use_name/custom/pt_constants.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/integration_test/data/apps/np_sag_weights_diff/custom/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.np.constants import NPConstants
from nvflare.app_common.np.np_trainer import NPTrainer as BaseNPTrainer
class NPTrainer(BaseNPTrainer):
def __init__(
self,
delta=1,
sleep_time=0,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
model_name="best_numpy.npy",
model_dir="model",
):
super().__init__(
delta=delta,
sleep_time=sleep_time,
train_task_name=train_task_name,
submit_model_task_name=submit_model_task_name,
model_name=model_name,
model_dir=model_dir,
)
def _train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal):
# First we extract DXO from the shareable.
try:
incoming_dxo = from_shareable(shareable)
except Exception as e:
self.system_panic(f"Unable to convert shareable to model definition. Exception {e.__str__()}", fl_ctx)
return make_reply(ReturnCode.BAD_TASK_DATA)
# Information about workflow is retrieved from the shareable header.
current_round = shareable.get_header(AppConstants.CURRENT_ROUND, None)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS, None)
# Ensure that data is of type weights. Extract model data.
if incoming_dxo.data_kind != DataKind.WEIGHTS:
self.system_panic("Model DXO should be of kind DataKind.WEIGHTS.", fl_ctx)
return make_reply(ReturnCode.BAD_TASK_DATA)
weights = incoming_dxo.data
# Display properties.
self.log_info(fl_ctx, f"Incoming data kind: {incoming_dxo.data_kind}")
self.log_info(fl_ctx, f"Model: \n{weights}")
self.log_info(fl_ctx, f"Current Round: {current_round}")
self.log_info(fl_ctx, f"Total Rounds: {total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Doing some dummy training.
new_weights = {}
if weights:
if NPConstants.NUMPY_KEY in weights:
new_weights[NPConstants.NUMPY_KEY] = weights[NPConstants.NUMPY_KEY] + self._delta
else:
self.log_error(fl_ctx, "numpy_key not found in model.")
return make_reply(ReturnCode.BAD_TASK_DATA)
else:
self.log_error(fl_ctx, "No model weights found in shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
        # Check abort_signal regularly so the task can be stopped promptly.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save local numpy model
try:
            self._save_local_model(fl_ctx, new_weights)
except Exception as e:
self.log_error(fl_ctx, f"Exception in saving local model: {e}.")
self.log_info(
fl_ctx,
f"Model after training: {new_weights}",
)
# Checking abort signal again.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
weights_diff = {k: new_weights[k] - weights[k] for k in new_weights.keys()}
# Prepare a DXO for our updated model. Create shareable and return
outgoing_dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=weights_diff, meta={})
return outgoing_dxo.to_shareable()
| NVFlare-main | tests/integration_test/data/apps/np_sag_weights_diff/custom/np_trainer.py |
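The dummy training step adds a constant delta to the numpy model and returns only the difference (DataKind.WEIGHT_DIFF) for the server to apply to the global model. The arithmetic in isolation:

import numpy as np

delta = 1
weights = {"numpy_key": np.array([[1.0, 2.0], [3.0, 4.0]])}

# "Training": shift every entry by delta, as in _train above.
new_weights = {k: v + delta for k, v in weights.items()}

# Ship the difference rather than the full model.
weights_diff = {k: new_weights[k] - weights[k] for k in new_weights}
print(weights_diff["numpy_key"])  # every entry equals delta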
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.client import Client
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Controller, Task
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learnable_persistor import LearnablePersistor
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
def _prepare_training_ctx(client_task: ClientTask, fl_ctx: FLContext):
task = client_task.task
fl_ctx.set_prop("current_round", task.props["round"], private=False)
fl_ctx.set_prop("total_rounds", task.props["total"], private=False)
def _process_training_result(client_task: ClientTask, fl_ctx: FLContext):
task = client_task.task
task.data = client_task.result
class CustomController(Controller):
def __init__(
self,
min_clients: int,
num_rounds: int,
persistor_id="persistor",
shareable_generator_id="shareable_generator",
):
Controller.__init__(self)
self.persistor_id = persistor_id
self.shareable_generator_id = shareable_generator_id
self.persistor = None
self.shareable_gen = None
# config data
self._min_clients = min_clients
self._num_rounds = num_rounds
# workflow phases: init, train
self._phase = "init"
self._global_model = None
def start_controller(self, fl_ctx: FLContext):
self._phase = "init"
self.shareable_gen = self._engine.get_component(self.shareable_generator_id)
if not isinstance(self.shareable_gen, ShareableGenerator):
self.system_panic("shareable_gen should be an instance of ShareableGenerator.", fl_ctx)
self.persistor = self._engine.get_component(self.persistor_id)
if not isinstance(self.persistor, LearnablePersistor):
self.system_panic("persistor should be an instance of LearnablePersistor.", fl_ctx)
self._global_model = self.persistor.load(fl_ctx)
fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, self._global_model, private=True, sticky=True)
def process_result_of_unknown_task(
self,
client: Client,
task_name: str,
client_task_id: str,
result: Shareable,
fl_ctx: FLContext,
):
return None
def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
self._phase = "train"
for r in range(self._num_rounds):
            if abort_signal.triggered:
                return
task = Task(
name="poc",
data=self.shareable_gen.learnable_to_shareable(self._global_model, fl_ctx),
props={"round": r, "total": self._num_rounds},
timeout=0,
before_task_sent_cb=_prepare_training_ctx,
result_received_cb=_process_training_result,
)
client_list = self._engine.get_clients()
for c in client_list:
self.log_info(fl_ctx, f"@@@ client name {c.name}")
self.log_info(fl_ctx, f"@@@ Broadcast and wait {task.name}")
self.broadcast_and_wait(
task=task,
fl_ctx=fl_ctx,
targets=None,
min_responses=self._min_clients,
abort_signal=abort_signal,
)
self.log_info(fl_ctx, f"@@@ Broadcast and wait - end {task.name}")
self._global_model = self.shareable_gen.shareable_to_learnable(task.data, fl_ctx)
self.persistor.save(self._global_model, fl_ctx)
self.logger.info("model saved")
def stop_controller(self, fl_ctx: FLContext):
self._phase = "finished"
| NVFlare-main | tests/integration_test/data/apps/tb_streaming/custom/custom_controller.py |
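The controller attaches per-round metadata to each Task through props and moves data with two callbacks: before_task_sent_cb copies round info into the FL context, and result_received_cb copies the client result back onto the task. A stripped-down sketch of that wiring; the Task and ClientTask classes here are hypothetical simplifications of the NVFlare ones:

class Task:
    # Hypothetical stand-in for nvflare.apis.impl.controller.Task.
    def __init__(self, name, data, props):
        self.name, self.data, self.props = name, data, props

class ClientTask:
    # Hypothetical stand-in pairing a task with one client's result.
    def __init__(self, task):
        self.task = task
        self.result = None

def prepare_ctx(client_task, ctx):
    ctx["current_round"] = client_task.task.props["round"]

def process_result(client_task, ctx):
    client_task.task.data = client_task.result

task = Task("poc", data=None, props={"round": 3, "total": 5})
ct = ClientTask(task)
ctx = {}
prepare_ctx(ct, ctx)        # runs before the task is sent
ct.result = {"weights": 1}  # what a client would return
process_result(ct, ctx)     # runs when the result arrives
print(ctx, task.data)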
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/integration_test/data/apps/tb_streaming/custom/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from nvflare.apis.analytix import AnalyticsDataType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.widgets.streaming import create_analytic_dxo, send_analytic_dxo
class CustomExecutor(Executor):
def __init__(self, task_name: str = "poc"):
super().__init__()
if not isinstance(task_name, str):
raise TypeError("task name should be a string.")
self.task_name = task_name
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
if task_name == self.task_name:
peer_ctx = fl_ctx.get_prop(FLContextKey.PEER_CONTEXT)
r = peer_ctx.get_prop("current_round")
number = random.random()
# send analytics
dxo = create_analytic_dxo(
tag="random_number", value=number, data_type=AnalyticsDataType.SCALAR, global_step=r
)
send_analytic_dxo(comp=self, dxo=dxo, fl_ctx=fl_ctx)
dxo = create_analytic_dxo(
tag="debug_msg", value="Hello world", data_type=AnalyticsDataType.TEXT, global_step=r
)
send_analytic_dxo(comp=self, dxo=dxo, fl_ctx=fl_ctx)
time.sleep(2.0)
return shareable
else:
raise ValueError(f'No such supported task "{task_name}". Implemented task name is {self.task_name}')
| NVFlare-main | tests/integration_test/data/apps/tb_streaming/custom/custom_executor.py |
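create_analytic_dxo packages a metric or text message into a DXO that the analytics widgets can stream (e.g., to TensorBoard). Assuming an environment with nvflare installed, such a DXO can be built and inspected outside a running job, since no FL context is needed just to create it:

from nvflare.apis.analytix import AnalyticsDataType
from nvflare.app_common.widgets.streaming import create_analytic_dxo

dxo = create_analytic_dxo(
    tag="random_number", value=0.42, data_type=AnalyticsDataType.SCALAR, global_step=1
)
print(dxo.data_kind, dxo.data)  # inspect the packaged analytics payload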
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
import tempfile
import time
from nvflare.tool.poc.poc_commands import _prepare_poc
from .constants import CLIENT_NVF_CONFIG, CLIENT_SCRIPT, SERVER_NVF_CONFIG, SERVER_SCRIPT
from .site_launcher import ServerProperties, SiteLauncher, SiteProperties, run_command_in_subprocess
from .utils import cleanup_job_and_snapshot, update_job_store_path_in_workspace, update_snapshot_path_in_workspace
def _get_client_name(client_id: int):
return f"site-{client_id}"
class POCSiteLauncher(SiteLauncher):
def __init__(self, n_servers: int, n_clients: int):
"""Launches and keeps track of servers and clients."""
super().__init__()
self.poc_temp_dir = tempfile.mkdtemp()
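        # mkdtemp creates the directory; remove it right away so that
        # _prepare_poc can create a fresh workspace at this unique path.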
if os.path.exists(self.poc_temp_dir):
shutil.rmtree(self.poc_temp_dir)
_prepare_poc(clients=[], number_of_clients=n_clients, workspace=self.poc_temp_dir)
self.poc_dir = os.path.join(self.poc_temp_dir, "example_project", "prod_00")
print(f"Using POC at dir: {self.poc_dir}")
self.n_servers = n_servers
self.n_clients = n_clients
def start_overseer(self):
raise RuntimeError("POC mode does not have overseer.")
def stop_overseer(self):
pass
def prepare_workspace(self) -> str:
update_job_store_path_in_workspace(self.poc_dir, "server")
update_snapshot_path_in_workspace(self.poc_dir, "server")
cleanup_job_and_snapshot(self.poc_dir, "server")
return self.poc_dir
def start_servers(self):
for i in range(self.n_servers):
self.start_server(i)
time.sleep(1)
def start_clients(self):
for i in range(1, self.n_clients + 1):
self.start_client(i)
def start_server(self, server_id: int):
# keeping the signature of start_server() consistent, but POC should only have one server
# with server_id = 0
server_name = "server"
server_dir_name = os.path.join(self.poc_dir, server_name)
command = (
f"{sys.executable} -m {SERVER_SCRIPT}"
f" -m {server_dir_name} -s {SERVER_NVF_CONFIG}"
" --set secure_train=true org=nvidia config_folder=config"
)
process = run_command_in_subprocess(command)
self.server_properties[server_name] = ServerProperties(
name=server_name, root_dir=server_dir_name, process=process, port=f"8{server_id}03"
)
print(f"Launched server ({server_name}) using {command}. process_id: {process.pid}")
def start_client(self, client_id: int):
client_name = _get_client_name(client_id)
client_dir_name = os.path.join(self.poc_dir, client_name)
# Launch the new client
command = (
f"{sys.executable} -m {CLIENT_SCRIPT}"
f" -m {client_dir_name} -s {CLIENT_NVF_CONFIG}"
f" --set secure_train=true org=nvidia config_folder=config uid={client_name}"
)
process = run_command_in_subprocess(command)
self.client_properties[client_name] = SiteProperties(
name=client_name, root_dir=client_dir_name, process=process
)
print(f"Launched client {client_name} process using {command}. process_id: {process.pid}")
def cleanup(self):
cleanup_job_and_snapshot(self.poc_dir, "server")
print(f"Deleting temporary directory: {self.poc_temp_dir}.")
shutil.rmtree(self.poc_temp_dir)
| NVFlare-main | tests/integration_test/src/poc_site_launcher.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import typing
from nvflare.fuel.hci.client.api_status import APIStatus
if typing.TYPE_CHECKING:
from .nvf_test_driver import NVFTestDriver
import time
from abc import ABC, abstractmethod
from nvflare.fuel.hci.client.fl_admin_api import FLAdminAPI
from tests.integration_test.src.utils import check_job_done, run_admin_api_tests
class _CmdHandler(ABC):
@abstractmethod
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
pass
class _StartHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
if command_args[0] == "server":
if len(command_args) == 2:
# if server id is provided
server_ids = [command_args[1]]
else:
# start all servers
server_ids = list(admin_controller.site_launcher.server_properties.keys())
for sid in server_ids:
admin_controller.site_launcher.start_server(sid)
admin_controller.super_admin_api.login(username=admin_controller.super_admin_user_name)
elif command_args[0] == "client":
if len(command_args) == 2:
# if client id is provided
client_ids = [command_args[1]]
else:
# start all clients
client_ids = list(admin_controller.site_launcher.client_properties.keys())
for cid in client_ids:
admin_controller.site_launcher.start_client(cid)
elif command_args[0] == "overseer":
admin_controller.site_launcher.start_overseer()
else:
raise RuntimeError(f"Target {command_args[0]} is not supported.")
class _KillHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
if command_args[0] == "server":
if len(command_args) == 2:
# if server id is provided
server_id = command_args[1]
else:
# kill active server
server_id = admin_controller.site_launcher.get_active_server_id(admin_api.port)
admin_controller.site_launcher.stop_server(server_id)
elif command_args[0] == "overseer":
admin_controller.site_launcher.stop_overseer()
elif command_args[0] == "client":
if len(command_args) == 2:
# if client id is provided
client_ids = [command_args[1]]
else:
# close all clients
client_ids = list(admin_controller.site_launcher.client_properties.keys())
for cid in client_ids:
admin_api.remove_client([admin_controller.site_launcher.client_properties[cid].name])
admin_controller.site_launcher.stop_client(cid)
class _SleepHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
time.sleep(int(command_args[0]))
class _AdminCommandsHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
run_admin_api_tests(admin_api)
class _NoopHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
pass
class _TestDoneHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
admin_controller.test_done = True
class _SubmitJobHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
job_name = str(command_args[0])
response = admin_api.submit_job(job_name)
if response["status"] == APIStatus.ERROR_RUNTIME:
admin_controller.admin_api_response = response.get("raw", {}).get("data")
elif response["status"] == APIStatus.ERROR_AUTHORIZATION:
admin_controller.admin_api_response = response["details"]
elif response["status"] == APIStatus.SUCCESS:
admin_controller.job_id = response["details"]["job_id"]
admin_controller.last_job_name = job_name
class _CloneJobHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
response = admin_api.clone_job(admin_controller.job_id)
if response["status"] == APIStatus.ERROR_RUNTIME:
admin_controller.admin_api_response = response.get("raw", {}).get("data")
elif response["status"] == APIStatus.ERROR_AUTHORIZATION:
admin_controller.admin_api_response = response["details"]
if response["status"] == APIStatus.SUCCESS:
admin_controller.job_id = response["details"]["job_id"]
class _AbortJobHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
response = admin_api.abort_job(admin_controller.job_id)
if response["status"] == APIStatus.ERROR_RUNTIME:
admin_controller.admin_api_response = response.get("raw", {}).get("data")
elif response["status"] == APIStatus.ERROR_AUTHORIZATION:
admin_controller.admin_api_response = response["details"]
class _ListJobHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
response = admin_api.list_jobs()
assert response["status"] == APIStatus.SUCCESS
class _ShellCommandHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
if str(command_args[0]) == "ls":
response = admin_api.ls_target(str(command_args[1]))
if response["status"] == APIStatus.ERROR_RUNTIME:
admin_controller.admin_api_response = response.get("raw", {}).get("data")
elif response["status"] == APIStatus.ERROR_AUTHORIZATION:
admin_controller.admin_api_response = response["details"]["message"]
elif response["status"] == APIStatus.SUCCESS:
admin_controller.admin_api_response = " ".join(response["details"]["message"].splitlines())
class _CheckJobHandler(_CmdHandler):
def handle(self, command_args: list, admin_controller: NVFTestDriver, admin_api: FLAdminAPI):
timeout = 1
if command_args:
timeout = float(command_args[0])
start_time = time.time()
result = False
if admin_controller.job_id:
while time.time() - start_time < timeout:
result = check_job_done(job_id=admin_controller.job_id, admin_api=admin_controller.super_admin_api)
if result:
break
time.sleep(0.5)
admin_controller.test_done = result
| NVFlare-main | tests/integration_test/src/action_handlers.py |
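Each _CmdHandler subclass above encapsulates one admin-console action, which lets a test driver dispatch commands through a name-to-handler table. A minimal sketch of that dispatch pattern (the registry and handler names are illustrative, not the driver's actual table):

from abc import ABC, abstractmethod

class CmdHandler(ABC):
    # Illustrative mirror of the _CmdHandler interface above.
    @abstractmethod
    def handle(self, command_args: list):
        ...

class SleepHandler(CmdHandler):
    def handle(self, command_args: list):
        print(f"would sleep {command_args[0]} seconds")

class NoopHandler(CmdHandler):
    def handle(self, command_args: list):
        pass

HANDLERS = {"sleep": SleepHandler(), "noop": NoopHandler()}

def dispatch(command: str):
    name, *args = command.split()
    HANDLERS[name].handle(args)  # raises KeyError for unknown commands

dispatch("sleep 3")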
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# need to be consistent with provision
RESOURCE_CONFIG = "resources.json"
DEFAULT_RESOURCE_CONFIG = "resources.json.default"
SERVER_NVF_CONFIG = "fed_server.json"
CLIENT_NVF_CONFIG = "fed_client.json"
FILE_STORAGE = "nvflare.app_common.storages.filesystem_storage.FilesystemStorage"
SERVER_SCRIPT = "nvflare.private.fed.app.server.server_train"
CLIENT_SCRIPT = "nvflare.private.fed.app.client.client_train"
# provision
PROVISION_SCRIPT = "nvflare.cli provision"
# preflight check
PREFLIGHT_CHECK_SCRIPT = "nvflare.cli preflight_check"
| NVFlare-main | tests/integration_test/src/constants.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .nvf_test_driver import NVFTestDriver, NVFTestError
from .oa_laucher import OALauncher
from .poc_site_launcher import POCSiteLauncher
from .provision_site_launcher import ProvisionSiteLauncher
from .site_launcher import ServerProperties, SiteProperties
from .utils import cleanup_path, generate_test_config_yaml_for_example, read_yaml, run_command_in_subprocess
| NVFlare-main | tests/integration_test/src/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
from threading import Thread
from nvflare.ha.overseer_agent import HttpOverseerAgent
class OALauncher:
"""Overseer and overseer agent launcher."""
def __init__(self):
self._agent_dict = dict(server=dict(), client=dict())
self._overseer_process = None
def start_overseer(self):
new_env = os.environ.copy()
command = [sys.executable, "-m", "nvflare.ha.overseer.overseer"]
process = subprocess.Popen(
command,
preexec_fn=os.setsid,
env=new_env,
)
print("Starting overseer ...")
self._overseer_process = process
def _get_agent(self, agent_id):
agent = self._agent_dict["server"].get(agent_id)
if agent is not None:
return agent
agent = self._agent_dict["client"].get(agent_id)
if agent is not None:
return agent
raise ValueError(f"{agent_id} not found in current agent list")
def start_servers(self, number):
agent_id_list = list()
for i in range(number):
agent_id = f"server{i:02d}"
agent = HttpOverseerAgent(
"server",
"http://localhost:5000/api/v1",
project="test_project",
name=agent_id,
fl_port=str(8000 + i),
admin_port=str(8100 + i),
)
thread = Thread(target=agent.start, name=agent_id)
thread.start()
time.sleep(10)
self._agent_dict["server"][agent_id] = agent
agent_id_list.append(agent_id)
return agent_id_list
def start_clients(self, number):
agent_id_list = list()
for i in range(number):
agent_id = f"client{i:02d}"
agent = HttpOverseerAgent("client", "http://localhost:5000/api/v1", project="test_project", name=agent_id)
thread = Thread(target=agent.start, name=agent_id)
thread.start()
self._agent_dict["client"][agent_id] = agent
agent_id_list.append(agent_id)
return agent_id_list
def _pause(self, agent_id):
agent = self._get_agent(agent_id)
agent.pause()
def _resume(self, agent_id):
agent = self._get_agent(agent_id)
agent.resume()
def pause_server(self, agent_id):
self._pause(agent_id)
def resume_server(self, agent_id):
self._resume(agent_id)
def pause_client(self, agent_id):
self._pause(agent_id)
def resume_client(self, agent_id):
self._resume(agent_id)
def get_primary_sp(self, agent_id):
agent = self._get_agent(agent_id)
return agent.get_primary_sp()
def get_overseer_info(self, agent_id):
agent = self._get_agent(agent_id)
return agent._overseer_info
def _stop(self, role):
stopped_list = list()
for key, agent in self._agent_dict[role].items():
agent.end()
stopped_list.append(key)
return stopped_list
def stop_servers(self):
return self._stop("server")
def stop_clients(self):
return self._stop("client")
def stop_overseer(self):
self._overseer_process.terminate()
try:
self._overseer_process.wait(timeout=10)
except subprocess.TimeoutExpired as e:
print(f"overseer failed to stop due to {e}")
| NVFlare-main | tests/integration_test/src/oa_laucher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from nvflare.apis.job_def import RunStatus
from nvflare.fuel.hci.client.api_status import APIStatus
from nvflare.fuel.hci.client.fl_admin_api_spec import TargetType
from tests.integration_test.src.action_handlers import (
_AbortJobHandler,
_AdminCommandsHandler,
_CheckJobHandler,
_CloneJobHandler,
_KillHandler,
_ListJobHandler,
_NoopHandler,
_ShellCommandHandler,
_SleepHandler,
_StartHandler,
_SubmitJobHandler,
_TestDoneHandler,
)
from tests.integration_test.src.site_launcher import SiteLauncher
from tests.integration_test.src.utils import (
check_client_status_ready,
create_admin_api,
ensure_admin_api_logged_in,
get_job_meta,
)
class NVFTestError(Exception):
pass
def _parse_workflow_states(stats_message: dict):
# {
# 'ScatterAndGather':
# {'tasks': {'train': []}, 'phase': 'train', 'current_round': 1, 'num_rounds': 2},
# 'ServerRunner':
# {'job_id': 'xxx', 'status': 'started', 'workflow': 'scatter_and_gather'}
# }
workflow_states = {}
if not stats_message:
return workflow_states
for k, v in stats_message.items():
        # each controller that inherits from nvflare/apis/impl/controller has "tasks"
if v.get("tasks"):
workflow_states[k] = v.copy()
workflow_states[k].pop("tasks")
return workflow_states
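# Illustrative sketch (not part of the original module): given the stats_message
# shown above, _parse_workflow_states keeps only controller entries (those with
# "tasks") and drops the task details, e.g.:
#     _parse_workflow_states({
#         "ScatterAndGather": {"tasks": {"train": []}, "phase": "train", "current_round": 1, "num_rounds": 2},
#         "ServerRunner": {"job_id": "xxx", "status": "started", "workflow": "scatter_and_gather"},
#     })
#     # -> {"ScatterAndGather": {"phase": "train", "current_round": 1, "num_rounds": 2}}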
def _check_dict_b_value_same_as_dict_a_for_keys_in_dict_a(dict_a: dict, dict_b: dict):
if not dict_a and not dict_b:
return True
if dict_a and not dict_b:
return False
for k in dict_a:
if isinstance(dict_a[k], dict):
if not _check_dict_b_value_same_as_dict_a_for_keys_in_dict_a(dict_a[k], dict_b[k]):
return False
elif dict_b.get(k) != dict_a[k]:
return False
return True
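# Illustrative sketch (not part of the original module): dict_a acts as the
# expected subset; the check recurses into nested dicts and ignores extra keys
# in dict_b, e.g.:
#     _check_dict_b_value_same_as_dict_a_for_keys_in_dict_a(
#         dict_a={"phase": "train", "sag": {"current_round": 1}},
#         dict_b={"phase": "train", "sag": {"current_round": 1, "num_rounds": 2}},
#     )  # -> True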
def _check_run_state(state, expected_state):
for k, v in expected_state.items():
print(f"ASSERT Expected State {k}: {v} is part of Current State {k}: {state[k]}")
if isinstance(v, dict):
assert _check_dict_b_value_same_as_dict_a_for_keys_in_dict_a(dict_a=v, dict_b=state[k])
else:
assert state[k] == v
print("\n")
def _check_event_trigger(
event_trigger,
string_to_check: str = None,
run_state: dict = None,
) -> bool:
"""check if a run state trigger an event trigger."""
if isinstance(event_trigger, dict):
if run_state is None:
raise NVFTestError("Event trigger is of dict type but run_state is not provided.")
return _check_dict_b_value_same_as_dict_a_for_keys_in_dict_a(event_trigger, run_state)
elif isinstance(event_trigger, str):
if string_to_check is None:
raise NVFTestError("Event trigger is of str type but string_to_check is not provided.")
return event_trigger in string_to_check
else:
raise NVFTestError(f"event_trigger type {type(event_trigger)} is not supported.")
def _update_run_state(stats: dict, run_state: dict, job_run_status: str):
# extract run_state from stats
# for stats structure please refer to "nvflare/private/fed/server/info_coll_cmd.py"
# {'status': <APIStatus.SUCCESS: 'SUCCESS'>,
# 'details': {
# 'message': {
# 'server': {
# 'ScatterAndGather': {
# 'tasks': {'train': []},
# 'phase': 'train',
# 'current_round': 0,
# 'num_rounds': 2},
# 'CrossSiteModelEval':
# {'tasks': {}},
# 'ServerRunner': {
# 'job_id': XXX,
# 'status': 'started',
# 'workflow': 'scatter_and_gather'
# }
# }
# }
# },
# 'raw': {'time': '2022-04-04 15:13:09.367350', 'data': [xxx], 'status': <APIStatus.SUCCESS: 'SUCCESS'>}}
prev_run_state = run_state.copy()
# parse stats
if (
stats
and "status" in stats
and stats["status"] == APIStatus.SUCCESS
and "details" in stats
and "message" in stats["details"]
and isinstance(stats["details"]["message"], dict)
and "server" in stats["details"]["message"]
):
run_state["workflows"] = _parse_workflow_states(stats_message=stats["details"]["message"]["server"])
# parse job status
run_state["run_finished"] = job_run_status == RunStatus.FINISHED_COMPLETED.value
return run_state != prev_run_state, run_state
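# Illustrative sketch (not part of the original module): after a successful
# stats query for a finished job, run_state might evolve like
#     {"run_finished": None, "workflows": None}
#     -> {"run_finished": True, "workflows": {"ScatterAndGather": {"phase": "train", "current_round": 1, "num_rounds": 2}}}
# and _update_run_state returns (changed=True, run_state) on the first poll that differs.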
class NVFTestDriver:
def __init__(self, download_root_dir: str, site_launcher: SiteLauncher, poll_period=1):
"""FL system test driver.
Args:
download_root_dir: the root dir to download things to
site_launcher (SiteLauncher): a SiteLauncher object
            poll_period (int): note that this value can't be too small,
                otherwise it will cause resource issues
"""
self.download_root_dir = download_root_dir
self.site_launcher = site_launcher
self.poll_period = poll_period
self.super_admin_api = None
self.super_admin_user_name = None
self.admin_api_response = None
self.admin_apis = {}
self.logger = logging.getLogger(self.__class__.__name__)
self.test_done = False
self.job_id = None
self.last_job_name = None
self.action_handlers = {
"start": _StartHandler(),
"kill": _KillHandler(),
"sleep": _SleepHandler(),
"no_op": _NoopHandler(),
"mark_test_done": _TestDoneHandler(),
"run_admin_commands": _AdminCommandsHandler(),
"submit_job": _SubmitJobHandler(),
"clone_job": _CloneJobHandler(),
"abort_job": _AbortJobHandler(),
"list_job": _ListJobHandler(),
"shell_commands": _ShellCommandHandler(),
"ensure_current_job_done": _CheckJobHandler(),
}
def initialize_super_user(self, workspace_root_dir: str, upload_root_dir: str, poc: bool, super_user_name: str):
self.super_admin_user_name = super_user_name
try:
admin_api = create_admin_api(
workspace_root_dir=workspace_root_dir,
upload_root_dir=upload_root_dir,
download_root_dir=self.download_root_dir,
admin_user_name=super_user_name,
poc=poc,
)
login_result = ensure_admin_api_logged_in(admin_api)
except Exception as e:
raise NVFTestError(f"create and login to admin failed: {e}")
if not login_result:
raise NVFTestError(f"initialize_super_user {super_user_name} failed.")
self.super_admin_api = admin_api
def initialize_admin_users(self, workspace_root_dir: str, upload_root_dir: str, poc: bool, admin_user_names: list):
for user_name in admin_user_names:
if user_name == self.super_admin_user_name:
continue
try:
admin_api = create_admin_api(
workspace_root_dir=workspace_root_dir,
upload_root_dir=upload_root_dir,
download_root_dir=self.download_root_dir,
admin_user_name=user_name,
poc=poc,
)
login_result = ensure_admin_api_logged_in(admin_api)
except Exception as e:
self.admin_apis = None
raise NVFTestError(f"create and login to admin failed: {e}")
if not login_result:
self.admin_apis = None
raise NVFTestError(f"initialize_admin_users {user_name} failed.")
self.admin_apis[user_name] = admin_api
def get_job_result(self, job_id: str):
command_name = "download_job"
response = self.super_admin_api.do_command(f"{command_name} {job_id}")
if response["status"] != APIStatus.SUCCESS:
raise NVFTestError(f"{command_name} failed: {response}")
run_data = {
"job_id": job_id,
"workspace_root": os.path.join(self.download_root_dir, job_id, "workspace"),
}
return run_data
def ensure_clients_started(self, num_clients: int, timeout: int):
start_time = time.time()
clients_up = False
while not clients_up:
if time.time() - start_time > timeout:
raise NVFTestError(f"Clients could not be started in {timeout} seconds.")
time.sleep(0.5)
response = self.super_admin_api.check_status(target_type=TargetType.CLIENT)
print(f"Check client status response is {response}")
if not check_client_status_ready(response):
# clients not ready
continue
            # this format comes from private/fed/server/training_cmds.py
for row in response["details"]["client_statuses"][1:]:
if row[3] != "No Jobs":
raise NVFTestError("Clients started with left-over jobs.")
# wait for all clients to come up
if len(response["details"]["client_statuses"]) < num_clients + 1:
continue
clients_up = True
print("All clients are up.")
def server_status(self):
response = self.super_admin_api.check_status(target_type=TargetType.SERVER)
if response and "status" in response and response["status"] == APIStatus.SUCCESS and "details" in response:
return response["details"]
return None
def client_status(self):
response = self.super_admin_api.check_status(target_type=TargetType.CLIENT)
if response and "status" in response and response["status"] == APIStatus.SUCCESS and "details" in response:
return response["details"]
return None
def _get_stats(self, target: str, job_id: str):
return self.super_admin_api.show_stats(job_id, target)
def _get_job_log(self, target: str, job_id: str):
job_log_file = os.path.join(job_id, "log.txt")
logs = self.super_admin_api.cat_target(target, file=job_log_file)["details"]["message"].splitlines()
return logs
def _get_site_log(self, target: str):
logs = self.super_admin_api.cat_target(target, file="log.txt")["details"]["message"].splitlines()
return logs
def _print_state(self, state: dict, length: int = 30):
self.logger.info("\n" + "-" * length)
for k, v in state.items():
self.logger.info(f"{k}: {v}")
self.logger.info("-" * length + "\n")
def _get_run_state(self, run_state):
if self.job_id and self.super_admin_api:
job_meta = get_job_meta(self.super_admin_api, job_id=self.job_id)
job_run_status = job_meta.get("status")
stats = self._get_stats(target=TargetType.SERVER, job_id=self.job_id)
# update run_state
changed, run_state = _update_run_state(stats=stats, run_state=run_state, job_run_status=job_run_status)
return run_state
def reset_test_info(self, reset_job_info=False):
self.test_done = False
self.admin_api_response = None
if reset_job_info:
self.job_id = None
self.last_job_name = None
def run_event_sequence(self, event_sequence):
run_state = {"run_finished": None, "workflows": None}
event_idx = 0
# whether event has been successfully triggered
event_triggered = [False for _ in range(len(event_sequence))]
self.test_done = False
while not self.test_done:
run_state = self._get_run_state(run_state)
if event_idx < len(event_sequence):
if not event_triggered[event_idx]:
# check if event is triggered -> then execute the corresponding actions
event_trigger = event_sequence[event_idx]["trigger"]
strings_to_check = None
# prepare to check for trigger for different trigger type
if event_trigger["type"] == "server_log":
server_logs = self._get_site_log(target=TargetType.SERVER)
strings_to_check = "\n".join(server_logs)
elif event_trigger["type"] == "client_log":
client_logs = self._get_site_log(target=event_trigger["args"]["target"])
strings_to_check = "\n".join(client_logs)
elif event_trigger["type"] == "server_job_log":
if not self.job_id:
raise NVFTestError("No submitted jobs.")
server_logs = self._get_job_log(target=TargetType.SERVER, job_id=self.job_id)
strings_to_check = "\n".join(server_logs)
elif event_trigger["type"] == "client_job_log":
if not self.job_id:
raise NVFTestError("No submitted jobs.")
client_logs = self._get_job_log(target=event_trigger["args"]["target"], job_id=self.job_id)
strings_to_check = "\n".join(client_logs)
elif event_trigger["type"] != "run_state":
raise NVFTestError(f"This trigger type {event_trigger['type']} is not supported.")
trigger_data = event_trigger["data"]
if _check_event_trigger(
event_trigger=trigger_data, string_to_check=strings_to_check, run_state=run_state
):
print(f"EVENT TRIGGER '{trigger_data}' is TRIGGERED.")
event_triggered[event_idx] = True
self.execute_actions(
actions=event_sequence[event_idx]["actions"],
admin_user_name=event_sequence[event_idx].get("admin_user_name"),
)
if event_triggered[event_idx]:
result = event_sequence[event_idx]["result"]
if result["type"] == "run_state":
# check result state only when server is up and running
if self.server_status() is not None:
run_state = self._get_run_state(run_state)
# compare run_state to expected result data from the test case
_check_run_state(state=run_state, expected_state=result["data"])
event_idx += 1
elif result["type"] == "admin_api_response":
if self.admin_api_response is None:
raise NVFTestError("Missing admin_api_response.")
assert (
self.admin_api_response == result["data"]
), f"Failed: admin_api_response: {self.admin_api_response} does not equal to result {result['data']}"
event_idx += 1
elif result["type"] == "job_submit_success":
if self.job_id is None or self.last_job_name is None:
raise NVFTestError(f"Job submission failed with: {self.admin_api_response}")
event_idx += 1
time.sleep(self.poll_period)
assert all(event_triggered), "Test failed: not all test events were triggered"
def execute_actions(self, actions, admin_user_name):
for action in actions:
tokens = action.split(" ")
command = tokens[0]
args = tokens[1:]
print(f"ACTION: {action} ADMIN_USER_NAME: {admin_user_name}")
if command not in self.action_handlers:
raise NVFTestError(f"Action {command} is not supported.")
if admin_user_name is None:
admin_api = self.super_admin_api
else:
admin_api = self.admin_apis[admin_user_name]
self.action_handlers[command].handle(command_args=args, admin_controller=self, admin_api=admin_api)
def finalize(self):
if self.super_admin_api:
if self.job_id:
self.super_admin_api.abort_job(self.job_id)
for k in self.admin_apis:
self.admin_apis[k].close()
self.super_admin_api.shutdown(target_type=TargetType.ALL)
self.super_admin_api.close()
time.sleep(1)
| NVFlare-main | tests/integration_test/src/nvf_test_driver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
class Example:
"""This class represents a standardized example structure in NVFlare."""
def __init__(
self,
root: str,
jobs_folder_in_example: str = "jobs",
requirements: str = "requirements.txt",
additional_python_path: Optional[str] = None,
prepare_data_script: Optional[str] = None,
):
self.root = os.path.abspath(root)
if not os.path.exists(self.root):
raise FileNotFoundError("Example root directory does not exist.")
self.name = os.path.basename(self.root)
self.jobs_root_dir = os.path.join(self.root, jobs_folder_in_example)
if not os.path.exists(self.jobs_root_dir):
raise FileNotFoundError("Example's jobs root directory does not exist.")
self.requirements_file = os.path.join(self.root, requirements)
if not os.path.exists(self.requirements_file):
raise FileNotFoundError("Example's requirements file does not exist.")
self.additional_python_paths = [self.root]
if additional_python_path is not None:
if not os.path.exists(additional_python_path):
raise FileNotFoundError(f"Additional python path ({additional_python_path}) does not exist")
self.additional_python_paths.append(os.path.abspath(additional_python_path))
if prepare_data_script is not None:
prepare_data_script = os.path.join(self.root, prepare_data_script)
if not os.path.exists(prepare_data_script):
raise FileNotFoundError(f"Prepare_data_script ({prepare_data_script}) does not exist")
self.prepare_data_script = prepare_data_script
| NVFlare-main | tests/integration_test/src/example.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
from typing import List
import yaml
from nvflare.apis.job_def import RunStatus
from nvflare.fuel.hci.client.api_status import APIStatus
from nvflare.fuel.hci.client.fl_admin_api import FLAdminAPI
from nvflare.fuel.hci.client.fl_admin_api_constants import FLDetailKey
from nvflare.fuel.hci.client.fl_admin_api_spec import TargetType
from nvflare.fuel.utils.class_utils import instantiate_class
from .constants import DEFAULT_RESOURCE_CONFIG, FILE_STORAGE, PROVISION_SCRIPT, RESOURCE_CONFIG
from .example import Example
OUTPUT_YAML_DIR = os.path.join("data", "test_configs", "generated")
PROJECT_YAML = os.path.join("data", "projects", "ha_1_servers_2_clients.yml")
POSTFIX = "_copy"
REQUIREMENTS_TO_EXCLUDE = ["nvflare", "jupyter", "notebook"]
def read_yaml(yaml_file_path):
if not os.path.exists(yaml_file_path):
raise RuntimeError(f"Yaml file doesnt' exist at {yaml_file_path}")
with open(yaml_file_path, "rb") as f:
data = yaml.safe_load(f)
return data
def cleanup_path(path: str):
if os.path.exists(path):
print(f"Clean up directory: {path}")
shutil.rmtree(path)
def run_provision_command(project_yaml: str, workspace: str):
command = f"{sys.executable} -m {PROVISION_SCRIPT} -p {project_yaml} -w {workspace}"
process = run_command_in_subprocess(command)
process.wait()
def run_command_in_subprocess(command):
new_env = os.environ.copy()
python_path = ":".join(sys.path)[1:] # strip leading colon
new_env["PYTHONPATH"] = python_path
process = subprocess.Popen(
shlex.split(command),
preexec_fn=os.setsid,
env=new_env,
)
return process
def _get_resource_json_file(workspace_path: str, site_name: str) -> str:
resource_json_path = os.path.join(workspace_path, site_name, "local", RESOURCE_CONFIG)
if not os.path.exists(resource_json_path):
default_json_path = os.path.join(workspace_path, site_name, "local", DEFAULT_RESOURCE_CONFIG)
if not os.path.exists(default_json_path):
raise RuntimeError(f"Missing {RESOURCE_CONFIG} at: {resource_json_path}")
resource_json_path = default_json_path
return resource_json_path
def _check_snapshot_persistor_in_resource(resource_json: dict):
if "snapshot_persistor" not in resource_json:
raise RuntimeError(f"Missing snapshot_persistor in {RESOURCE_CONFIG}")
if "args" not in resource_json["snapshot_persistor"]:
raise RuntimeError("Missing args in snapshot_persistor")
if "storage" not in resource_json["snapshot_persistor"]["args"]:
raise RuntimeError("Missing storage in snapshot_persistor's args")
if "args" not in resource_json["snapshot_persistor"]["args"]["storage"]:
raise RuntimeError("Missing args in snapshot_persistor's storage")
if "path" not in resource_json["snapshot_persistor"]["args"]["storage"]:
raise RuntimeError("Missing path in snapshot_persistor's storage")
if resource_json["snapshot_persistor"]["args"]["storage"]["path"] != FILE_STORAGE:
raise RuntimeError(f"Only support {FILE_STORAGE} storage in snapshot_persistor's args")
if "root_dir" not in resource_json["snapshot_persistor"]["args"]["storage"]["args"]:
raise RuntimeError("Missing root_dir in snapshot_persistor's storage's args")
return True
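# For reference, a minimal resources.json fragment that passes the checks above
# (sketch; the actual provisioned file contains more fields):
#     {
#       "snapshot_persistor": {
#         "args": {
#           "storage": {
#             "path": "nvflare.app_common.storages.filesystem_storage.FilesystemStorage",
#             "args": {"root_dir": "/tmp/nvflare/snapshot-storage"}
#           }
#         }
#       }
#     }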
def _get_snapshot_path_from_workspace(path: str, server_name: str) -> str:
resource_json_path = _get_resource_json_file(path, server_name)
with open(resource_json_path, "r") as f:
resource_json = json.load(f)
_check_snapshot_persistor_in_resource(resource_json)
return resource_json["snapshot_persistor"]["args"]["storage"]["args"]["root_dir"]
def update_snapshot_path_in_workspace(path: str, server_name: str, snapshot_path: str = None):
new_snapshot_path = snapshot_path if snapshot_path else tempfile.mkdtemp()
resource_json_path = _get_resource_json_file(workspace_path=path, site_name=server_name)
with open(resource_json_path, "r") as f:
resource_json = json.load(f)
_check_snapshot_persistor_in_resource(resource_json)
resource_json["snapshot_persistor"]["args"]["storage"]["args"]["root_dir"] = new_snapshot_path
with open(resource_json_path, "w") as f:
json.dump(resource_json, f)
return new_snapshot_path
def _check_job_store_in_resource(resource_json: dict):
if "components" not in resource_json:
raise RuntimeError(f"Missing components in {RESOURCE_CONFIG}")
job_manager_config = None
for c in resource_json["components"]:
if "id" in c and c["id"] == "job_manager":
job_manager_config = c
if not job_manager_config:
raise RuntimeError(f"Missing job_manager in {RESOURCE_CONFIG}")
if "args" not in job_manager_config:
raise RuntimeError("Missing args in job_manager.")
if "uri_root" not in job_manager_config["args"]:
raise RuntimeError("Missing uri_root in job_manager's args.")
def _get_job_store_path_from_workspace(path: str, server_name: str) -> str:
resource_json_path = _get_resource_json_file(path, server_name)
with open(resource_json_path, "r") as f:
resource_json = json.load(f)
_check_job_store_in_resource(resource_json)
for c in resource_json["components"]:
if "id" in c and c["id"] == "job_manager":
return c["args"]["uri_root"]
def update_job_store_path_in_workspace(path: str, server_name: str, job_store_path: str = None):
new_job_store_path = job_store_path if job_store_path else tempfile.mkdtemp()
resource_json_path = _get_resource_json_file(workspace_path=path, site_name=server_name)
with open(resource_json_path, "r") as f:
resource_json = json.load(f)
if "components" not in resource_json:
raise RuntimeError(f"Missing components in {RESOURCE_CONFIG}")
_check_job_store_in_resource(resource_json)
for c in resource_json["components"]:
if "id" in c and c["id"] == "job_manager":
c["args"]["uri_root"] = new_job_store_path
with open(resource_json_path, "w") as f:
json.dump(resource_json, f)
return new_job_store_path
def cleanup_job_and_snapshot(workspace: str, server_name: str):
job_store_path = _get_job_store_path_from_workspace(workspace, server_name)
snapshot_path = _get_snapshot_path_from_workspace(workspace, server_name)
cleanup_path(job_store_path)
cleanup_path(snapshot_path)
def get_job_meta(admin_api: FLAdminAPI, job_id: str) -> dict:
response = admin_api.do_command(f"get_job_meta {job_id}")
return response.get("meta", {}).get("job_meta", {})
def check_client_status_ready(response: dict) -> bool:
if response["status"] != APIStatus.SUCCESS:
return False
if "details" not in response:
return False
data = response.get("raw", {}).get("data", [])
if data:
for d in data:
if d.get("type") == "error":
return False
# check fuel/hci/client/fl_admin_api.py for parsing
if "client_statuses" not in response["details"]:
return False
return True
def check_job_done(job_id: str, admin_api: FLAdminAPI):
response = admin_api.check_status(target_type=TargetType.SERVER)
if response and "status" in response:
if response["status"] != APIStatus.SUCCESS:
print(f"Check server status failed: {response}.")
return False
else:
if "details" not in response:
print(f"Check server status missing details: {response}.")
return False
else:
# check if run is stopped
if (
FLDetailKey.SERVER_ENGINE_STATUS in response["details"]
and response["details"][FLDetailKey.SERVER_ENGINE_STATUS] == "stopped"
):
response = admin_api.check_status(target_type=TargetType.CLIENT)
if not check_client_status_ready(response):
print(f"Check client status failed: {response}")
return False
else:
job_meta = get_job_meta(admin_api, job_id=job_id)
job_run_status = job_meta.get("status")
for row in response["details"]["client_statuses"]:
if row[3] != "stopped":
continue
# check if the current job is completed
if job_run_status in (
RunStatus.FINISHED_COMPLETED.value,
RunStatus.FINISHED_ABORTED.value,
):
return True
return False
def run_admin_api_tests(admin_api: FLAdminAPI):
print(("\n" + "*" * 120) * 20)
print("\n" + "=" * 40)
print("\nRunning through tests of admin commands:")
print("\n" + "=" * 40)
print("\nActive SP:")
print(admin_api.get_active_sp().get("details"))
print("\nList SP:")
print(admin_api.list_sp().get("details"))
print("\nCommand: get_available_apps_to_upload")
print(admin_api.get_available_apps_to_upload())
print("\nList Jobs:")
list_jobs_return_rows = admin_api.list_jobs().get("details")
print(list_jobs_return_rows)
first_job = str(list_jobs_return_rows[0].get("job_id"))
print("\nCommand: ls server -a .")
ls_return_message = admin_api.ls_target("server", "-a", ".").get("details").get("message")
print(ls_return_message)
print("\nAssert Job {} is in the server root dir...".format(first_job))
assert first_job in ls_return_message
print("\nAborting Job {}:".format(first_job))
print("\n" + "=" * 50)
print(admin_api.abort_job(first_job).get("details").get("message"))
print("\n" + "=" * 50)
print("\nCommand: pwd")
print(admin_api.get_working_directory("server").get("details").get("message"))
print("\n" + "=" * 50)
print("Finished with admin commands testing through FLAdminAPI.")
def _replace_meta_json(meta_json_path: str):
with open(meta_json_path, "r+") as f:
job_meta = json.load(f)
if "resource_spec" in job_meta:
job_meta.pop("resource_spec")
job_meta["min_clients"] = 2
f.seek(0)
json.dump(job_meta, f, indent=4)
f.truncate()
def _replace_config_fed_server(server_json_path: str):
with open(server_json_path, "r+") as f:
config_fed_server = json.load(f)
config_fed_server["num_rounds"] = 2
config_fed_server["min_clients"] = 2
config_fed_server["TRAIN_SPLIT_ROOT"] = "/tmp/nvflare/test_data"
f.seek(0)
json.dump(config_fed_server, f, indent=4)
f.truncate()
def _replace_config_fed_client(client_json_path: str):
with open(client_json_path, "r+") as f:
config_fed_client = json.load(f)
config_fed_client["TRAIN_SPLIT_ROOT"] = "/tmp/nvflare/test_data"
config_fed_client["AGGREGATION_EPOCHS"] = 1
f.seek(0)
json.dump(config_fed_client, f, indent=4)
f.truncate()
def simplify_job(job_folder_path: str, postfix: str = POSTFIX):
new_job_folder_path = job_folder_path + postfix
shutil.copytree(job_folder_path, new_job_folder_path, dirs_exist_ok=True)
# update meta.json
_replace_meta_json(meta_json_path=os.path.join(new_job_folder_path, "meta.json"))
for root, dirs, files in os.walk(new_job_folder_path):
for file in files:
if file == "config_fed_server.json":
# set the num_rounds and TRAIN_SPLIT_ROOT in config_fed_server.json
_replace_config_fed_server(server_json_path=os.path.join(root, file))
elif file == "config_fed_client.json":
# set TRAIN_SPLIT_ROOT in config_fed_client.json
_replace_config_fed_client(client_json_path=os.path.join(root, file))
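# Illustrative usage (hypothetical path): simplify_job("/tmp/jobs/hello-pt")
# copies the job to /tmp/jobs/hello-pt_copy and rewrites its meta.json plus
# config_fed_server.json / config_fed_client.json for a quick 2-client, 2-round run.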
def generate_test_config_yaml_for_example(
example: Example,
project_yaml: str = PROJECT_YAML,
postfix: str = POSTFIX,
) -> List[str]:
"""Generates test configuration yaml for NVFlare example.
Args:
example: A well-formatted NVFlare example.
project_yaml: Project yaml file for the testing of this example.
postfix: Postfix for the newly generated job.
"""
output_yamls = []
os.makedirs(OUTPUT_YAML_DIR, exist_ok=True)
for job in os.listdir(example.jobs_root_dir):
output_yaml = os.path.join(OUTPUT_YAML_DIR, f"{example.name}_{job}.yml")
job_dir = os.path.join(example.jobs_root_dir, job)
        requirements_file = example.requirements_file  # Example already stores an absolute path
new_requirements_file = os.path.join(example.root, "temp_requirements.txt")
exclude_requirements = "\\|".join(REQUIREMENTS_TO_EXCLUDE)
setup = [
f"cp {requirements_file} {new_requirements_file}",
f"sed -i '/{exclude_requirements}/d' {new_requirements_file}",
f"pip install -r {new_requirements_file}",
]
if example.prepare_data_script is not None:
setup.append(f"bash {example.prepare_data_script}")
setup.append(f"python convert_to_test_job.py --job {job_dir} --post {postfix}")
setup.append(f"rm -f {new_requirements_file}")
config = {
"ha": True,
"jobs_root_dir": example.jobs_root_dir,
"cleanup": True,
"project_yaml": project_yaml,
"additional_python_paths": example.additional_python_paths,
"tests": [
{
"test_name": f"Test a simplified copy of job {job} for example {example.name}.",
"event_sequence": [
{
"trigger": {"type": "server_log", "data": "Server started"},
"actions": [f"submit_job {job}{postfix}"],
"result": {"type": "job_submit_success"},
},
{
"trigger": {"type": "run_state", "data": {"run_finished": True}},
"actions": ["ensure_current_job_done"],
"result": {"type": "run_state", "data": {"run_finished": True}},
},
],
"setup": setup,
"teardown": [f"rm -rf {job_dir}{postfix}"],
}
],
}
with open(output_yaml, "w") as yaml_file:
yaml.dump(config, yaml_file, default_flow_style=False)
output_yamls.append(output_yaml)
return output_yamls
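# Illustrative usage sketch (hypothetical example root):
#     example = Example(root="../../examples/hello-numpy-sag")
#     yaml_paths = generate_test_config_yaml_for_example(example)
#     # -> ["data/test_configs/generated/hello-numpy-sag_<job>.yml", ...]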
def _read_admin_json_file(admin_json_file) -> dict:
if not os.path.exists(admin_json_file):
raise RuntimeError("Missing admin json file.")
with open(admin_json_file, "r") as f:
admin_json = json.load(f)
return admin_json
def create_admin_api(workspace_root_dir, upload_root_dir, download_root_dir, admin_user_name, poc):
admin_startup_folder = os.path.join(workspace_root_dir, admin_user_name, "startup")
admin_json_file = os.path.join(admin_startup_folder, "fed_admin.json")
admin_json = _read_admin_json_file(admin_json_file)
overseer_agent = instantiate_class(
class_path=admin_json["admin"]["overseer_agent"]["path"],
init_params=admin_json["admin"]["overseer_agent"]["args"],
)
ca_cert = os.path.join(admin_startup_folder, admin_json["admin"]["ca_cert"])
client_key = os.path.join(admin_startup_folder, admin_json["admin"]["client_key"])
client_cert = os.path.join(admin_startup_folder, admin_json["admin"]["client_cert"])
admin_api = FLAdminAPI(
upload_dir=upload_root_dir,
download_dir=download_root_dir,
overseer_agent=overseer_agent,
insecure=poc,
user_name=admin_user_name,
ca_cert=ca_cert,
client_key=client_key,
client_cert=client_cert,
auto_login_max_tries=20,
)
return admin_api
def ensure_admin_api_logged_in(admin_api: FLAdminAPI, timeout: int = 60):
login_success = False
try:
start_time = time.time()
while time.time() - start_time <= timeout:
if admin_api.is_ready():
login_success = True
break
time.sleep(0.2)
if not login_success:
print(f"Admin api failed to log in within {timeout} seconds: {admin_api.fsm.current_state}.")
else:
print("Admin successfully logged into server.")
except Exception as e:
print(f"Exception in logging in to admin: {e.__str__()}")
return login_success
| NVFlare-main | tests/integration_test/src/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import signal
from abc import ABC, abstractmethod
from typing import Dict, Optional
from .utils import run_command_in_subprocess
class SiteProperties:
def __init__(self, name: str, root_dir: str, process):
self.name = name
self.root_dir = root_dir
self.process = process
class ServerProperties(SiteProperties):
def __init__(self, name: str, root_dir: str, process, port: str):
super().__init__(name=name, root_dir=root_dir, process=process)
self.port = str(port)
def kill_process(site_prop: SiteProperties):
if not site_prop.process:
return
os.killpg(site_prop.process.pid, signal.SIGTERM)
p = run_command_in_subprocess(f"kill -9 {str(site_prop.process.pid)}")
p.wait()
p = run_command_in_subprocess(f"pkill -9 -f {site_prop.root_dir}")
p.wait()
print(f"Kill {site_prop.name}.")
site_prop.process.wait()
class SiteLauncher(ABC):
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
self.overseer_properties: Optional[SiteProperties] = None
self.server_properties: Dict[str, ServerProperties] = {}
self.client_properties: Dict[str, SiteProperties] = {}
@abstractmethod
def prepare_workspace(self) -> str:
pass
@abstractmethod
def start_overseer(self):
pass
@abstractmethod
def start_server(self, server_id):
pass
@abstractmethod
def start_client(self, client_id):
pass
@abstractmethod
def stop_overseer(self):
pass
@abstractmethod
def start_servers(self):
pass
@abstractmethod
def start_clients(self):
pass
def stop_server(self, server_id):
if server_id not in self.server_properties:
raise RuntimeError(f"Server {server_id} not in server_properties.")
server_prop: ServerProperties = self.server_properties[server_id]
try:
# Kill the process
kill_process(server_prop)
except Exception as e:
print(f"Exception in stopping server {server_id}: {e.__str__()}")
def stop_client(self, client_id):
if client_id not in self.client_properties:
raise RuntimeError(f"Client {client_id} not in client_properties.")
client_prop: SiteProperties = self.client_properties[client_id]
try:
kill_process(client_prop)
except Exception as e:
print(f"Exception in stopping client {client_id}: {e.__str__()}")
def stop_all_clients(self):
for client_id in list(self.client_properties.keys()):
self.stop_client(client_id)
def stop_all_servers(self):
for server_id in list(self.server_properties.keys()):
self.stop_server(server_id)
def stop_all_sites(self):
self.stop_all_clients()
self.stop_all_servers()
self.stop_overseer()
def get_active_server_id(self, port) -> str:
active_server_id = None
for k in self.server_properties.keys():
if self.server_properties[k].port == str(port):
active_server_id = k
return active_server_id
def cleanup(self):
self.overseer_properties = None
self.server_properties.clear()
self.client_properties.clear()
| NVFlare-main | tests/integration_test/src/site_launcher.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import time
import yaml
from .site_launcher import ServerProperties, SiteLauncher, SiteProperties, kill_process
from .utils import (
cleanup_job_and_snapshot,
read_yaml,
run_command_in_subprocess,
run_provision_command,
update_job_store_path_in_workspace,
update_snapshot_path_in_workspace,
)
WORKSPACE = "ci_workspace"
PROD_FOLDER_NAME = "prod_00"
def _start_site(site_properties: SiteProperties):
process = run_command_in_subprocess(f"bash {os.path.join(site_properties.root_dir, 'startup', 'start.sh')}")
print(f"Starting {site_properties.name} ...")
site_properties.process = process
def _stop_site(site_properties: SiteProperties):
run_command_in_subprocess(f"echo 'y' | {os.path.join(site_properties.root_dir, 'startup', 'stop_fl.sh')}")
print(f"Stopping {site_properties.name} ...")
class ProvisionSiteLauncher(SiteLauncher):
def __init__(self, project_yaml: str):
super().__init__()
self.admin_user_names = []
self.project_yaml = read_yaml(project_yaml)
for p in self.project_yaml["participants"]:
name = p["name"]
script_dir = os.path.join(self._get_workspace_dir(), name)
if p["type"] == "server":
admin_port = p["admin_port"]
self.server_properties[name] = ServerProperties(name, script_dir, None, admin_port)
elif p["type"] == "client":
self.client_properties[name] = SiteProperties(name, script_dir, None)
elif p["type"] == "overseer":
self.overseer_properties = SiteProperties(name, script_dir, None)
elif p["type"] == "admin":
self.admin_user_names.append(name)
def _get_workspace_dir(self):
return os.path.join(WORKSPACE, self.project_yaml["name"], PROD_FOLDER_NAME)
def prepare_workspace(self) -> str:
        fd, temp_yaml = tempfile.mkstemp()
        os.close(fd)  # close the low-level descriptor; the path is re-opened below
with open(temp_yaml, "w") as f:
yaml.dump(self.project_yaml, f, default_flow_style=False)
run_provision_command(project_yaml=temp_yaml, workspace=WORKSPACE)
os.remove(temp_yaml)
new_job_store = None
new_snapshot_store = None
for k in self.server_properties:
server_name = self.server_properties[k].name
new_job_store = update_job_store_path_in_workspace(self._get_workspace_dir(), server_name, new_job_store)
new_snapshot_store = update_snapshot_path_in_workspace(
self._get_workspace_dir(), server_name, new_snapshot_store
)
cleanup_job_and_snapshot(self._get_workspace_dir(), server_name)
return os.path.join(WORKSPACE, self.project_yaml["name"], PROD_FOLDER_NAME)
def start_overseer(self):
_start_site(self.overseer_properties)
def stop_overseer(self):
try:
# Kill the process
if self.overseer_properties:
kill_process(self.overseer_properties)
process = run_command_in_subprocess("pkill -9 -f gunicorn")
process.wait()
else:
print("No overseer process to stop.")
except Exception as e:
print(f"Exception in stopping overseer: {e.__str__()}")
finally:
self.overseer_properties = None
def start_servers(self):
for k in self.server_properties:
self.start_server(k)
time.sleep(3.0) # makes the first one always primary
def start_clients(self):
for k in self.client_properties:
self.start_client(k)
def start_server(self, server_id: str):
_start_site(self.server_properties[server_id])
def stop_server(self, server_id: str):
_stop_site(self.server_properties[server_id])
super().stop_server(server_id)
def start_client(self, client_id: str):
_start_site(self.client_properties[client_id])
def stop_client(self, client_id: str):
_stop_site(self.client_properties[client_id])
super().stop_client(client_id)
def cleanup(self):
process = run_command_in_subprocess(f"pkill -9 -f {PROD_FOLDER_NAME}")
process.wait()
for server_name in self.server_properties:
cleanup_job_and_snapshot(self._get_workspace_dir(), server_name)
shutil.rmtree(WORKSPACE)
super().cleanup()
| NVFlare-main | tests/integration_test/src/provision_site_launcher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC
from typing import List
from nvflare.app_common.app_constant import AppConstants
from .job_result_validator import FinishJobResultValidator
class BaseCrossValResultValidator(FinishJobResultValidator, ABC):
def __init__(self, server_model_names: List[str]):
super().__init__()
self.server_model_names = server_model_names
def check_cross_validation_result(self, job_result, client_props, n_clients=-1, global_model_eval=False):
client_names = []
client_len = len(client_props) if n_clients == -1 else n_clients
for i in range(client_len):
client_names.append(client_props[i].name)
server_run_dir = job_result["workspace_root"]
cross_val_dir = os.path.join(server_run_dir, AppConstants.CROSS_VAL_DIR)
if not os.path.exists(cross_val_dir):
self.logger.error(f"models dir {cross_val_dir} doesn't exist.")
return False
model_shareable_dir = os.path.join(cross_val_dir, AppConstants.CROSS_VAL_MODEL_DIR_NAME)
if not os.path.exists(model_shareable_dir):
self.logger.error(f"model shareable directory {model_shareable_dir} doesn't exist.")
return False
result_shareable_dir = os.path.join(cross_val_dir, AppConstants.CROSS_VAL_RESULTS_DIR_NAME)
if not os.path.exists(result_shareable_dir):
self.logger.error(f"result shareable directory {result_shareable_dir} doesn't exist.")
return False
        # The model shareable dir should contain one file per server model, plus one per client unless only the global model is evaluated
server_model_names = [f"SRV_{i}" for i in self.server_model_names]
model_file_names = server_model_names.copy()
if not global_model_eval:
model_file_names = model_file_names + client_names
self.logger.info(f"Model files to look for: {model_file_names}")
for model_file_name in model_file_names:
model_file = os.path.join(model_shareable_dir, model_file_name)
if not os.path.exists(model_file):
self.logger.error(f"model {model_file} doesn't exist in model shareable directory.")
return False
# Check all the results
# results_file_names = ["client_1_server", "client_0_server", "client_1_client_0", "client_1_client_1",
# "client_0_client_1", "client_0_client_0"]
results_file_names = [f"{x}_{y}" for x in client_names for y in server_model_names]
if not global_model_eval:
for client_name in client_names:
results_file_names += [f"{client_name}_{x}" for x in client_names]
self.logger.info(f"Result files to look for: {results_file_names}")
for results_file_name in results_file_names:
result_file = os.path.join(result_shareable_dir, results_file_name)
if not os.path.exists(result_file):
self.logger.error(f"result {result_file} doesn't exist in result shareable directory.")
return False
return True
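    # Illustrative layout sketch (not part of the original module): under
    # <workspace_root>/<AppConstants.CROSS_VAL_DIR>/ the validator expects
    #     <CROSS_VAL_MODEL_DIR_NAME>/SRV_<server_model>, plus one file per client model (unless global_model_eval)
    #     <CROSS_VAL_RESULTS_DIR_NAME>/<client>_SRV_<server_model>, <client>_<client>, ...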
class GlobalModelEvalValidator(BaseCrossValResultValidator):
def validate_finished_results(self, job_result, client_props) -> bool:
return self.check_cross_validation_result(job_result, client_props, n_clients=-1, global_model_eval=True)
class CrossValResultValidator(BaseCrossValResultValidator):
def validate_finished_results(self, job_result, client_props) -> bool:
return self.check_cross_validation_result(job_result, client_props)
class CrossValSingleClientResultValidator(BaseCrossValResultValidator):
def validate_finished_results(self, job_result, client_props) -> bool:
return self.check_cross_validation_result(job_result, client_props, n_clients=1)
| NVFlare-main | tests/integration_test/src/validators/cross_val_result_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .job_result_validator import FinishJobResultValidator
TB_PATH = "tb_events"
class TBResultValidator(FinishJobResultValidator):
def __init__(self, require_result_on_client: bool):
super().__init__()
self._require_result_on_client = require_result_on_client
def validate_finished_results(self, job_result, client_props) -> bool:
server_run_dir = job_result["workspace_root"]
server_tb_root_dir = os.path.join(server_run_dir, TB_PATH)
if not os.path.exists(server_tb_root_dir):
self.logger.error(f"server_tb_root_dir {server_tb_root_dir} doesn't exist.")
return False
# check client side
if self._require_result_on_client:
for client_prop in client_props:
client_run_dir = os.path.join(client_prop.root_dir, job_result["job_id"])
client_side_client_tb_dir = os.path.join(client_run_dir, TB_PATH, client_prop.name)
if not os.path.exists(client_side_client_tb_dir):
self.logger.error(f"client_side_client_tb_dir {client_side_client_tb_dir} doesn't exist.")
return False
return True
| NVFlare-main | tests/integration_test/src/validators/tb_result_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from .job_result_validator import FinishJobResultValidator
class NumpyModelValidator(FinishJobResultValidator):
def validate_finished_results(self, job_result, client_props) -> bool:
server_run_dir = job_result["workspace_root"]
models_dir = os.path.join(server_run_dir, "models")
if not os.path.exists(models_dir):
self.logger.error(f"models dir {models_dir} doesn't exist.")
return False
model_path = os.path.join(models_dir, "server.npy")
if not os.path.isfile(model_path):
self.logger.error(f"model_path {model_path} doesn't exist.")
return False
try:
data = np.load(model_path)
self.logger.info(f"data loaded: {data}.")
except Exception as e:
self.logger.error(f"exception happens: {e.__str__()}")
return False
return True
| NVFlare-main | tests/integration_test/src/validators/np_model_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from .job_result_validator import FinishJobResultValidator
class NumpySAGResultValidator(FinishJobResultValidator):
def __init__(self, expected_result):
super().__init__()
self.expected_result = np.array(expected_result)
def validate_finished_results(self, job_result, client_props) -> bool:
server_run_dir = job_result["workspace_root"]
models_dir = os.path.join(server_run_dir, "models")
if not os.path.exists(models_dir):
self.logger.error(f"models dir {models_dir} doesn't exist.")
return False
model_path = os.path.join(models_dir, "server.npy")
if not os.path.isfile(model_path):
self.logger.error(f"model_path {model_path} doesn't exist.")
return False
try:
data = np.load(model_path)
self.logger.info(f"data loaded: {data}.")
np.testing.assert_equal(data, self.expected_result)
except Exception as e:
self.logger.error(f"exception happens: {e.__str__()}")
return False
return True
| NVFlare-main | tests/integration_test/src/validators/np_sag_result_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from abc import ABC, abstractmethod
from typing import List
from tests.integration_test.src import SiteProperties
class JobResultValidator(ABC):
@abstractmethod
def validate_results(self, job_result, client_props: List[SiteProperties]) -> bool:
pass
class FinishJobResultValidator(JobResultValidator, ABC):
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
@abstractmethod
def validate_finished_results(self, job_result, client_props) -> bool:
pass
def validate_results(self, job_result, client_props) -> bool:
if not job_result:
return False
# check run folder exist
server_run_dir = job_result["workspace_root"]
if not os.path.exists(server_run_dir):
self.logger.error(f"server run dir {server_run_dir} doesn't exist.")
return False
for client_prop in client_props:
client_run_dir = os.path.join(client_prop.root_dir, job_result["job_id"])
if not os.path.exists(client_run_dir):
self.logger.error(f"client run dir {client_run_dir} doesn't exist.")
return False
if not self.validate_finished_results(job_result, client_props):
return False
return True
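# Subclasses follow a template-method pattern: validate_results performs the
# common run-folder checks, then delegates to validate_finished_results. A
# minimal sketch (hypothetical validator, not part of the original module):
#     class MyArtifactValidator(FinishJobResultValidator):
#         def validate_finished_results(self, job_result, client_props) -> bool:
#             return os.path.isfile(os.path.join(job_result["workspace_root"], "my_artifact.bin"))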
| NVFlare-main | tests/integration_test/src/validators/job_result_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cifar10_result_validator import CIFAR10ResultValidator
from .cross_val_result_validator import (
CrossValResultValidator,
CrossValSingleClientResultValidator,
GlobalModelEvalValidator,
)
from .np_model_validator import NumpyModelValidator
from .np_sag_result_validator import NumpySAGResultValidator
from .pt_model_validator import PTModelValidator
from .tb_result_validator import TBResultValidator
from .tf_model_validator import TFModelValidator
| NVFlare-main | tests/integration_test/src/validators/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvflare.apis.fl_constant import WorkspaceConstants
from nvflare.app_common.app_constant import DefaultCheckpointFileName
from .job_result_validator import FinishJobResultValidator
class CIFAR10ResultValidator(FinishJobResultValidator):
def validate_finished_results(self, job_result, client_props) -> bool:
server_run_dir = job_result["workspace_root"]
server_models_dir = os.path.join(server_run_dir, WorkspaceConstants.APP_PREFIX + "server")
if not os.path.exists(server_models_dir):
self.logger.error(f"models dir {server_models_dir} doesn't exist.")
return False
model_path = os.path.join(server_models_dir, DefaultCheckpointFileName.GLOBAL_MODEL)
if not os.path.isfile(model_path):
self.logger.error(f"model_path {model_path} doesn't exist.")
return False
return True
| NVFlare-main | tests/integration_test/src/validators/cifar10_result_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvflare.apis.fl_constant import WorkspaceConstants
from nvflare.apis.utils.decomposers import flare_decomposers
from nvflare.app_common.decomposers import common_decomposers
from nvflare.fuel.utils import fobs
from .job_result_validator import FinishJobResultValidator
class TFModelValidator(FinishJobResultValidator):
def validate_finished_results(self, job_result, client_props) -> bool:
server_run_dir = job_result["workspace_root"]
server_models_dir = os.path.join(server_run_dir, WorkspaceConstants.APP_PREFIX + "server")
if not os.path.exists(server_models_dir):
self.logger.error(f"models dir {server_models_dir} doesn't exist.")
return False
model_path = os.path.join(server_models_dir, "tf2weights.fobs")
if not os.path.isfile(model_path):
self.logger.error(f"model_path {model_path} doesn't exist.")
return False
try:
flare_decomposers.register()
common_decomposers.register()
            with open(model_path, "rb") as f:
                data = fobs.load(f)
self.logger.info(f"Data loaded: {data}.")
assert "weights" in data
assert "meta" in data
except Exception as e:
self.logger.error(f"Exception in validating TF model: {e.__str__()}")
return False
return True
| NVFlare-main | tests/integration_test/src/validators/tf_model_validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvflare.apis.fl_constant import WorkspaceConstants
from nvflare.app_common.app_constant import DefaultCheckpointFileName
from .job_result_validator import FinishJobResultValidator
class PTModelValidator(FinishJobResultValidator):
def validate_finished_results(self, job_result, client_props) -> bool:
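        # Same check as CIFAR10ResultValidator: the global PyTorch model checkpoint must exist in the server app workspace.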
server_run_dir = job_result["workspace_root"]
server_models_dir = os.path.join(server_run_dir, WorkspaceConstants.APP_PREFIX + "server")
if not os.path.exists(server_models_dir):
self.logger.error(f"models dir {server_models_dir} doesn't exist.")
return False
model_path = os.path.join(server_models_dir, DefaultCheckpointFileName.GLOBAL_MODEL)
if not os.path.isfile(model_path):
self.logger.error(f"model_path {model_path} doesn't exist.")
return False
return True
| NVFlare-main | tests/integration_test/src/validators/pt_model_validator.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/unit_test/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/unit_test/fuel/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import pytest
from nvflare.fuel.hci.security import get_certificate_common_name, hash_password, make_session_token, verify_password
class TestSecurityUtils:
@pytest.mark.parametrize("password_to_hash", ["abcde", "xyz"])
def test_hash_password(self, password_to_hash):
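        # A freshly hashed password must verify against its own plaintext.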
hashed_password = hash_password(password_to_hash)
assert verify_password(hashed_password, password_to_hash)
@pytest.mark.parametrize(
"password_hash, password_to_verify, expected_result",
[
[
"b1a020416bc1479da3f937af46f90a09a4c09cd6271609105f80e7c9e7fd461ffe463784834ea63c7525f85b80435bbc0dfba614570f23aaccbd115bbef81a57b2e73a39563f0d1b75132c8b9e1b53dc94a1525be01d0e6862e577360e820592",
"abcde",
False,
],
[
"b1a020416bc1479da3f937af46f90a09a4c09cd6271609105f80e7c9e7fd461ffe463784834ea63c7525f85b80435bbc0dfba614570f23aaccbd115bbef81a57b2e73a39563f0d1b75132c8b9e1b53dc94a1525be01d0e6862e577360e820592",
"xyz",
True,
],
],
)
def test_verify_password(self, password_hash, password_to_verify, expected_result):
result = verify_password(password_hash, password_to_verify)
assert result == expected_result
def test_make_session_token(self):
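        # uuid.UUID() raises ValueError if the token is not a valid UUID string,
        # so reaching the assert means the token format is correct.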
uuid.UUID(make_session_token())
assert True
@pytest.mark.parametrize(
"cert, expected",
[
(None, None),
({}, None),
({"subject": {}}, None),
(
{
"subject": (
(("description", "571208-SLe257oHY9fVQ07Z"),),
(("countryName", "US"),),
(("stateOrProvinceName", "California"),),
(("localityName", "San Francisco"),),
(("organizationName", "Electronic Frontier Foundation, Inc."),),
(("commonName", "*.eff.org"),),
(("emailAddress", "[email protected]"),),
)
},
"*.eff.org",
),
],
)
def test_get_certificate_common_name(self, cert, expected):
result = get_certificate_common_name(cert)
assert result == expected
| NVFlare-main | tests/unit_test/fuel/hci/security_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/unit_test/fuel/hci/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import pytest
from nvflare.fuel.hci.chunk import MAX_CHUNK_SIZE, Receiver, Sender, get_slice
class DataCollector:
def __init__(self):
self.buffer = bytearray()
def collect(self, data):
self.buffer.extend(data)
def collect_from(self, data, start: int, length: int):
self.buffer.extend(get_slice(data, start, length))
class TestChunkSendReceive:
def do_send_receive(self):
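        # Round trip: send three random chunks through Sender, then feed the encoded
        # stream back to a Receiver in randomly sized segments and compare payloads.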
data = bytearray()
coll = DataCollector()
sender = Sender(send_data_func=coll.collect)
c1 = os.urandom(1000)
data.extend(c1)
sender.send(c1)
c2 = os.urandom(random.randint(1, 1024))
data.extend(c2)
sender.send(c2)
c3 = os.urandom(random.randint(1, 2048))
data.extend(c3)
sender.send(c3)
sender.close()
buffer = coll.buffer
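        # Split the encoded stream at random boundaries to simulate partial network reads.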
num_stops = 4
stops = random.sample(range(1, len(buffer) - 1), num_stops)
stops.sort()
coll2 = DataCollector()
receiver = Receiver(receive_data_func=coll2.collect_from)
start = 0
for i in range(num_stops + 1):
if i < num_stops:
end = stops[i]
else:
end = len(buffer)
buf = buffer[start:end]
receiver.receive(buf)
            start = end
assert coll2.buffer == data
def test_send_random(self):
for _ in range(1000):
self.do_send_receive()
def send_one_chunk(self, size):
coll = DataCollector()
sender = Sender(send_data_func=coll.collect)
if size == 0:
data = b""
else:
data = os.urandom(size)
sender.send(data)
buffer = coll.buffer
coll2 = DataCollector()
receiver = Receiver(receive_data_func=coll2.collect_from)
receiver.receive(buffer)
assert coll2.buffer == data
def test_send_one_byte(self):
self.send_one_chunk(1)
def test_send_zero_byte(self):
self.send_one_chunk(0)
def test_send_max_bytes(self):
self.send_one_chunk(MAX_CHUNK_SIZE)
def test_max_size_error(self):
with pytest.raises(RuntimeError):
self.send_one_chunk(MAX_CHUNK_SIZE + 1)
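    # Minimal usage sketch of the chunk API exercised above (the transport hook
    # names write_to_transport/on_chunk_data are placeholders; send_data_func and
    # receive_data_func are the only integration points the protocol assumes):
    #   sender = Sender(send_data_func=write_to_transport)
    #   sender.send(b"payload")
    #   sender.close()
    #   receiver = Receiver(receive_data_func=on_chunk_data)
    #   receiver.receive(bytes_read_from_transport)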
| NVFlare-main | tests/unit_test/fuel/hci/chunk_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/unit_test/fuel/f3/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import pytest
from nvflare.fuel.f3.communicator import Communicator
from nvflare.fuel.f3.connection import Connection
from nvflare.fuel.f3.drivers.connector_info import Mode
from nvflare.fuel.f3.drivers.net_utils import parse_url
from nvflare.fuel.f3.endpoint import Endpoint, EndpointMonitor, EndpointState
from nvflare.fuel.f3.message import Message, MessageReceiver
log = logging.getLogger(__name__)
APP_ID = 123
NODE_A = "Communicator A"
NODE_B = "Communicator B"
MESSAGE_FROM_A = "Test message from a"
MESSAGE_FROM_B = "Test message from b"
class Monitor(EndpointMonitor):
def __init__(self, tester):
self.tester = tester
def state_change(self, endpoint: Endpoint):
if endpoint.state == EndpointState.READY:
if endpoint.name == NODE_A:
self.tester.a_ready = True
else:
self.tester.b_ready = True
class Receiver(MessageReceiver):
def __init__(self, tester):
self.tester = tester
def process_message(self, endpoint: Endpoint, connection: Connection, app_id: int, message: Message):
text = message.payload.decode("utf-8")
if endpoint.name == NODE_A:
assert text == MESSAGE_FROM_A
self.tester.a_received = True
else:
assert text == MESSAGE_FROM_B
self.tester.b_received = True
@pytest.mark.xdist_group(name="test_f3_communicator")
class TestCommunicator:
@pytest.fixture
def comm_a(self):
local_endpoint = Endpoint(NODE_A, {"foo": "test"})
comm = Communicator(local_endpoint)
comm.register_monitor(Monitor(self))
comm.register_message_receiver(APP_ID, Receiver(self))
self.a_ready = False
self.a_received = False
return comm
@pytest.fixture
def comm_b(self):
local_endpoint = Endpoint(NODE_B, {"bar": 123})
comm = Communicator(local_endpoint)
comm.register_monitor(Monitor(self))
comm.register_message_receiver(APP_ID, Receiver(self))
self.b_ready = False
self.b_received = False
return comm
@pytest.mark.parametrize(
"scheme, port_range",
[
("tcp", "2000-3000"),
("grpc", "3000-4000"),
("http", "4000-5000"),
("atcp", "5000-6000"),
],
)
def test_sfm_message(self, comm_a, comm_b, scheme, port_range):
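        # Start a listener on A, have B connect actively, then exchange one message
        # in each direction over the given transport scheme.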
handle1, url = comm_a.start_listener(scheme, {"ports": port_range})
comm_a.start()
# Check port is in the range
if port_range:
parts = port_range.split("-")
lo = int(parts[0])
hi = int(parts[1])
params = parse_url(url)
port = int(params.get("port"))
assert lo <= port <= hi
comm_b.add_connector(url, Mode.ACTIVE)
comm_b.start()
while not self.a_ready or not self.b_ready:
log.info("Waiting for both endpoints to be ready")
time.sleep(0.1)
comm_a.send(Endpoint(NODE_B), APP_ID, Message({}, MESSAGE_FROM_A.encode("utf-8")))
comm_b.send(Endpoint(NODE_A), APP_ID, Message({}, MESSAGE_FROM_B.encode("utf-8")))
time.sleep(1)
assert self.a_received and self.b_received
comm_b.stop()
comm_a.stop()
| NVFlare-main | tests/unit_test/fuel/f3/communicator_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.drivers.net_utils import encode_url, parse_url
class TestNetUtils:
def test_encode_url(self):
params = {
DriverParams.SCHEME.value: "tcp",
DriverParams.HOST.value: "flare.test.com",
DriverParams.PORT.value: 1234,
"b": "test value",
"a": 123,
"r": False,
}
url = encode_url(params)
assert url == "tcp://flare.test.com:1234?b=test+value&a=123&r=False"
def test_parse_url(self):
url = "grpc://test.com:8002?a=123&b=test"
params = parse_url(url)
assert params.get(DriverParams.URL) == url
assert int(params.get(DriverParams.PORT)) == 8002
assert params.get("a") == "123"
assert params.get("b") == "test"
| NVFlare-main | tests/unit_test/fuel/f3/drivers/net_utils_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/unit_test/fuel/f3/drivers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from nvflare.fuel.f3 import drivers
from nvflare.fuel.f3.drivers.aio_grpc_driver import AioGrpcDriver
from nvflare.fuel.f3.drivers.aio_http_driver import AioHttpDriver
from nvflare.fuel.f3.drivers.aio_tcp_driver import AioTcpDriver
from nvflare.fuel.f3.drivers.driver_manager import DriverManager
from nvflare.fuel.f3.drivers.tcp_driver import TcpDriver
class TestDriverManager:
@pytest.fixture
def manager(self):
driver_manager = DriverManager()
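        # Scan the built-in drivers package so each bundled driver registers its URL schemes.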
driver_manager.search_folder(os.path.dirname(drivers.__file__), drivers.__package__)
return driver_manager
@pytest.mark.parametrize(
"scheme, expected",
[
("tcp", TcpDriver),
("stcp", TcpDriver),
("grpc", AioGrpcDriver),
("grpcs", AioGrpcDriver),
("http", AioHttpDriver),
("https", AioHttpDriver),
("ws", AioHttpDriver),
("wss", AioHttpDriver),
("atcp", AioTcpDriver),
("satcp", AioTcpDriver),
],
)
def test_driver_loading(self, manager, scheme, expected):
driver_class = manager.find_driver_class(scheme)
assert driver_class == expected
| NVFlare-main | tests/unit_test/fuel/f3/drivers/driver_manager_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from nvflare.fuel.f3 import communicator
# Setup custom driver path before communicator module initialization
from nvflare.fuel.utils.config_service import ConfigService
class TestCustomDriver:
@pytest.fixture
def manager(self):
rel_path = "../../../data/custom_drivers/config"
config_path = os.path.normpath(os.path.join(os.path.dirname(__file__), rel_path))
ConfigService.initialize({}, [config_path])
communicator.load_comm_drivers()
return communicator.driver_mgr
def test_custom_driver_loading(self, manager):
driver_class = manager.find_driver_class("warp")
assert driver_class.__name__ == "WarpDriver"
| NVFlare-main | tests/unit_test/fuel/f3/drivers/custom_driver_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import pytest
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.stream_cell import StreamCell
from nvflare.fuel.f3.streaming.stream_types import StreamFuture
from nvflare.fuel.f3.streaming.tools.utils import RX_CELL, TEST_CHANNEL, TEST_TOPIC, TX_CELL, make_buffer
from nvflare.fuel.utils.network_utils import get_open_ports
WAIT_SEC = 10
class State:
def __init__(self):
self.done = threading.Event()
self.result = None
class TestStreamCell:
@pytest.fixture
def port(self):
return get_open_ports(1)[0]
@pytest.fixture
def state(self):
return State()
@pytest.fixture
def server_cell(self, port, state):
listening_url = f"tcp://localhost:{port}"
cell = CoreCell(RX_CELL, listening_url, secure=False, credentials={})
stream_cell = StreamCell(cell)
stream_cell.register_blob_cb(TEST_CHANNEL, TEST_TOPIC, self.blob_cb, state=state)
cell.start()
return stream_cell
@pytest.fixture
def client_cell(self, port, state):
connect_url = f"tcp://localhost:{port}"
cell = CoreCell(TX_CELL, connect_url, secure=False, credentials={})
stream_cell = StreamCell(cell)
cell.start()
return stream_cell
def test_streaming_blob(self, server_cell, client_cell, state):
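        # Stream a ~64 MB buffer from the TX cell to the RX cell and verify it
        # arrives byte-for-byte intact.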
size = 64 * 1024 * 1024 + 123
buffer = make_buffer(size)
send_future = client_cell.send_blob(TEST_CHANNEL, TEST_TOPIC, RX_CELL, Message(None, buffer))
bytes_sent = send_future.result()
assert bytes_sent == len(buffer)
if not state.done.wait(timeout=30):
raise Exception("Data not received after 30 seconds")
assert buffer == state.result
def blob_cb(self, future: StreamFuture, **kwargs):
state = kwargs.get("state")
state.result = future.result()
state.done.set()
| NVFlare-main | tests/unit_test/fuel/f3/streaming/streaming_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/unit_test/fuel/f3/streaming/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from pathlib import Path
import pytest
from nvflare.fuel.utils.zip_utils import (
get_all_file_paths,
normpath_for_zip,
remove_leading_dotdot,
split_path,
unzip_all_from_bytes,
zip_directory_to_bytes,
)
@pytest.fixture()
def create_fake_dir():
"""
/a/b/c/
folder1/
file1
file2
folder2/
file3
file4
folder3/
folder4/
file5
folder5/
folder6/
file6
file7
"""
temp_dir = Path(tempfile.mkdtemp())
prefix = os.path.sep.join(["a", "b", "c"])
root_dir = temp_dir / prefix
os.makedirs(root_dir)
os.mkdir(root_dir / "folder1")
open(root_dir / "folder1" / "file1", "w").close()
open(root_dir / "folder1" / "file2", "w").close()
os.mkdir(root_dir / "folder2")
open(root_dir / "folder2" / "file3", "w").close()
open(root_dir / "folder2" / "file4", "w").close()
os.mkdir(root_dir / "folder3")
os.mkdir(root_dir / "folder3" / "folder4")
open(root_dir / "folder3" / "folder4" / "file5", "w").close()
os.mkdir(root_dir / "folder3" / "folder5")
os.mkdir(root_dir / "folder6")
open(root_dir / "file6", "w").close()
open(root_dir / "file7", "w").close()
yield temp_dir, prefix
shutil.rmtree(temp_dir)
class TestZipUtils:
@pytest.mark.parametrize(
"path, output",
[
(os.path.sep.join(["..", "..", "..", "hello"]), "hello"),
(f".{os.path.sep}hello", "hello"),
(os.path.sep.join(["..", "..", "..", "hello", "motor"]), f"hello{os.path.sep}motor"),
(os.path.sep.join(["..", "..", "..", "hello", "..", "motor"]), os.path.sep.join(["hello", "..", "motor"])),
(f"{os.path.abspath(os.path.sep)}hello", f"{os.path.abspath(os.path.sep)}hello"),
],
)
def test_remove_leading_dotdot(self, path, output):
assert remove_leading_dotdot(path) == output
@pytest.mark.parametrize(
"path, output",
[
("hello", ("", "hello")),
(f".{os.path.sep}hello", ("", "hello")),
(
os.path.sep.join(["..", "..", "..", "hello", "..", "motor"]),
(os.path.sep.join(["..", "..", "..", "hello", ".."]), "motor"),
),
(f"{os.path.abspath(os.path.sep)}hello", (os.path.abspath(os.path.sep), "hello")),
(f"hello{os.path.sep}", ("", "hello")),
(
os.path.sep.join(["..", "hello", "..", "motor", ""]),
(os.path.sep.join(["..", "hello", ".."]), "motor"),
),
],
)
def test_split_path(self, path, output):
assert split_path(path) == output
@pytest.mark.parametrize(
"path, expected",
[
("hello", "hello"),
("PPAP\\ABCD", "PPAP/ABCD"),
("/home/random_dir/something.txt", "/home/random_dir/something.txt"),
],
)
def test_normpath_for_zip(self, path, expected):
assert normpath_for_zip(path) == expected
def test_get_all_file_paths(self, create_fake_dir):
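        # The fixture creates 7 files and 6 directories under a/b/c, i.e. 13 paths in total.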
tmp_dir, prefix = create_fake_dir
test_path = os.path.join(tmp_dir, prefix)
assert len(get_all_file_paths(test_path)) == 13
def test_zip_unzip(self, create_fake_dir):
tmp_dir, prefix = create_fake_dir
first, second = os.path.split(prefix)
root_dir = os.path.join(tmp_dir, first)
zip_data = zip_directory_to_bytes(root_dir=root_dir, folder_name=second)
temp_dir = tempfile.mkdtemp()
unzip_all_from_bytes(zip_data, output_dir_name=temp_dir)
for i, j in zip(os.walk(root_dir), os.walk(temp_dir)):
assert i[1] == j[1]
assert i[2] == j[2]
shutil.rmtree(temp_dir)
| NVFlare-main | tests/unit_test/fuel/utils/zip_utils_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nvflare.fuel.utils.import_utils import LazyImportError, optional_import
class TestOptionalImport:
def test_lazy_import(self):
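        # optional_import returns (module, success_flag); a failed import yields a
        # lazy stub that raises LazyImportError on first attribute access.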
np, flag = optional_import("numpy")
assert flag is True
np, flag = optional_import(module="numpy", op=">=", version="1.0.0")
assert flag is True
np, flag = optional_import("numpy", ">=", "100.0.0")
assert flag is False
with pytest.raises(LazyImportError):
            print(np.will_fail)
# numpy is 1.22
# np, flag = optional_import("numpy", "==", "1.22")
# assert flag == True
the_module, flag = optional_import("unknown_module")
with pytest.raises(LazyImportError):
print(the_module.method) # trying to access a module which is not imported
torch, flag = optional_import("torch", "==", "42")
with pytest.raises(LazyImportError):
print(torch.nn) # trying to access a module for which there isn't a proper version imported
        # If you have torch installed, uncomment these lines:
# conv, flag = optional_import(module="torch.nn.functional", name="conv1d")
# print(conv)
# assert flag == True
with pytest.raises(LazyImportError):
conv, flag = optional_import(module="torch", op=">=", version="42")
# trying to use a function from the not successfully imported module (due to unmatched version)
print(conv())
with pytest.raises(LazyImportError):
conv, flag = optional_import(module="torch.nn.functional", op=">=", version="42", name="conv1d")
# trying to use a function from the not successfully imported module (due to unmatched version)
print(conv())
| NVFlare-main | tests/unit_test/fuel/utils/import_utils_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock
import pytest
from nvflare.fuel.utils.config import ConfigFormat, ConfigLoader
from nvflare.fuel.utils.config_factory import ConfigFactory
class TestConfigFactory:
@pytest.mark.parametrize(
"init_file_path, file_basename",
[
["fed_client_config.json", "fed_client_config"],
["fed_client_config.json.default", "fed_client_config"],
["/abc/efg/[email protected]/fed_client_config.json", "fed_client_config"],
["/abc/efg/[email protected]/fed_client_config.json.default", "fed_client_config"],
["./abc/fed_client_config.json.default", "fed_client_config"],
],
)
def test_get_file_basename(self, init_file_path, file_basename):
assert file_basename == ConfigFactory.get_file_basename(init_file_path)
def test_fmt_to_loader(self):
fmt2loaders = ConfigFactory._fmt2Loader
ext2fmts = ConfigFormat.config_ext_formats()
config_dicts = dict(
example="1",
version=2,
a=dict(a=2, b=3),
databases=dict(
mysql=dict(url="mysql://xxx", user="admin"), postgres=dict(url="postgres://xxx", user="root")
),
arrs=[1, 2, 5, 6],
)
        # Note: this test assumes both Pyhocon and OmegaConf are installed.
        # To test with only Pyhocon installed (OmegaConf missing), or only
        # OmegaConf installed (Pyhocon missing), simply filter one config format
        # out; the code should still work without the other dependency.
for ext in ext2fmts:
fmt = ext2fmts[ext]
loader: ConfigLoader = fmt2loaders[fmt]
assert isinstance(loader, ConfigLoader)
assert loader.get_format() == fmt
config = loader.load_config_from_dict(config_dicts)
assert config.to_dict() == config_dicts
def _walk_result(self, search_dir):
if search_dir == ".":
return [(".", [], ["fed_client_config.conf", "fed_client_config.yml"])]
elif search_dir == "/tmp/nvflare":
return [
("/tmp/nvflare", ["site-1", "xyz"], []),
("/tmp/nvflare/site-1", [], ["fed_client_config.conf"]),
("/tmp/nvflare/xyz", [], ["hello.txt"]),
]
else:
return []
@pytest.mark.parametrize(
"init_file_path, search_dirs, expected_loc, expected_fmt",
[
["fed_client_config.json", ["."], "./fed_client_config.conf", ConfigFormat.PYHOCON],
[
"fed_client_config.json",
["/tmp/nvflare"],
"/tmp/nvflare/site-1/fed_client_config.conf",
ConfigFormat.PYHOCON,
],
],
)
def test_config_search(self, init_file_path, search_dirs, expected_loc, expected_fmt):
import os
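        # Save and patch os.walk so the search walks our fake directory trees.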
fn = os.walk
os.walk = MagicMock(side_effect=self._walk_result)
fmt, location = ConfigFactory.search_config_format(init_file_path, search_dirs)
assert fmt == expected_fmt
assert location == expected_loc
# restore function to avoid unexpected side-effect
os.walk = fn
@pytest.mark.parametrize(
"init_file_path, expected, namelist",
[
[
"config_fed_client.json",
True,
[
"user_email_match/meta.json",
"user_email_match/app/",
"user_email_match/app/custom/",
"user_email_match/app/config/",
"user_email_match/app/custom/local_psi.py",
"user_email_match/app/config/config_fed_client.conf",
"user_email_match/app/config/config_fed_server.conf",
"user_email_match/meta.json",
"user_email_match/app/",
"user_email_match/app/custom/",
"user_email_match/app/config/",
"user_email_match/app/custom/local_psi.py",
"user_email_match/app/config/config_fed_client.conf",
"user_email_match/app/config/config_fed_server.conf",
"user_email_match/meta.json",
"user_email_match/app/",
"user_email_match/app/custom/",
"user_email_match/app/config/",
"user_email_match/app/custom/local_psi.py",
"user_email_match/app/config/config_fed_client.conf",
"user_email_match/app/config/config_fed_server.conf" "user_email_match/meta.json",
"user_email_match/app/",
"user_email_match/app/custom/",
"user_email_match/app/config/",
"user_email_match/app/custom/local_psi.py",
"user_email_match/app/config/config_fed_client.conf",
"user_email_match/app/config/config_fed_server.conf",
],
]
],
)
def test_match_config(self, init_file_path, expected, namelist):
def match(parent, config_path: str):
import os
full_path = os.path.join("user_email_match/app/config", config_path)
return full_path in namelist
assert expected == ConfigFactory.match_config(None, init_file_path, match)
| NVFlare-main | tests/unit_test/fuel/utils/config_factory_test.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | tests/unit_test/fuel/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from omegaconf import OmegaConf
from nvflare.fuel.utils.config import ConfigFormat
from nvflare.fuel_opt.utils.omegaconf_loader import OmegaConfLoader
class TestOmegaConfConfig:
def return_conf(self, file_name):
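        # Stand-in for OmegaConfLoader._from_file: returns parsed configs without touching disk.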
if file_name == "test.yml":
x = """
a :
a1 : 1
a2 : 2
b : 1
c : "hi"
d :
- 1
- 2
"""
else:
x = """
a:
a1 : 2
a2 : 4
b : 2
c : "hello"
d :
- 2
- 4
"""
return OmegaConf.create(x)
def test_config_loader(self):
loader = OmegaConfLoader()
assert loader.get_format() == ConfigFormat.OMEGACONF
loader._from_file = self.return_conf
config = loader.load_config("test.yml")
assert config.get_format() == ConfigFormat.OMEGACONF
conf = config.get_native_conf()
print(config.to_dict())
assert config is not None
assert conf.a.a1 == 1
with pytest.raises(Exception):
assert conf["a.a1"] == 1
assert conf["a"]["a1"] == 1
assert conf.b == 1
assert conf.c == "hi"
assert conf.d == [1, 2]
assert conf.get("e4", None) is None
a_dict = OmegaConf.to_container(conf.a, resolve=True)
assert a_dict == {"a1": 1, "a2": 2}
def test_load_config_from_dict(self):
loader = OmegaConfLoader()
assert loader.get_format() == ConfigFormat.OMEGACONF
dicts = {
"a": {
"a1": 200,
},
"c": "hello",
"d": [200, 400, 500],
"e1": "Yes",
"e2": "True",
"e3": "NO",
}
config = loader.load_config_from_dict(dicts)
assert config.get_format() == ConfigFormat.OMEGACONF
assert config is not None
assert config.to_dict() == dicts
def test_load_config_from_str(self):
loader = OmegaConfLoader()
assert loader.get_format() == ConfigFormat.OMEGACONF
dicts = {
"a": {
"a1": 200,
},
"c": "hello",
"d": [200, 400, 500],
"e1": "Yes",
"e2": "True",
"e3": "NO",
}
config = loader.load_config_from_dict(dicts)
config = loader.load_config_from_str(config.to_str())
assert config is not None
assert config.to_dict() == dicts
assert config.get_format() == ConfigFormat.OMEGACONF
| NVFlare-main | tests/unit_test/fuel/utils/omegaconf_config_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nvflare.fuel.utils.validation_utils import (
DefaultValuePolicy,
check_non_empty_str,
check_number_range,
check_positive_int,
check_positive_number,
normalize_config_arg,
validate_candidate,
validate_candidates,
)
class TestValidationUtils:
@pytest.mark.parametrize(
"value, result",
[
("x", "x"),
(123, 123),
("", ""),
(False, None),
("@None", None),
(None, ""),
(0, ""),
([], ""),
({}, ""),
((), ""),
([1, 2, 3], [1, 2, 3]),
],
)
def test_normalize_config_arg(self, value, result):
assert normalize_config_arg(value) == result
@pytest.mark.parametrize(
"name, num, min_value, max_value",
[
("x", 12.34, None, None),
("x", 0.23, -1.0, None),
("x", 0, None, 1.0),
("x", 0, -0.01, 0.1),
],
)
def test_check_number_range(self, name, num, min_value, max_value):
check_number_range(name, num, min_value, max_value)
@pytest.mark.parametrize(
"name, num, min_value, max_value",
[
("x", -1.0, 0.0, None),
("x", "0", None, None),
("x", 2.0, 0.1, 1.0),
("x", -5, -10, -6),
("x", 0, "-1", None),
("x", 0, -1, "-2"),
],
)
def test_check_number_range_error(self, name, num, min_value, max_value):
with pytest.raises(Exception):
check_number_range(name, num, min_value, max_value)
@pytest.mark.parametrize(
"name, num",
[
("x", 1),
("x", 100),
("x", 12345),
],
)
def test_check_positive_int(self, name, num):
check_positive_int(name, num)
@pytest.mark.parametrize(
"name, num",
[
("x", 0),
("x", -1.0),
("x", "0"),
("x", 2.0),
("x", -5),
],
)
def test_check_positive_int_error(self, name, num):
with pytest.raises(Exception):
check_positive_int(name, num)
@pytest.mark.parametrize(
"name, num",
[
("x", 1),
("x", 100),
("x", 12345),
("x", 0.001),
("x", 1.3e5),
],
)
def test_check_positive_number(self, name, num):
check_positive_number(name, num)
@pytest.mark.parametrize(
"name, num",
[("x", 0), ("x", 0.0), ("x", -1.0), ("x", "0"), ("x", -5), ("x", -1.3e5)],
)
def test_check_positive_number_error(self, name, num):
with pytest.raises(Exception):
check_positive_number(name, num)
@pytest.mark.parametrize(
"name, value",
[
("x", "abc"),
("x", " vsd "),
],
)
def test_check_non_empty_str(self, name, value):
check_non_empty_str(name, value)
@pytest.mark.parametrize(
"name, num",
[
("x", 0),
("x", 1.2324),
("x", ""),
("x", " "),
],
)
def test_check_non_empty_str_error(self, name, num):
with pytest.raises(Exception):
check_non_empty_str(name, num)
@pytest.mark.parametrize(
"var_name, candidate, base, default_policy, allow_none, output",
[
("x", "red", ["red", "blue"], DefaultValuePolicy.ANY, True, "red"),
("x", " red ", ["red", "blue"], DefaultValuePolicy.ANY, True, "red"),
("x", "", ["red", "blue"], DefaultValuePolicy.ANY, True, "red"),
("x", "", ["red", "blue"], DefaultValuePolicy.EMPTY, True, ""),
("x", None, ["red", "blue"], DefaultValuePolicy.ANY, True, ""),
("x", "@none", ["red", "blue"], DefaultValuePolicy.ANY, True, ""),
],
)
def test_validate_candidate(self, var_name, candidate, base, default_policy: str, allow_none, output):
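        # An empty candidate falls back per the default-value policy, while None and
        # "@none" resolve to "" when allow_none is True.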
assert validate_candidate(var_name, candidate, base, default_policy, allow_none) == output
@pytest.mark.parametrize(
"var_name, candidate, base, default_policy, allow_none",
[
("x", "red", ["red", "blue"], "bad", True),
("x", "Red", ["red", "blue"], "bad", True),
("x", 2, ["red", "blue"], DefaultValuePolicy.ANY, True),
("x", "", ["red", "blue"], DefaultValuePolicy.DISALLOW, True),
("x", "", ["red", "blue"], DefaultValuePolicy.ALL, True),
("x", "yellow", ["red", "blue"], DefaultValuePolicy.ANY, True),
("x", None, ["red", "blue"], DefaultValuePolicy.ANY, False),
("x", "@none", ["red", "blue"], DefaultValuePolicy.ANY, False),
("x", "@all", ["red", "blue"], DefaultValuePolicy.ANY, False),
],
)
def test_validate_candidate_error(self, var_name, candidate, base, default_policy, allow_none):
with pytest.raises(ValueError):
validate_candidate(var_name, candidate, base, default_policy, allow_none)
@pytest.mark.parametrize(
"var_name, candidates, base, default_policy, allow_none, output",
[
("x", "red", ["red", "blue"], DefaultValuePolicy.ANY, True, ["red"]),
("x", [" red ", "blue", "red"], ["red", "blue", "green"], DefaultValuePolicy.ANY, True, ["red", "blue"]),
("x", "", ["red", "blue"], DefaultValuePolicy.ANY, True, ["red"]),
("x", "", ["red", "blue"], DefaultValuePolicy.ALL, True, ["red", "blue"]),
("x", "", ["red", "blue"], DefaultValuePolicy.EMPTY, True, []),
("x", "red", ["red", "blue"], DefaultValuePolicy.ANY, True, ["red"]),
("x", [], ["red", "blue"], DefaultValuePolicy.ANY, True, ["red"]),
("x", [], ["red", "blue"], DefaultValuePolicy.EMPTY, True, []),
("x", [], ["red", "blue"], DefaultValuePolicy.ALL, True, ["red", "blue"]),
("x", None, ["red", "blue"], DefaultValuePolicy.ANY, True, []),
("x", "@all", ["red", "blue"], DefaultValuePolicy.ANY, True, ["red", "blue"]),
("x", "@none", ["red", "blue"], DefaultValuePolicy.ANY, True, []),
],
)
def test_validate_candidates(self, var_name, candidates, base, default_policy, allow_none, output):
assert validate_candidates(var_name, candidates, base, default_policy, allow_none) == output
@pytest.mark.parametrize(
"var_name, candidate, base, default_policy, allow_none",
[
("x", "red", ["red", "blue"], "bad", True),
("x", "Red", ["red", "blue"], DefaultValuePolicy.ANY, True),
("x", 2, ["red", "blue"], DefaultValuePolicy.ANY, True),
("x", ["red", "green"], ["red", "blue"], DefaultValuePolicy.ANY, True),
("x", ["Red"], ["red", "blue"], DefaultValuePolicy.ANY, True),
("x", "", ["red", "blue"], DefaultValuePolicy.DISALLOW, True),
("x", [], ["red", "blue"], DefaultValuePolicy.DISALLOW, True),
("x", "yellow", ["red", "blue"], DefaultValuePolicy.ANY, True),
("x", None, ["red", "blue"], DefaultValuePolicy.ANY, False),
("x", "@none", ["red", "blue"], DefaultValuePolicy.ANY, False),
],
)
def test_validate_candidates_error(self, var_name, candidate, base, default_policy, allow_none):
with pytest.raises(ValueError):
validate_candidates(var_name, candidate, base, default_policy, allow_none)
| NVFlare-main | tests/unit_test/fuel/utils/validation_utils_test.py |