python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyhocon import ConfigFactory as CF
from nvflare.tool.job.config.config_indexer import KeyIndex, build_dict_reverse_order_index
from nvflare.tool.job.config.configer import extract_string_with_index, filter_config_name_and_values
class TestConfigIndex:
def test_dict_indexer(self):
key_indices = build_dict_reverse_order_index(config=CF.from_dict({}))
assert len(key_indices) == 0
config_dict = dict(
x=dict(
x1=dict(x11=2),
x2=dict(x21=3, x22=4, x23=dict(x31=3, x32=4)),
y=dict(y1=dict(y11=2), y2=dict(y21=1)),
z=[
dict(id=2),
dict(id=3),
dict(id=4),
100,
],
s=[
dict(id=2),
100,
],
)
)
config = CF.from_dict(config_dict)
root_key = KeyIndex(key="", value=config, parent_key=None)
x_key = KeyIndex(key="x", value=config.get("x"), parent_key=None)
x1_key = KeyIndex(key="x1", value=config.get("x").get("x1"), parent_key=x_key)
x2_key = KeyIndex(key="x2", value=config.get("x").get("x2"), parent_key=x_key)
y_key = KeyIndex(key="y", value=config.get("x").get("y"), parent_key=x_key)
z_key = KeyIndex(key="z", value=config.get("x").get("z"), parent_key=x_key)
s_key = KeyIndex(key="s", value=config.get("x").get("s"), parent_key=x_key)
x11_key = KeyIndex(key="x11", value=2, parent_key=x1_key)
x21_key = KeyIndex(key="x21", value=3, parent_key=x2_key)
x22_key = KeyIndex(key="x22", value=4, parent_key=x2_key)
x23_key = KeyIndex(key="x23", value=config.get("x").get("x2").get("x23"), parent_key=x2_key)
x31_key = KeyIndex(key="x31", value=3, parent_key=x23_key)
x32_key = KeyIndex(key="x32", value=4, parent_key=x23_key)
y1_key = KeyIndex(key="y1", value=config.get("x").get("y").get("y1"), parent_key=y_key)
y2_key = KeyIndex(key="y2", value=config.get("x").get("y").get("y2"), parent_key=y_key)
y11_key = KeyIndex(key="y11", value=2, parent_key=y1_key)
y21_key = KeyIndex(key="y21", value=1, parent_key=y2_key)
z0_key = KeyIndex(key="z[0]", value=config.get("x").get("z")[0], parent_key=z_key, index=0)
z1_key = KeyIndex(key="z[1]", value=config.get("x").get("z")[1], parent_key=z_key, index=1)
z2_key = KeyIndex(key="z[2]", value=config.get("x").get("z")[2], parent_key=z_key, index=2)
z3_key = KeyIndex(key="z[3]", value=100, parent_key=z_key, index=3)
s0_key = KeyIndex(key="s[0]", value=config.get("x").get("s")[0], parent_key=s_key, index=0)
s1_key = KeyIndex(key="s[1]", value=100, parent_key=s_key, index=1)
id_keys = [
KeyIndex(key="id", value=2, parent_key=z0_key),
KeyIndex(key="id", value=3, parent_key=z1_key),
KeyIndex(key="id", value=4, parent_key=z2_key),
KeyIndex(key="id", value=2, parent_key=s0_key),
]
expected_keys = {
"x31": x31_key,
"x32": x32_key,
"x22": x22_key,
"x21": x21_key,
"x11": x11_key,
"y21": y21_key,
"y11": y11_key,
"z[3]": z3_key,
"id": id_keys,
"s[1]": s1_key,
}
key_indices = build_dict_reverse_order_index(config=CF.from_dict(config_dict))
print("\n\n")
for key in key_indices:
e = expected_keys[key]
b_list = key_indices[key]
if len(b_list) == 1:
b = b_list[0]
a = e
assert key == a.key
assert key == b.key
assert a.key == b.key and a.value == b.value
assert a.index == b.index
if b.component_name is None or b.component_name.strip() == "":
assert a.component_name is None or a.component_name.strip() == ""
else:
assert a.component_name == b.component_name
assert a.parent_key.key == b.parent_key.key
assert a.parent_key.value == b.parent_key.value
assert a.parent_key.index == b.parent_key.index
else:
xs = zip(e, b_list)
for a, b in xs:
assert a.key == b.key and a.value == b.value
assert a.index == b.index
if b.component_name is None or b.component_name.strip() == "":
assert a.component_name is None or a.component_name.strip() == ""
else:
assert a.component_name == b.component_name
assert a.parent_key.key == b.parent_key.key
assert a.parent_key.value == b.parent_key.value
assert a.parent_key.index == b.parent_key.index
diff1 = set(key_indices.keys()) - set(expected_keys.keys())
diff2 = set(expected_keys.keys()) - set(key_indices.keys())
assert len(diff2) == 0
assert len(diff1) == 0
def test_extract_string_with_index(self):
input_string = "components[0].args.data_path"
tokens = extract_string_with_index(input_string)
assert tokens == [("components", 0, ["args.data_path"])]
def test_extract_file_from_dict_by_index(self):
config_str = """
{
"components": [
{
"id": "df_stats_generator",
"path": "df_statistics.DFStatistics",
"args": {
"data_path": "data.csv"
}
},
{
"id": "min_max_cleanser",
"path": "nvflare.app_common.statistics.min_max_cleanser.AddNoiseToMinMax",
"args": {
"min_noise_level": 0.1,
"max_noise_level": 0.3
}
}
]
}
"""
conf = CF.parse_string(config_str)
key_indices = build_dict_reverse_order_index(config=conf)
result = {}
exclude_key_list = []
result = filter_config_name_and_values(exclude_key_list, key_indices)
key_index = result["data_path"]
assert key_index.value == "data.csv"
| NVFlare-main | tests/unit_test/tool/job/config/config_indexer_test.py |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx_rtd_theme
import os
import sys
from sphinx.domains.python import PythonDomain
import subprocess
class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if "refspecific" in node:
del node["refspecific"]
return super(PatchedPythonDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
sys.path.insert(0, os.path.abspath(".."))
print(sys.path)
# -- Project information -----------------------------------------------------
project = "NVIDIA FLARE"
copyright = "2023, NVIDIA"
author = "NVIDIA"
# The full version, including alpha/beta/rc tags
release = "2.3.0"
version = "2.3.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Add napoleon to the extensions list
# source_parsers = {'.md': CommonMarkParser}
templates_path = ["templates"]
source_suffix = {
".rst": "restructuredtext",
".txt": "restructuredtext",
".md": "markdown",
}
extensions = [
"recommonmark",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
"sphinx_copybutton",
"sphinxcontrib.jquery"
]
autoclass_content = "both"
add_module_names = False
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
"collapse_navigation": True,
"display_version": True,
"navigation_depth": 8,
"sticky_navigation": True, # Set to False to disable the sticky nav while scrolling.
# 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text
}
html_scaled_image_link = False
html_show_sourcelink = True
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
def generate_apidocs(*args):
"""Generate API docs automatically by trawling the available modules"""
module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "nvflare"))
output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "apidocs"))
print(f"output_path {output_path}")
print(f"module_path {module_path}")
subprocess.check_call(
[sys.executable, "-m", "sphinx.ext.apidoc", "-f", "-e"]
+ ["-o", output_path]
+ [module_path]
+ [os.path.join(module_path, p) for p in exclude_patterns]
)
def setup(app):
app.connect("builder-inited", generate_apidocs)
app.add_domain(PatchedPythonDomain, override=True)
app.add_css_file("css/additions.css")
| NVFlare-main | docs/conf.py |
def _get_model_weights(self) -> Shareable:
# Get state dict and send as weights
new_weights = self.model.state_dict()
new_weights = {k: v.cpu().numpy() for k, v in new_weights.items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS, data=new_weights, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self._n_iterations}
)
return outgoing_dxo.to_shareable()
| NVFlare-main | docs/programming_guide/resources/te.py |
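The excerpt above packs the local state dict into a DXO of kind WEIGHTS and returns it as a Shareable. For orientation, here is a minimal sketch of the receiving side, assuming the DXO accessors (from_shareable, data_kind, get_meta_prop) mirror the setters used elsewhere in this collection; the incoming_shareable name is a placeholder.

# Sketch (assumption): unpacking the Shareable produced by _get_model_weights above.
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable

def unpack_model_weights(incoming_shareable):
    dxo = from_shareable(incoming_shareable)       # recover the DXO from the Shareable
    assert dxo.data_kind == DataKind.WEIGHTS       # it was built with data_kind=DataKind.WEIGHTS
    weights = dxo.data                             # dict of numpy arrays keyed by layer name
    n_steps = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND)
    return weights, n_steps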
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import numpy as np
import pandas as pd
def data_split_args_parser():
parser = argparse.ArgumentParser(description="Generate data split for dataset")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--site_num", type=int, default=2, help="Total number of sites")
parser.add_argument(
"--rows_total_percentage",
type=float,
default=1.0,
help="Percentage of dataset_rows_total to use for rows_total",
)
parser.add_argument(
"--rows_overlap_percentage",
type=float,
default=0.5,
help="Percentage of rows_total to use for rows_overlap between sites",
)
parser.add_argument("--out_path", type=str, default="~/dataset", help="Output path for the data split file")
parser.add_argument("--out_file", type=str, default="data.csv", help="Output file name for the data split file")
return parser
def split_num_proportion(n, site_num):
split = []
ratio_vec = np.ones(site_num)
total = sum(ratio_vec)
left = n
for site in range(site_num - 1):
x = int(n * ratio_vec[site] / total)
left = left - x
split.append(x)
split.append(left)
return split
def main():
parser = data_split_args_parser()
args = parser.parse_args()
df = pd.read_csv(args.data_path, header=None)
dataset_rows_total, cols_total = df.shape[0], df.shape[1]
rows_total = int(dataset_rows_total * args.rows_total_percentage)
rows_overlap = int(rows_total * args.rows_overlap_percentage)
print(f"site_num: {args.site_num}")
print(
f"dataset_num_rows: {dataset_rows_total}, rows_total_percentage: {args.rows_total_percentage}, rows_total: {rows_total}"
)
print(f"rows_overlap_percentage: {args.rows_overlap_percentage}, rows_overlap: {rows_overlap}")
print(f"cols_total: {cols_total}")
df["uid"] = df.index.to_series().map(lambda x: "uid_" + str(x))
site_col_size = split_num_proportion(cols_total, args.site_num)
site_row_size = split_num_proportion(rows_total - rows_overlap, args.site_num)
if os.path.exists(args.out_path):
shutil.rmtree(args.out_path)
for site in range(args.site_num):
col_start = sum(site_col_size[:site])
col_end = sum(site_col_size[: site + 1])
row_start = sum(site_row_size[:site])
row_end = sum(site_row_size[: site + 1])
df_split = pd.concat(
[
df.iloc[row_start:row_end, np.r_[col_start:col_end, cols_total]],
df.iloc[
rows_total - rows_overlap : rows_total,
np.r_[col_start:col_end, cols_total],
],
]
)
df_split = df_split.sample(frac=1)
print(f"site-{site+1} split rows [{row_start}:{row_end}],[{rows_total - rows_overlap}:{rows_total}]")
print(f"site-{site+1} split cols [{col_start}:{col_end}]")
data_path = os.path.join(args.out_path, f"site-{site + 1}")
if not os.path.exists(data_path):
os.makedirs(data_path, exist_ok=True)
df_split.to_csv(path_or_buf=os.path.join(data_path, args.out_file), index=False)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/vertical_xgboost/utils/prepare_data.py |
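A quick worked example of the proportional split above: with the uniform ratio vector of ones, split_num_proportion gives every site except the last int(n / site_num) rows, and the last site absorbs the remainder. Assuming the helper is in scope (e.g. pasted into an interactive session):

# 10 rows over 3 sites: int(10/3) = 3 for the first two sites, the remaining 4 rows go to the last site.
print(split_num_proportion(10, 3))  # -> [3, 3, 4]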
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import xgboost as xgb
from nvflare.app_opt.xgboost.data_loader import XGBDataLoader
def _get_data_intersection(df, intersection_path, id_col):
with open(intersection_path) as intersection_file:
intersection = intersection_file.read().splitlines()
intersection.sort()
# Note: the order of the intersection must be maintained
intersection_df = df[df[id_col].isin(intersection)].copy()
intersection_df["sort"] = pd.Categorical(intersection_df[id_col], categories=intersection, ordered=True)
intersection_df = intersection_df.sort_values("sort")
intersection_df = intersection_df.drop([id_col, "sort"], axis=1)
if intersection_df.empty:
raise ValueError("private set intersection must not be empty")
return intersection_df
def _split_train_val(df, train_proportion):
num_train = int(df.shape[0] * train_proportion)
train_df = df.iloc[:num_train].copy()
valid_df = df.iloc[num_train:].copy()
return train_df, valid_df
class VerticalDataLoader(XGBDataLoader):
def __init__(self, data_split_path, psi_path, id_col, label_owner, train_proportion):
"""Reads intersection of dataset and returns train and validation XGB data matrices with column split mode.
Args:
data_split_path: path to data split file
psi_path: path to intersection file
id_col: column id used for psi
label_owner: client id that owns the label
train_proportion: proportion of intersected data to use for training
"""
self.data_split_path = data_split_path
self.psi_path = psi_path
self.id_col = id_col
self.label_owner = label_owner
self.train_proportion = train_proportion
def load_data(self, client_id: str):
client_data_split_path = self.data_split_path.replace("site-x", client_id)
client_psi_path = self.psi_path.replace("site-x", client_id)
data_split_dir = os.path.dirname(client_data_split_path)
train_path = os.path.join(data_split_dir, "train.csv")
valid_path = os.path.join(data_split_dir, "valid.csv")
if not (os.path.exists(train_path) and os.path.exists(valid_path)):
df = pd.read_csv(client_data_split_path)
intersection_df = _get_data_intersection(df, client_psi_path, self.id_col)
train_df, valid_df = _split_train_val(intersection_df, self.train_proportion)
train_df.to_csv(path_or_buf=train_path, header=False, index=False)
valid_df.to_csv(path_or_buf=valid_path, header=False, index=False)
if client_id == self.label_owner:
label = "&label_column=0"
else:
label = ""
# for Vertical XGBoost, read from csv with label_column and set data_split_mode to 1 for column mode
dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=1)
dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=1)
return dtrain, dvalid
| NVFlare-main | examples/advanced/vertical_xgboost/jobs/vertical_xgb/app/custom/vertical_data_loader.py |
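The loader above substitutes the client name for the "site-x" placeholder in both paths, caches the train/valid CSVs next to the split file on first use, and attaches the label column only for the label owner. A hedged construction sketch follows; the paths, id column, and proportion are illustrative placeholders, not values taken from the example's job config.

# Illustrative only: constructing the VerticalDataLoader defined above with placeholder paths.
loader = VerticalDataLoader(
    data_split_path="/data/dataset/site-x/data.csv",       # "site-x" is replaced by the client id
    psi_path="/data/dataset/site-x/psi/intersection.txt",   # path to the intersection file from the PSI job
    id_col="uid",
    label_owner="site-1",
    train_proportion=0.8,
)
dtrain, dvalid = loader.load_data(client_id="site-1")       # label column is set only for the label owner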
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from typing import List
import pandas as pd
from nvflare.app_common.psi.psi_spec import PSI
class LocalPSI(PSI):
def __init__(self, psi_writer_id: str, data_split_path: str, id_col: str):
super().__init__(psi_writer_id)
self.data_split_path = data_split_path
self.id_col = id_col
self.data = {}
def load_items(self) -> List[str]:
client_id = self.fl_ctx.get_identity_name()
client_data_split_path = self.data_split_path.replace("site-x", client_id)
if os.path.isfile(client_data_split_path):
df = pd.read_csv(client_data_split_path, header=0)
else:
raise RuntimeError(f"invalid data path {client_data_split_path}")
        # Note: the PSI algorithm requires the items to be unique
items = list(df[self.id_col])
return items
| NVFlare-main | examples/advanced/vertical_xgboost/jobs/vertical_xgb_psi/app/custom/local_psi.py |
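LocalPSI is registered as a component in the client job configuration; since that config file is not reproduced here, the snippet below only illustrates the constructor arguments as a parsed Python dict, with placeholder ids and paths.

# Hypothetical component entry (as a Python dict) matching LocalPSI's constructor above.
local_psi_component = {
    "id": "local_psi",
    "path": "local_psi.LocalPSI",
    "args": {
        "psi_writer_id": "psi_writer",                       # placeholder writer component id
        "data_split_path": "/data/dataset/site-x/data.csv",  # "site-x" is replaced by the client name
        "id_col": "uid",
    },
}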
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pathlib
import shutil
from enum import Enum
from typing import List
import numpy as np
from nvflare.apis.fl_constant import JobConstants
JOBS_ROOT = "jobs"
class SplitMethod(Enum):
UNIFORM = "uniform"
LINEAR = "linear"
SQUARE = "square"
EXPONENTIAL = "exponential"
def job_config_args_parser():
parser = argparse.ArgumentParser(description="generate train configs with data split")
parser.add_argument("--task_name", type=str, help="Task name for the config")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--site_num", type=int, help="Total number of sites")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
parser.add_argument(
"--data_size",
type=int,
default=0,
help="Total data size, use if specified, in order to use partial data"
"If not specified, use the full data size fetched from file.",
)
parser.add_argument(
"--valid_frac",
type=float,
help="Validation fraction of the total size, N = round(total_size* valid_frac), "
"the first N to be treated as validation data. "
"special case valid_frac = 1, where all data will be used"
"in validation, e.g. for evaluating unsupervised clustering with known ground truth label.",
)
parser.add_argument(
"--split_method",
type=str,
default="uniform",
choices=["uniform", "linear", "square", "exponential"],
help="How to split the dataset",
)
return parser
def get_split_ratios(site_num: int, split_method: SplitMethod):
if split_method == SplitMethod.UNIFORM:
ratio_vec = np.ones(site_num)
elif split_method == SplitMethod.LINEAR:
ratio_vec = np.linspace(1, site_num, num=site_num)
elif split_method == SplitMethod.SQUARE:
ratio_vec = np.square(np.linspace(1, site_num, num=site_num))
elif split_method == SplitMethod.EXPONENTIAL:
ratio_vec = np.exp(np.linspace(1, site_num, num=site_num))
else:
raise ValueError(f"Split method {split_method.name} not implemented!")
return ratio_vec
def split_num_proportion(n, site_num, split_method: SplitMethod) -> List[int]:
split = []
ratio_vec = get_split_ratios(site_num, split_method)
total = sum(ratio_vec)
left = n
for site in range(site_num - 1):
x = int(n * ratio_vec[site] / total)
left = left - x
split.append(x)
split.append(left)
return split
def assign_data_index_to_sites(
data_size: int,
valid_fraction: float,
num_sites: int,
site_name_prefix: str,
split_method: SplitMethod = SplitMethod.UNIFORM,
) -> dict:
if valid_fraction > 1.0:
raise ValueError("validation percent should be less than or equal to 100% of the total data")
elif valid_fraction < 1.0:
valid_size = int(round(data_size * valid_fraction, 0))
train_size = data_size - valid_size
else:
valid_size = data_size
train_size = data_size
site_sizes = split_num_proportion(train_size, num_sites, split_method)
split_data_indices = {
"valid": {"start": 0, "end": valid_size},
}
for site in range(num_sites):
site_id = site_name_prefix + str(site + 1)
if valid_fraction < 1.0:
idx_start = valid_size + sum(site_sizes[:site])
idx_end = valid_size + sum(site_sizes[: site + 1])
else:
idx_start = sum(site_sizes[:site])
idx_end = sum(site_sizes[: site + 1])
split_data_indices[site_id] = {"start": idx_start, "end": idx_end}
return split_data_indices
def get_file_line_count(input_path: str) -> int:
count = 0
with open(input_path, "r") as fp:
for i, _ in enumerate(fp):
count += 1
return count
def split_data(
data_path: str,
site_num: int,
data_size: int,
valid_frac: float,
site_name_prefix: str = "site-",
split_method: SplitMethod = SplitMethod.UNIFORM,
):
size_total_file = get_file_line_count(data_path)
if data_size > 0:
if data_size > size_total_file:
raise ValueError("data_size should be less than or equal to the true data size")
else:
size_total = data_size
else:
size_total = size_total_file
site_indices = assign_data_index_to_sites(size_total, valid_frac, site_num, site_name_prefix, split_method)
return site_indices
def _read_json(filename):
if not os.path.isfile(filename):
raise ValueError(f"{filename} does not exist!")
with open(filename, "r") as f:
return json.load(f)
def _write_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=2)
def _get_job_name(args) -> str:
return args.task_name + "_" + str(args.site_num) + "_" + args.split_method
def _gen_deploy_map(num_sites: int, site_name_prefix: str) -> dict:
deploy_map = {"app_server": ["server"]}
for i in range(1, num_sites + 1):
deploy_map[f"app_{site_name_prefix}{i}"] = [f"{site_name_prefix}{i}"]
return deploy_map
def _update_meta(meta: dict, args):
name = _get_job_name(args)
meta["name"] = name
meta["deploy_map"] = _gen_deploy_map(args.site_num, args.site_name_prefix)
meta["min_clients"] = args.site_num
def _update_client_config(config: dict, args, site_name: str, site_indices):
# update client config
# data path and training/validation row indices
config["components"][0]["args"]["data_path"] = args.data_path
config["components"][0]["args"]["train_start"] = site_indices[site_name]["start"]
config["components"][0]["args"]["train_end"] = site_indices[site_name]["end"]
config["components"][0]["args"]["valid_start"] = site_indices["valid"]["start"]
config["components"][0]["args"]["valid_end"] = site_indices["valid"]["end"]
def _update_server_config(config: dict, args):
config["min_clients"] = args.site_num
def _copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name):
dst_path = dst_job_path / dst_app_name / "custom"
os.makedirs(dst_path, exist_ok=True)
src_path = src_job_path / src_app_name / "custom"
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path, dirs_exist_ok=True)
def create_server_app(src_job_path, src_app_name, dst_job_path, site_name, args):
dst_app_name = f"app_{site_name}"
server_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.SERVER_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
_update_server_config(server_config, args)
server_config_filename = dst_config_path / JobConstants.SERVER_JOB_CONFIG
_write_json(server_config, server_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def create_client_app(src_job_path, src_app_name, dst_job_path, site_name, site_indices, args):
dst_app_name = f"app_{site_name}"
client_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.CLIENT_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
# adjust file contents according to each job's specs
_update_client_config(client_config, args, site_name, site_indices)
client_config_filename = dst_config_path / JobConstants.CLIENT_JOB_CONFIG
_write_json(client_config, client_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def main():
parser = job_config_args_parser()
args = parser.parse_args()
job_name = _get_job_name(args)
src_name = args.task_name + "_base"
src_job_path = pathlib.Path(JOBS_ROOT) / src_name
# create a new job
dst_job_path = pathlib.Path(JOBS_ROOT) / job_name
if not os.path.exists(dst_job_path):
os.makedirs(dst_job_path)
# update meta
meta_config_dst = dst_job_path / JobConstants.META_FILE
meta_config = _read_json(src_job_path / JobConstants.META_FILE)
_update_meta(meta_config, args)
_write_json(meta_config, meta_config_dst)
# create server side app
create_server_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name="server",
args=args,
)
# generate data split
site_indices = split_data(
args.data_path,
args.site_num,
args.data_size,
args.valid_frac,
args.site_name_prefix,
)
# create client side app
for i in range(1, args.site_num + 1):
create_client_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name=f"{args.site_name_prefix}{i}",
site_indices=site_indices,
args=args,
)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/sklearn-linear/utils/prepare_job_config.py |
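To make the index bookkeeping above concrete, here is the split assign_data_index_to_sites produces for a small uniform case; the numbers follow directly from the code (validation rows first, then contiguous per-site training ranges), assuming the function is importable from the script.

# 100 rows, 20% validation, 2 sites, uniform split:
# valid_size = 20, train_size = 80, site_sizes = [40, 40]
indices = assign_data_index_to_sites(
    data_size=100,
    valid_fraction=0.2,
    num_sites=2,
    site_name_prefix="site-",
    split_method=SplitMethod.UNIFORM,
)
# indices == {
#     "valid":  {"start": 0,  "end": 20},
#     "site-1": {"start": 20, "end": 60},
#     "site-2": {"start": 60, "end": 100},
# }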
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Optional, Tuple
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_opt.sklearn.data_loader import load_data_for_range
class LinearLearner(Learner):
def __init__(
self,
data_path: str,
train_start: int,
train_end: int,
valid_start: int,
valid_end: int,
random_state: int = None,
):
super().__init__()
self.data_path = data_path
self.train_start = train_start
self.train_end = train_end
self.valid_start = valid_start
self.valid_end = valid_end
self.random_state = random_state
self.train_data = None
self.valid_data = None
self.n_samples = None
self.local_model = None
self.n_features = None
def load_data(self) -> dict:
train_data = load_data_for_range(self.data_path, self.train_start, self.train_end)
valid_data = load_data_for_range(self.data_path, self.valid_start, self.valid_end)
return {"train": train_data, "valid": valid_data}
def initialize(self, parts: dict, fl_ctx: FLContext):
self.log_info(fl_ctx, f"Loading data from {self.data_path}")
data = self.load_data()
self.train_data = data["train"]
self.valid_data = data["valid"]
# train data size, to be used for setting
# NUM_STEPS_CURRENT_ROUND for potential aggregation
self.n_samples = data["train"][-1]
self.n_features = data["train"][0].shape[1]
# model will be created after receiving global parameters
def set_parameters(self, params):
self.local_model.coef_ = params["coef"]
if self.local_model.fit_intercept:
self.local_model.intercept_ = params["intercept"]
def train(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
(x_train, y_train, train_size) = self.train_data
if curr_round == 0:
# initialize model with global_param
# and set to all zero
fit_intercept = bool(global_param["fit_intercept"])
self.local_model = SGDClassifier(
loss=global_param["loss"],
penalty=global_param["penalty"],
fit_intercept=fit_intercept,
learning_rate=global_param["learning_rate"],
eta0=global_param["eta0"],
max_iter=1,
warm_start=True,
random_state=self.random_state,
)
n_classes = global_param["n_classes"]
self.local_model.classes_ = np.array(list(range(n_classes)))
self.local_model.coef_ = np.zeros((1, self.n_features))
if fit_intercept:
self.local_model.intercept_ = np.zeros((1,))
# Training starting from global model
# Note that the parameter update using global model has been performed
# during global model evaluation
self.local_model.fit(x_train, y_train)
if self.local_model.fit_intercept:
params = {
"coef": self.local_model.coef_,
"intercept": self.local_model.intercept_,
}
else:
params = {"coef": self.local_model.coef_}
return copy.deepcopy(params), self.local_model
def validate(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
# set local model with global parameters
self.set_parameters(global_param)
# perform validation
(x_valid, y_valid, valid_size) = self.valid_data
y_pred = self.local_model.predict(x_valid)
auc = roc_auc_score(y_valid, y_pred)
self.log_info(fl_ctx, f"AUC {auc:.4f}")
metrics = {"AUC": auc}
return metrics, self.local_model
def finalize(self, fl_ctx: FLContext):
# freeing resources in finalize
del self.train_data
del self.valid_data
self.log_info(fl_ctx, "Freed training resources")
| NVFlare-main | examples/advanced/sklearn-linear/jobs/sklearn_linear_base/app/custom/linear_learner.py |
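On the first round the learner above builds its SGDClassifier entirely from the global parameter dict, so that dict must carry the hyperparameters read in train(). The keys below match the code; the values are placeholders rather than the example's actual server configuration.

# Illustrative global_param dict for curr_round == 0 (values are placeholders).
global_param = {
    "loss": "log_loss",          # any loss supported by sklearn's SGDClassifier
    "penalty": "l2",
    "fit_intercept": 1,          # cast to bool in train()
    "learning_rate": "constant",
    "eta0": 1e-4,
    "n_classes": 2,
}
# In later rounds the aggregated parameters also include "coef" (and "intercept" when
# fit_intercept is set), which set_parameters() copies into the local model before validation.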
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def custom_client_datalist_json_path(datalist_json_path: str, client_id: str) -> str:
"""
Customize datalist_json_path for each client
Args:
datalist_json_path: root path containing all jsons
client_id: e.g., site-2
"""
# Customize datalist_json_path for each client
datalist_json_path_client = os.path.join(
datalist_json_path,
client_id + ".json",
)
return datalist_json_path_client
| NVFlare-main | examples/advanced/prostate/prostate_2D/custom/utils/custom_client_datalist_json_path.py |
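The helper above simply joins the root directory with "<client_id>.json"; a one-line usage example (the directory is a placeholder path):

custom_client_datalist_json_path("/workspace/datalists", "site-2")
# -> "/workspace/datalists/site-2.json"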
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import torch
import torch.optim as optim
from helpers.supervised_pt_ditto import SupervisedPTDittoHelper
from learners.supervised_monai_prostate_learner import SupervisedMonaiProstateLearner
from monai.losses import DiceLoss
from monai.networks.nets.unet import UNet
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class SupervisedMonaiProstateDittoLearner(SupervisedMonaiProstateLearner):
def __init__(
self,
train_config_filename,
aggregation_epochs: int = 1,
ditto_model_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""Trainer for prostate segmentation task. It inherits from MONAI trainer.
Args:
            train_config_filename: path to the config file.
aggregation_epochs: the number of training epochs of global model for a round. Defaults to 1.
ditto_model_epochs: the number of training epochs of personalized model for a round. Defaults to 1.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
SupervisedMonaiProstateLearner.__init__(
self,
train_config_filename=train_config_filename,
aggregation_epochs=aggregation_epochs,
train_task_name=train_task_name,
)
self.ditto_helper = None
self.ditto_model_epochs = ditto_model_epochs
def train_config(self, fl_ctx: FLContext):
# Initialize superclass
SupervisedMonaiProstateLearner.train_config(self, fl_ctx)
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_dir = ws.get_app_dir(fl_ctx.get_job_id())
# Initialize PTDittoHelper
ditto_model = UNet(
spatial_dims=2,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(self.device)
ditto_optimizer = optim.Adam(ditto_model.parameters(), lr=self.config_info["ditto_learning_rate"])
self.ditto_helper = SupervisedPTDittoHelper(
criterion=DiceLoss(sigmoid=True),
model=ditto_model,
optimizer=ditto_optimizer,
device=self.device,
app_dir=app_dir,
ditto_lambda=self.config_info["ditto_lambda"],
model_epochs=self.ditto_model_epochs,
)
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""Training task pipeline for Ditto
Get global model weights (potentially with HE)
Prepare for fedprox loss
Load Ditto personalized model info
Local training reference model and personalized model
Return updated weights of reference model (model_diff)
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
# Load Ditto personalized model
self.ditto_helper.load_model(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for
# 1. FedProx loss of reference model
# 2. Ditto loss of personalized model
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
# local train reference model
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# local train ditto model
self.ditto_helper.local_train(
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
writer=self.writer,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# local valid ditto model each round
metric = self.local_valid(
self.ditto_helper.model,
self.valid_loader,
abort_signal,
tb_id="val_metric_per_model",
record_epoch=self.ditto_helper.epoch_global,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_metric_per_model: {metric:.4f}")
# save model
self.ditto_helper.update_metric_save_model(metric=metric)
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# flush the tb writer
self.writer.flush()
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
| NVFlare-main | examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_ditto_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from abc import abstractmethod
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
class SupervisedLearner(Learner):
def __init__(
self,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""Simple Supervised Trainer.
This provides the basic functionality of a local learner: perform before-train validation on
global model at the beginning of each round, perform local training, and send the updated weights.
        No model is saved locally; TensorBoard records the local training loss and the global model's validation score.
        Supports both FedAvg and FedProx.
Args:
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.aggregation_epochs = aggregation_epochs
self.train_task_name = train_task_name
self.best_metric = 0.0
# Epoch counter
self.epoch_of_start_time = 0
self.epoch_global = 0
# FedProx related
self.fedproxloss_mu = 0.0
self.criterion_prox = None
def initialize(self, parts: dict, fl_ctx: FLContext):
# when a run starts, this is where the actual settings get initialized for trainer
# set the paths according to fl_ctx
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_dir = ws.get_app_dir(fl_ctx.get_job_id())
# get and print the args
fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
self.client_id = fl_ctx.get_identity_name()
self.log_info(
fl_ctx,
f"Client {self.client_id} initialized with args: \n {fl_args}",
)
# set local tensorboard writer for local validation score of global model
self.writer = SummaryWriter(app_dir)
# set the training-related contexts, this is task-specific
self.train_config(fl_ctx)
@abstractmethod
def train_config(self, fl_ctx: FLContext):
"""Traning configurations customized to individual tasks
This can be specified / loaded in any ways
as long as they are made available for further training and validation
some potential items include but not limited to:
self.lr
self.fedproxloss_mu
self.model
self.device
self.optimizer
self.criterion
self.transform_train
self.transform_valid
self.transform_post
self.train_loader
self.valid_loader
self.inferer
self.valid_metric
"""
raise NotImplementedError
@abstractmethod
def finalize(self, fl_ctx: FLContext):
# collect threads, close files here
pass
def local_train(
self,
fl_ctx,
train_loader,
model_global,
abort_signal: Signal,
):
"""Typical training logic
Total local epochs: self.aggregation_epochs
Load data pairs from train_loader: image / label
Compute outputs with self.model
Compute loss with self.criterion
Add fedprox loss
Update model
"""
for epoch in range(self.aggregation_epochs):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.log_info(
fl_ctx,
f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})",
)
for i, batch_data in enumerate(train_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
inputs = batch_data["image"].to(self.device)
labels = batch_data["label"].to(self.device)
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# FedProx loss term
if self.fedproxloss_mu > 0:
fed_prox_loss = self.criterion_prox(self.model, model_global)
loss += fed_prox_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
self.writer.add_scalar("train_loss", loss.item(), current_step)
def local_valid(
self,
model,
valid_loader,
abort_signal: Signal,
tb_id=None,
record_epoch=None,
):
"""Typical validation logic
Load data pairs from train_loader: image / label
Compute outputs with self.model
Perform post transform (binarization, etc.)
Compute evaluation metric with self.valid_metric
Add score to tensorboard record with specified id
"""
model.eval()
with torch.no_grad():
metric = 0
for i, batch_data in enumerate(valid_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
val_images = batch_data["image"].to(self.device)
val_labels = batch_data["label"].to(self.device)
# Inference
val_outputs = self.inferer(val_images, model)
val_outputs = self.transform_post(val_outputs)
# Compute metric
metric_score = self.valid_metric(y_pred=val_outputs, y=val_labels)
metric += metric_score.item()
# compute mean dice over whole validation set
metric /= len(valid_loader)
# tensorboard record id, add to record if provided
if tb_id:
self.writer.add_scalar(tb_id, metric, record_epoch)
return metric
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""Typical training task pipeline with potential HE and fedprox functionalities
Get global model weights (potentially with HE)
Prepare for fedprox loss
Local training
Return updated weights (model_diff)
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for potential FedProx loss
if self.fedproxloss_mu > 0:
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
else:
model_global = None
# local train
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# flush the tb writer
self.writer.flush()
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
"""Typical validation task pipeline with potential HE functionality
Get global model weights (potentially with HE)
Validation on local data
Return validation score
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# validation on global model
model_owner = "global_model"
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
n_loaded = 0
for var_name in local_var_dict:
if var_name in model_keys:
weights = torch.as_tensor(global_weights[var_name], device=self.device)
try:
# update the local dict
local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
n_loaded += 1
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
# before_train_validate only, can extend to other validate types
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
# perform valid before local train
global_metric = self.local_valid(
self.model,
self.valid_loader,
abort_signal,
tb_id="val_metric_global_model",
record_epoch=self.epoch_global,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_metric_global_model ({model_owner}): {global_metric:.4f}")
# validation metrics will be averaged with weights at server end for best model record
metric_dxo = DXO(
data_kind=DataKind.METRICS,
data={MetaKey.INITIAL_METRICS: global_metric},
meta={},
)
metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
return metric_dxo.to_shareable()
else:
return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | examples/advanced/prostate/prostate_2D/custom/learners/supervised_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import torch
import torch.optim as optim
from learners.supervised_learner import SupervisedLearner
from monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist
from monai.inferers import SimpleInferer
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.networks.nets.unet import UNet
from monai.transforms import (
Activations,
AsDiscrete,
AsDiscreted,
Compose,
EnsureChannelFirstd,
EnsureType,
EnsureTyped,
LoadImaged,
Resized,
ScaleIntensityRanged,
)
from utils.custom_client_datalist_json_path import custom_client_datalist_json_path
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
class SupervisedMonaiProstateLearner(SupervisedLearner):
def __init__(
self,
train_config_filename,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""MONAI Learner for prostate segmentation task.
It inherits from SupervisedLearner.
Args:
            train_config_filename: path to the config file; this is an additional parameter for config loading
aggregation_epochs: the number of training epochs for a round.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__(
aggregation_epochs=aggregation_epochs,
train_task_name=train_task_name,
)
self.train_config_filename = train_config_filename
self.config_info = None
def train_config(self, fl_ctx: FLContext):
"""MONAI traning configuration
Here, we use a json to specify the needed parameters
"""
# Load training configurations json
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())
train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)
if not os.path.isfile(train_config_file_path):
self.log_error(
fl_ctx,
f"Training configuration file does not exist at {train_config_file_path}",
)
with open(train_config_file_path) as file:
self.config_info = json.load(file)
# Get the config_info
self.lr = self.config_info["learning_rate"]
self.fedproxloss_mu = self.config_info["fedproxloss_mu"]
cache_rate = self.config_info["cache_dataset"]
dataset_base_dir = self.config_info["dataset_base_dir"]
datalist_json_path = self.config_info["datalist_json_path"]
# Get datalist json
datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)
# Set datalist
train_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="training",
base_dir=dataset_base_dir,
)
valid_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="validation",
base_dir=dataset_base_dir,
)
self.log_info(
fl_ctx,
f"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}",
)
# Set the training-related context
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = UNet(
spatial_dims=2,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(self.device)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
self.criterion = DiceLoss(sigmoid=True)
if self.fedproxloss_mu > 0:
self.log_info(fl_ctx, f"using FedProx loss with mu {self.fedproxloss_mu}")
self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)
self.transform = Compose(
[
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys=["image", "label"]),
ScaleIntensityRanged(keys=["image", "label"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),
Resized(
keys=["image", "label"],
spatial_size=(256, 256),
mode=("bilinear"),
align_corners=True,
),
AsDiscreted(keys=["label"], threshold=0.5),
EnsureTyped(keys=["image", "label"]),
]
)
self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
# Set dataset
if cache_rate > 0.0:
self.train_dataset = CacheDataset(
data=train_list,
transform=self.transform,
cache_rate=cache_rate,
num_workers=4,
)
self.valid_dataset = CacheDataset(
data=valid_list,
transform=self.transform,
cache_rate=cache_rate,
num_workers=4,
)
else:
self.train_dataset = Dataset(
data=train_list,
transform=self.transform,
)
self.valid_dataset = Dataset(
data=valid_list,
transform=self.transform,
)
self.train_loader = DataLoader(
self.train_dataset,
batch_size=1,
shuffle=True,
num_workers=2,
)
self.valid_loader = DataLoader(
self.valid_dataset,
batch_size=1,
shuffle=False,
num_workers=2,
)
# Set inferer and evaluation metric
self.inferer = SimpleInferer()
self.valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
| NVFlare-main | examples/advanced/prostate/prostate_2D/custom/learners/supervised_monai_prostate_learner.py |
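train_config() above reads its hyperparameters from a JSON file in the app config directory. The keys it expects (plus the two used only by the Ditto learner earlier in this collection) are listed below as the parsed Python dict; the values are illustrative placeholders, not the example's shipped config.

# Illustrative contents of the train config JSON consumed by train_config() above,
# shown as the parsed Python dict. Keys match the code; values are placeholders.
config_info = {
    "learning_rate": 1e-4,
    "fedproxloss_mu": 0.0,                                    # > 0 enables the FedProx loss term
    "cache_dataset": 1.0,                                     # cache_rate for MONAI CacheDataset; 0.0 disables caching
    "dataset_base_dir": "../data_preparation/dataset_2D",     # placeholder
    "datalist_json_path": "../data_preparation/datalist_2D",  # directory holding <client_id>.json files
    # used only by SupervisedMonaiProstateDittoLearner:
    "ditto_learning_rate": 1e-4,
    "ditto_lambda": 0.1,
}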
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.shareable import make_reply
from nvflare.apis.signal import Signal
from nvflare.app_opt.pt.ditto import PTDittoHelper
class SupervisedPTDittoHelper(PTDittoHelper):
"""Helper to be used with Ditto components under supervised training specs."""
def __init__(self, criterion, model, optimizer, device, app_dir, ditto_lambda, model_epochs):
super().__init__(criterion, model, optimizer, device, app_dir, ditto_lambda, model_epochs)
def local_train(self, train_loader, model_global, abort_signal: Signal, writer):
# Train personal model for self.model_epochs, and keep track of curves
# This part is task dependent, need customization
for epoch in range(self.model_epochs):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch + 1
for i, batch_data in enumerate(train_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
inputs = batch_data["image"].to(self.device)
labels = batch_data["label"].to(self.device)
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
                # add the Ditto proximal loss term, penalizing drift of the personalized model from the global model
loss_ditto = self.prox_criterion(self.model, model_global)
loss += loss_ditto
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
writer.add_scalar("train_loss_ditto", loss.item(), current_step)
| NVFlare-main | examples/advanced/prostate/prostate_2D/custom/helpers/supervised_pt_ditto.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from monai.data import CacheDataset, DataLoader, load_decathlon_datalist
from monai.inferers import SimpleInferer
from monai.metrics import DiceMetric
from monai.networks.nets import UNet
from monai.transforms import (
Activations,
AsDiscrete,
AsDiscreted,
Compose,
EnsureChannelFirstd,
EnsureType,
EnsureTyped,
LoadImaged,
Resized,
ScaleIntensityRanged,
)
def main():
parser = argparse.ArgumentParser(description="Model Testing")
parser.add_argument("--model_path", type=str)
parser.add_argument("--cache_rate", default=1.0, type=float)
parser.add_argument("--dataset_base_dir", default="../data_preparation/dataset_2D", type=str)
parser.add_argument("--datalist_json_path", default="../data_preparation/datalist_2D/client_All.json", type=str)
args = parser.parse_args()
# Set basic settings and paths
dataset_base_dir = args.dataset_base_dir
datalist_json_path = args.datalist_json_path
model_path = args.model_path
cache_rate = args.cache_rate
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set datalists
test_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="testing",
base_dir=dataset_base_dir,
)
print(f"Testing Size: {len(test_list)}")
# Network, optimizer, and loss
model = UNet(
spatial_dims=2,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
model_weights = torch.load(model_path)
model_weights = model_weights["model"]
model.load_state_dict(model_weights)
# Inferer, evaluation metric
inferer = SimpleInferer()
valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
transform = Compose(
[
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys=["image", "label"]),
ScaleIntensityRanged(keys=["image", "label"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),
Resized(keys=["image", "label"], spatial_size=(256, 256), mode=("bilinear"), align_corners=True),
AsDiscreted(keys=["label"], threshold=0.5),
EnsureTyped(keys=["image", "label"]),
]
)
transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
# Set dataset
test_dataset = CacheDataset(
data=test_list,
transform=transform,
cache_rate=cache_rate,
num_workers=4,
)
test_loader = DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
)
    # Test
model.eval()
with torch.no_grad():
metric = 0
for i, batch_data in enumerate(test_loader):
images = batch_data["image"].to(device)
labels = batch_data["label"].to(device)
# Inference
outputs = inferer(images, model)
outputs = transform_post(outputs)
# Compute metric
metric_score = valid_metric(y_pred=outputs, y=labels)
metric += metric_score.item()
# compute mean dice over whole validation set
metric /= len(test_loader)
print(f"Test Dice: {metric:.4f}")
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/prostate/result_stat/prostate_2d_test_only.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# simulator workspace
client_results_root = "../prostate_2D/workspaces/"
# client_results_root = "../prostate_3D/workspaces"
client_pre = "app_client_"
# 4 (for 3D) or 6 (for 2D) sites
sites_fl = ["I2CVB", "MSD", "NCI_ISBI_3T", "NCI_ISBI_Dx", "Promise12", "PROSTATEx"]
# sites_fl = ["I2CVB", "MSD", "NCI_ISBI_3T", "NCI_ISBI_Dx"]
# Central vs. FedAvg vs. FedProx vs. Ditto
experiments = {
"prostate_central": {"tag": "val_metric_global_model", "site": "All"},
"prostate_fedavg": {"tag": "val_metric_global_model"},
"prostate_fedprox": {"tag": "val_metric_global_model"},
"prostate_ditto": {"tag": "val_metric_per_model"},
}
weight = 0.8
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
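# For reference, a quick worked example of the exponential moving average above
# (input values chosen only for illustration): smooth([1.0, 0.0, 1.0], weight=0.8)
# returns [1.0, 0.8, 0.84].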
def read_eventfile(filepath, tags=["val_metric_global_model"]):
data = {}
for summary in tf.compat.v1.train.summary_iterator(filepath):
for v in summary.summary.value:
if v.tag in tags:
if v.tag in data.keys():
data[v.tag].append([summary.step, v.simple_value])
else:
data[v.tag] = [[summary.step, v.simple_value]]
return data
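# read_eventfile returns a dict mapping each requested tag to a list of [step, value]
# pairs, e.g. {"val_metric_global_model": [[0, 0.52], [1, 0.55], ...]} (values illustrative).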
def add_eventdata(data, config, filepath, tag="val_metric_global_model"):
event_data = read_eventfile(filepath, tags=[tag])
assert len(event_data[tag]) > 0, f"No data for key {tag}"
metric = []
for e in event_data[tag]:
# print(e)
data["Config"].append(config)
data["Epoch"].append(e[0])
metric.append(e[1])
metric = smooth(metric, weight)
for entry in metric:
data["Dice"].append(entry)
print(f"added {len(event_data[tag])} entries for {tag}")
def main():
plt.figure()
num_site = len(sites_fl)
i = 1
# add event files
data = {"Config": [], "Epoch": [], "Dice": []}
for site in sites_fl:
# clear data for each site
data = {"Config": [], "Epoch": [], "Dice": []}
for config, exp in experiments.items():
spec_site = exp.get("site", None)
if spec_site is not None:
record_path = os.path.join(
client_results_root + config, "simulate_job", client_pre + spec_site, "events.*"
)
else:
record_path = os.path.join(client_results_root + config, "simulate_job", client_pre + site, "events.*")
eventfile = glob.glob(record_path, recursive=True)
assert len(eventfile) == 1, "No unique event file found!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
ax = plt.subplot(2, int(num_site / 2), i)
ax.set_title(site)
sns.lineplot(x="Epoch", y="Dice", hue="Config", data=data)
# ax.set_xlim([0, 1000])
i = i + 1
plt.subplots_adjust(hspace=0.3)
plt.show()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/prostate/result_stat/plot_tensorboard_events.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from monai.data import DataLoader, Dataset, load_decathlon_datalist
from monai.inferers import SlidingWindowInferer
from monai.metrics import DiceMetric
from monai.networks.nets import UNet
from monai.transforms import (
Activations,
AsDiscrete,
Compose,
DivisiblePadd,
EnsureChannelFirstd,
EnsureType,
EnsureTyped,
LoadImaged,
NormalizeIntensityd,
Orientationd,
Spacingd,
)
def main():
parser = argparse.ArgumentParser(description="Model Testing")
parser.add_argument("--model_path", type=str)
parser.add_argument("--cache_rate", default=1.0, type=float)
parser.add_argument("--dataset_base_dir", default="../data_preparation/dataset", type=str)
parser.add_argument("--datalist_json_path", default="../data_preparation/datalist/client_All.json", type=str)
args = parser.parse_args()
# Set basic settings and paths
dataset_base_dir = args.dataset_base_dir
datalist_json_path = args.datalist_json_path
model_path = args.model_path
infer_roi_size = (224, 224, 32)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set datalists
test_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="validation",
base_dir=dataset_base_dir,
)
print(f"Testing Size: {len(test_list)}")
# Network, optimizer, and loss
model = UNet(
spatial_dims=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
model_weights = torch.load(model_path)
model_weights = model_weights["model"]
model.load_state_dict(model_weights)
# Inferer, evaluation metric
inferer = SlidingWindowInferer(roi_size=infer_roi_size, sw_batch_size=4, overlap=0.25)
valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
transform = Compose(
[
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys=["image", "label"]),
Spacingd(
keys=["image", "label"],
pixdim=(0.3, 0.3, 1.0),
mode=("bilinear", "nearest"),
),
DivisiblePadd(keys=["image", "label"], k=32),
Orientationd(keys=["image", "label"], axcodes="RAS"),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
EnsureTyped(keys=["image", "label"]),
]
)
transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
# Set dataset
test_dataset = Dataset(data=test_list, transform=transform)
test_loader = DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
)
    # Test
model.eval()
with torch.no_grad():
metric = 0
for i, batch_data in enumerate(test_loader):
images = batch_data["image"].to(device)
labels = batch_data["label"].to(device)
# Inference
outputs = inferer(images, model)
outputs = transform_post(outputs)
# Compute metric
metric_score = valid_metric(y_pred=outputs, y=labels)
metric += metric_score.item()
# compute mean dice over whole validation set
metric /= len(test_loader)
print(f"Test Dice: {metric:.4f}")
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/prostate/result_stat/prostate_3d_test_only.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nibabel as nib
parser = argparse.ArgumentParser("Threshold label image to a binary one")
parser.add_argument("--input_path", help="Input label image path", type=str)
parser.add_argument("--output_path", help="Output binary image path", type=str)
parser.add_argument("--threshold", help="threshold", type=int, default=0)
args = parser.parse_args()
img = nib.load(args.input_path)
img_np = img.get_fdata()
img_affine = img.affine
img_np[img_np > args.threshold] = 1
nft_img = nib.Nifti1Image(img_np, img_affine)
nib.save(nft_img, args.output_path)
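# Example usage (paths are illustrative):
#   python label_threshold.py --input_path ./label.nii.gz --output_path ./label_binary.nii.gz --threshold 0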
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/label_threshold.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import dicom2nifti
parser = argparse.ArgumentParser("Dicom to Nifti converter")
parser.add_argument("--dicom_folder", help="Input Dicom folder path", type=str)
parser.add_argument("--nifti_path", help="Output Nifti file path", type=str)
args = parser.parse_args()
dicom2nifti.dicom_series_to_nifti(args.dicom_folder, args.nifti_path)
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/dicom_to_nifti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import os
import numpy as np
np.random.seed(0)
# Prostate data is arranged as ${data_dir}/${site_name}/Image/ and ${data_dir}/${site_name}/Mask/
# Image-Mask pairs have identical filenames and are stored separately
# output json file is named client_${site_name}.json
parser = argparse.ArgumentParser(description="generate train/valid/test splits for datasets")
parser.add_argument(
"--mode", type=str, help="Split mode, mode can either be 'folder' or 'file', controlling the split level"
)
parser.add_argument("--data_dir", type=str, help="Path to data folder")
parser.add_argument("--site_name", type=str, help="Path to particular set")
parser.add_argument("--train", type=float, default=0.5, help="Portion of training set, default 50%")
parser.add_argument("--valid", type=float, default=0.25, help="Portion of validation set, default 25%")
parser.add_argument("--test", type=float, default=0.25, help="Portion of testing set, default 25%")
parser.add_argument("--out_path", type=str, help="Path to datalist json file")
def partition_data(mode, data_path, site_name, train, valid, test, out_path):
assert mode in ["folder", "file"], "mode should either be 'folder' or 'file'"
print(f"Generate data split for {data_path}/{site_name}, with train:validation:test {train}:{valid}:{test}")
print(f"Save json to {out_path}")
print(f"Mode: {mode}")
tra = 0
val = 0
tst = 0
tra_i = 0
val_i = 0
tst_i = 0
total_file = 0
json_data = {"training": [], "validation": [], "testing": []}
image_file_path = os.path.join(data_path, site_name, "Image", "*")
mask_file_path = os.path.join(data_path, site_name, "Mask", "*")
image_files = glob.glob(image_file_path)
mask_files = glob.glob(mask_file_path)
assert len(image_files) == len(mask_files), "The number of image and mask files should be the same."
# sort will produce the same sequence since filenames are identical for image and masks
image_files.sort()
mask_files.sort()
# produce random index for split
length = len(image_files)
rand_idx = np.arange(length)
np.random.shuffle(rand_idx)
# check the ratio sum
assert (train + valid + test) == 1, "Sum of all three splits should be 1."
tra_cut = round(length * train)
val_cut = round(length * train) + round(length * valid)
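    # e.g., with 8 cases and the default 0.5/0.25/0.25 split: tra_cut=4 and val_cut=6,
    # so the first 4 shuffled cases go to training, the next 2 to validation, and the last 2 to testing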
for count in range(length):
# if folder, add all images inside it
if mode == "folder":
image_file_name = glob.glob(os.path.join(image_files[rand_idx[count]], "*"))
mask_file_name = glob.glob(os.path.join(mask_files[rand_idx[count]], "*"))
image_file_name.sort()
mask_file_name.sort()
elif mode == "file":
image_file_name = [image_files[rand_idx[count]]]
mask_file_name = [mask_files[rand_idx[count]]]
if count < tra_cut:
to_append = "training"
tra = tra + 1
tra_i = tra_i + len(image_file_name)
elif count < val_cut:
to_append = "validation"
val = val + 1
val_i = val_i + len(image_file_name)
else:
to_append = "testing"
tst = tst + 1
tst_i = tst_i + len(image_file_name)
total_file = total_file + len(image_file_name)
for idx in range(len(image_file_name)):
new_item = {}
# collect the paths, excluding the common data_root
new_item["image"] = image_file_name[idx].replace(data_path + "/", "")
new_item["label"] = mask_file_name[idx].replace(data_path + "/", "")
temp = json_data[to_append]
temp.append(new_item)
print(f"In total {length} cases, {tra} for training, {val} for validation, and {tst} for testing")
if mode == "folder":
print(
f"In total {total_file} samples, split at case level, {tra_i} for training, {val_i} for validation, and {tst_i} for testing"
)
with open(out_path, "w") as f:
json.dump(json_data, f, indent=4)
if __name__ == "__main__":
args = parser.parse_args()
partition_data(
mode=args.mode,
data_path=args.data_dir,
site_name=args.site_name,
train=args.train,
valid=args.valid,
test=args.test,
out_path=args.out_path,
)
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/prepare_data_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nibabel as nib
import nrrd
import numpy as np
parser = argparse.ArgumentParser("Convert nrrd label to nifti with reference image file for affine")
parser.add_argument("--input_path", help="Input nrrd path", type=str)
parser.add_argument("--reference_path", help="Reference image path", type=str)
parser.add_argument("--output_path", help="Output nifti path", type=str)
args = parser.parse_args()
img = nib.load(args.reference_path)
img_affine = img.affine
nrrd_data, nrrd_header = nrrd.read(args.input_path)
data = np.flip(nrrd_data, axis=1)
nft_img = nib.Nifti1Image(data, img_affine)
nib.save(nft_img, args.output_path)
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/nrrd_to_nifti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import json
def main():
parser = argparse.ArgumentParser(description="merge two jsons together")
parser.add_argument("--json_1", action="store", required=True, help="full path of json1")
parser.add_argument("--json_2", action="store", help="full path of json2")
parser.add_argument("--json_out", action="store", help="full path of json merged")
args = parser.parse_args()
json_1 = args.json_1
json_2 = args.json_2
json_out = args.json_out
with open(json_1) as a:
json_1_data = json.load(a)
with open(json_2) as b:
json_2_data = json.load(b)
json_data = copy.deepcopy(json_1_data)
json_data["training"].extend(json_2_data["training"])
json_data["validation"].extend(json_2_data["validation"])
json_data["testing"].extend(json_2_data["testing"])
with open(json_out, "w") as f:
json.dump(json_data, f, indent=4)
return
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/merge_two_jsons.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nibabel as nib
parser = argparse.ArgumentParser("Select single channel from image and save as new image")
parser.add_argument("--input_path", help="Input multi-channel image path", type=str)
parser.add_argument("--output_path", help="Output single-channel image path", type=str)
parser.add_argument("--channel", help="channel number", type=int, default=0)
args = parser.parse_args()
img = nib.load(args.input_path)
img_np = img.get_fdata()
img_affine = img.affine
img_np = img_np[:, :, :, args.channel]
nft_img = nib.Nifti1Image(img_np, img_affine)
nib.save(nft_img, args.output_path)
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/image_channel_select.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import pathlib
import nibabel as nib
import numpy as np
from PIL import Image
def main():
parser = argparse.ArgumentParser(description="Convert 3D prostate dataset for 2D experiment")
parser.add_argument("--data_dir", type=str, help="Path to all data folder")
parser.add_argument("--site_name", type=str, help="Path to particular set")
parser.add_argument("--out_path", type=str, help="Path to output 2D file folder")
args = parser.parse_args()
# output folder main path
image_out_path = os.path.join(args.out_path, args.site_name, "Image")
mask_out_path = os.path.join(args.out_path, args.site_name, "Mask")
# get input files
image_file_path = os.path.join(args.data_dir, args.site_name, "Image", "*")
mask_file_path = os.path.join(args.data_dir, args.site_name, "Mask", "*")
image_files = glob.glob(image_file_path)
mask_files = glob.glob(mask_file_path)
assert len(image_files) == len(mask_files), "The number of image and mask files should be the same."
# iterate through input files and convert 3D dataset to 2D
for idx in range(len(image_files)):
# collect the paths
image_file_name = image_files[idx]
mask_file_name = mask_files[idx]
# load image and mask
image_case_id = os.path.basename(image_file_name)
        mask_case_id = os.path.basename(mask_file_name)
assert image_case_id == mask_case_id, "Image and mask ID should match."
case_id = image_case_id.replace(".nii.gz", "")
# read nii.gz files with nibabel
image = nib.load(image_file_name).get_fdata().transpose((1, 0, 2))
mask = nib.load(mask_file_name).get_fdata().transpose((1, 0, 2))
        # normalize image intensity from the window [0, 2048] to [0, 1] and clip values above 1
image = (image - 0) / (2048 - 0)
image[image > 1] = 1
# iterate through slice dimension
for slice_idx in range(image.shape[2]):
image_slice = image[:, :, slice_idx]
mask_slice = mask[:, :, slice_idx]
# only extract slices with mask annotation
if np.sum(mask_slice) > 0:
# scale to 0~255
image_slice = image_slice * 255
mask_slice = mask_slice * 255
# output path
pathlib.Path(os.path.join(image_out_path, case_id)).mkdir(parents=True, exist_ok=True)
pathlib.Path(os.path.join(mask_out_path, case_id)).mkdir(parents=True, exist_ok=True)
# flip so as to follow clinical viewing orientation
im = Image.fromarray(image_slice).convert("L").transpose(Image.FLIP_TOP_BOTTOM)
im.save(os.path.join(image_out_path, case_id, "{}.png".format(slice_idx)))
im = Image.fromarray(mask_slice).convert("L").transpose(Image.FLIP_TOP_BOTTOM)
im.save(os.path.join(mask_out_path, case_id, "{}.png".format(slice_idx)))
return
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/preprocess_3d_to_2d.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nibabel as nib
import numpy as np
parser = argparse.ArgumentParser("Combine label images to a binary one")
parser.add_argument(
"--ref_image", help="Reference image file path, to make sure algnment between image and mask", type=str
)
parser.add_argument("--input_folder_path", help="Input label image folder path", type=str)
parser.add_argument("--output_path", help="Output binary image path", type=str)
args = parser.parse_args()
ref = nib.load(args.ref_image)
ref_affine = ref.affine
ref_np = ref.get_fdata()
img = nib.load(args.input_folder_path + "/1.nii.gz")
img_np = img.get_fdata()
img = nib.load(args.input_folder_path + "/2.nii.gz")
img_np = img_np + img.get_fdata()
img = nib.load(args.input_folder_path + "/4.nii.gz")
img_np = img_np + img.get_fdata()
# Special treatment for the urethra label: slices containing only urethra are discarded,
# since the urethra is usually not included in other prostate segmentation protocols
ure = nib.load(args.input_folder_path + "/3.nii.gz")
ure_np = ure.get_fdata()
for slice_idx in range(img_np.shape[2]):
image_slice = img_np[:, :, slice_idx]
ure_slice = ure_np[:, :, slice_idx]
if np.sum(image_slice) > 0:
image_slice = image_slice + ure_slice
img_np[:, :, slice_idx] = image_slice
img_np[img_np > 0] = 1
img_affine = img.affine
# reorient mask image
img_ornt = nib.io_orientation(img_affine)
ref_ornt = nib.io_orientation(ref_affine)
spatial_ornt = nib.orientations.ornt_transform(img_ornt, ref_ornt)
img_np = nib.orientations.apply_orientation(img_np, spatial_ornt)
# save mask image with the reference affine so it aligns with the reference image
img = nib.Nifti1Image(img_np, ref_affine)
nib.save(img, args.output_path)
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/label_combine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import SimpleITK as sitk
parser = argparse.ArgumentParser("Convert mhd file to nifti")
parser.add_argument("--input_path", help="Input mhd path", type=str)
parser.add_argument("--output_path", help="Output nifti path", type=str)
args = parser.parse_args()
reader = sitk.ImageFileReader()
reader.SetImageIO("MetaImageIO")
reader.SetFileName(args.input_path)
image = reader.Execute()
writer = sitk.ImageFileWriter()
writer.SetFileName(args.output_path)
writer.Execute(image)
| NVFlare-main | examples/advanced/prostate/data_preparation/utils/mhd_to_nifti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def custom_client_datalist_json_path(datalist_json_path: str, client_id: str) -> str:
"""
Customize datalist_json_path for each client
Args:
datalist_json_path: root path containing all jsons
client_id: e.g., site-2
"""
# Customize datalist_json_path for each client
datalist_json_path_client = os.path.join(
datalist_json_path,
client_id + ".json",
)
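    # e.g., client_id "site-2" yields "<datalist_json_path>/site-2.json"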
return datalist_json_path_client
| NVFlare-main | examples/advanced/prostate/prostate_3D/custom/utils/custom_client_datalist_json_path.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import torch
import torch.optim as optim
from helpers.supervised_pt_ditto import SupervisedPTDittoHelper
from learners.supervised_monai_prostate_learner import SupervisedMonaiProstateLearner
from monai.losses import DiceLoss
from monai.networks.nets.unet import UNet
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class SupervisedMonaiProstateDittoLearner(SupervisedMonaiProstateLearner):
def __init__(
self,
train_config_filename,
aggregation_epochs: int = 1,
ditto_model_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""Trainer for prostate segmentation task. It inherits from MONAI trainer.
Args:
train_config_filename: directory of config file.
aggregation_epochs: the number of training epochs of global model for a round. Defaults to 1.
ditto_model_epochs: the number of training epochs of personalized model for a round. Defaults to 1.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
SupervisedMonaiProstateLearner.__init__(
self,
train_config_filename=train_config_filename,
aggregation_epochs=aggregation_epochs,
train_task_name=train_task_name,
)
self.ditto_helper = None
self.ditto_model_epochs = ditto_model_epochs
def train_config(self, fl_ctx: FLContext):
# Initialize superclass
SupervisedMonaiProstateLearner.train_config(self, fl_ctx)
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_dir = ws.get_app_dir(fl_ctx.get_job_id())
# Initialize PTDittoHelper
ditto_model = UNet(
spatial_dims=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(self.device)
ditto_optimizer = optim.SGD(
ditto_model.parameters(),
lr=self.config_info["ditto_learning_rate"],
momentum=0.9,
)
self.ditto_helper = SupervisedPTDittoHelper(
criterion=DiceLoss(sigmoid=True),
model=ditto_model,
optimizer=ditto_optimizer,
device=self.device,
app_dir=app_dir,
ditto_lambda=self.config_info["ditto_lambda"],
model_epochs=self.ditto_model_epochs,
)
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""Training task pipeline for Ditto
Get global model weights (potentially with HE)
Prepare for fedprox loss
Load Ditto personalized model info
Local training reference model and personalized model
Return updated weights of reference model (model_diff)
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
# Load Ditto personalized model
self.ditto_helper.load_model(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for
# 1. FedProx loss of reference model
# 2. Ditto loss of personalized model
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
# local train reference model
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# local train ditto model
self.ditto_helper.local_train(
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
writer=self.writer,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# local valid ditto model each round
metric = self.local_valid(
self.ditto_helper.model,
self.valid_loader,
abort_signal,
tb_id="val_metric_per_model",
record_epoch=self.ditto_helper.epoch_global,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_metric_per_model: {metric:.4f}")
# save model
self.ditto_helper.update_metric_save_model(metric=metric)
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# flush the tb writer
self.writer.flush()
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
| NVFlare-main | examples/advanced/prostate/prostate_3D/custom/learners/supervised_monai_prostate_ditto_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from abc import abstractmethod
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
class SupervisedLearner(Learner):
def __init__(
self,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""Simple Supervised Trainer.
This provides the basic functionality of a local learner: perform before-train validation on
global model at the beginning of each round, perform local training, and send the updated weights.
        No model is saved locally; TensorBoard records the local training loss and the global model validation score.
        Supports both FedAvg and FedProx.
        Args:
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.aggregation_epochs = aggregation_epochs
self.train_task_name = train_task_name
self.best_metric = 0.0
# Epoch counter
self.epoch_of_start_time = 0
self.epoch_global = 0
# FedProx related
self.fedproxloss_mu = 0.0
self.criterion_prox = None
def initialize(self, parts: dict, fl_ctx: FLContext):
# when a run starts, this is where the actual settings get initialized for trainer
# set the paths according to fl_ctx
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_dir = ws.get_app_dir(fl_ctx.get_job_id())
# get and print the args
fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
self.client_id = fl_ctx.get_identity_name()
self.log_info(
fl_ctx,
f"Client {self.client_id} initialized with args: \n {fl_args}",
)
# set local tensorboard writer for local validation score of global model
self.writer = SummaryWriter(app_dir)
# set the training-related contexts, this is task-specific
self.train_config(fl_ctx)
@abstractmethod
def train_config(self, fl_ctx: FLContext):
"""Traning configurations customized to individual tasks
This can be specified / loaded in any ways
as long as they are made available for further training and validation
some potential items include but not limited to:
self.lr
self.fedproxloss_mu
self.model
self.device
self.optimizer
self.criterion
self.transform_train
self.transform_valid
self.transform_post
self.train_loader
self.valid_loader
self.inferer
self.valid_metric
"""
raise NotImplementedError
@abstractmethod
def finalize(self, fl_ctx: FLContext):
# collect threads, close files here
pass
def local_train(
self,
fl_ctx,
train_loader,
model_global,
abort_signal: Signal,
):
"""Typical training logic
Total local epochs: self.aggregation_epochs
Load data pairs from train_loader: image / label
Compute outputs with self.model
Compute loss with self.criterion
Add fedprox loss
Update model
"""
for epoch in range(self.aggregation_epochs):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.log_info(
fl_ctx,
f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})",
)
for i, batch_data in enumerate(train_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
inputs = batch_data["image"].to(self.device)
labels = batch_data["label"].to(self.device)
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# FedProx loss term
if self.fedproxloss_mu > 0:
fed_prox_loss = self.criterion_prox(self.model, model_global)
loss += fed_prox_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
self.writer.add_scalar("train_loss", loss.item(), current_step)
def local_valid(
self,
model,
valid_loader,
abort_signal: Signal,
tb_id=None,
record_epoch=None,
):
"""Typical validation logic
Load data pairs from train_loader: image / label
Compute outputs with self.model
Perform post transform (binarization, etc.)
Compute evaluation metric with self.valid_metric
Add score to tensorboard record with specified id
"""
model.eval()
with torch.no_grad():
metric = 0
for i, batch_data in enumerate(valid_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
val_images = batch_data["image"].to(self.device)
val_labels = batch_data["label"].to(self.device)
# Inference
val_outputs = self.inferer(val_images, model)
val_outputs = self.transform_post(val_outputs)
# Compute metric
metric_score = self.valid_metric(y_pred=val_outputs, y=val_labels)
metric += metric_score.item()
# compute mean dice over whole validation set
metric /= len(valid_loader)
# tensorboard record id, add to record if provided
if tb_id:
self.writer.add_scalar(tb_id, metric, record_epoch)
return metric
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""Typical training task pipeline with potential HE and fedprox functionalities
Get global model weights (potentially with HE)
Prepare for fedprox loss
Local training
Return updated weights (model_diff)
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for potential FedProx loss
if self.fedproxloss_mu > 0:
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
else:
model_global = None
# local train
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# flush the tb writer
self.writer.flush()
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
"""Typical validation task pipeline with potential HE functionality
Get global model weights (potentially with HE)
Validation on local data
Return validation score
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# validation on global model
model_owner = "global_model"
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
n_loaded = 0
for var_name in local_var_dict:
if var_name in model_keys:
weights = torch.as_tensor(global_weights[var_name], device=self.device)
try:
# update the local dict
local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
n_loaded += 1
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
# before_train_validate only, can extend to other validate types
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
# perform valid before local train
global_metric = self.local_valid(
self.model,
self.valid_loader,
abort_signal,
tb_id="val_metric_global_model",
record_epoch=self.epoch_global,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_metric_global_model ({model_owner}): {global_metric:.4f}")
# validation metrics will be averaged with weights at server end for best model record
metric_dxo = DXO(
data_kind=DataKind.METRICS,
data={MetaKey.INITIAL_METRICS: global_metric},
meta={},
)
metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
return metric_dxo.to_shareable()
else:
return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | examples/advanced/prostate/prostate_3D/custom/learners/supervised_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import torch
import torch.optim as optim
from learners.supervised_learner import SupervisedLearner
from monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist
from monai.inferers import SlidingWindowInferer
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.networks.nets.unet import UNet
from monai.transforms import (
Activations,
AsDiscrete,
Compose,
DivisiblePadd,
EnsureChannelFirstd,
EnsureType,
EnsureTyped,
LoadImaged,
NormalizeIntensityd,
Orientationd,
RandCropByPosNegLabeld,
RandFlipd,
RandScaleIntensityd,
RandShiftIntensityd,
Spacingd,
)
from utils.custom_client_datalist_json_path import custom_client_datalist_json_path
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
class SupervisedMonaiProstateLearner(SupervisedLearner):
def __init__(
self,
train_config_filename,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""MONAI Learner for prostate segmentation task.
It inherits from SupervisedLearner.
Args:
            train_config_filename: path of the config file; this is an additional argument for config loading
aggregation_epochs: the number of training epochs for a round.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__(
aggregation_epochs=aggregation_epochs,
train_task_name=train_task_name,
)
self.train_config_filename = train_config_filename
self.config_info = None
def train_config(self, fl_ctx: FLContext):
"""MONAI traning configuration
Here, we use a json to specify the needed parameters
"""
# Load training configurations json
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())
train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)
if not os.path.isfile(train_config_file_path):
self.log_error(
fl_ctx,
f"Training configuration file does not exist at {train_config_file_path}",
)
with open(train_config_file_path) as file:
self.config_info = json.load(file)
# Get the config_info
self.lr = self.config_info["learning_rate"]
self.fedproxloss_mu = self.config_info["fedproxloss_mu"]
cache_rate = self.config_info["cache_dataset"]
dataset_base_dir = self.config_info["dataset_base_dir"]
datalist_json_path = self.config_info["datalist_json_path"]
self.roi_size = self.config_info.get("roi_size", (224, 224, 32))
self.infer_roi_size = self.config_info.get("infer_roi_size", (224, 224, 32))
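        # An illustrative (not authoritative) shape of the training config JSON read above,
        # covering the keys this learner expects; values are placeholders:
        # {
        #   "learning_rate": 1e-2,
        #   "fedproxloss_mu": 0.0,
        #   "cache_dataset": 0.0,
        #   "dataset_base_dir": "/path/to/dataset",
        #   "datalist_json_path": "/path/to/datalist",
        #   "roi_size": [224, 224, 32],
        #   "infer_roi_size": [224, 224, 32]
        # }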
# Get datalist json
datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)
# Set datalist
train_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="training",
base_dir=dataset_base_dir,
)
valid_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="validation",
base_dir=dataset_base_dir,
)
self.log_info(
fl_ctx,
f"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}",
)
# Set the training-related context
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = UNet(
spatial_dims=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(self.device)
self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
self.criterion = DiceLoss(sigmoid=True)
if self.fedproxloss_mu > 0:
self.log_info(fl_ctx, f"using FedProx loss with mu {self.fedproxloss_mu}")
self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)
self.transform_train = Compose(
[
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys=["image", "label"]),
Orientationd(keys=["image", "label"], axcodes="RAS"),
Spacingd(
keys=["image", "label"],
pixdim=(0.3, 0.3, 1.0),
mode=("bilinear", "nearest"),
),
DivisiblePadd(keys=["image", "label"], k=32),
RandCropByPosNegLabeld(
keys=["image", "label"],
label_key="label",
spatial_size=self.roi_size,
pos=1,
neg=1,
num_samples=4,
),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=1),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=2),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
RandScaleIntensityd(keys="image", factors=0.1, prob=1.0),
RandShiftIntensityd(keys="image", offsets=0.1, prob=1.0),
EnsureTyped(keys=["image", "label"]),
]
)
self.transform_valid = Compose(
[
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys=["image", "label"]),
Spacingd(
keys=["image", "label"],
pixdim=(0.3, 0.3, 1.0),
mode=("bilinear", "nearest"),
),
DivisiblePadd(keys=["image", "label"], k=32),
Orientationd(keys=["image", "label"], axcodes="RAS"),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
EnsureTyped(keys=["image", "label"]),
]
)
self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
# Set dataset
if cache_rate > 0.0:
self.train_dataset = CacheDataset(
data=train_list,
transform=self.transform_train,
cache_rate=cache_rate,
num_workers=1,
)
self.valid_dataset = CacheDataset(
data=valid_list,
transform=self.transform_valid,
cache_rate=cache_rate,
num_workers=1,
)
else:
self.train_dataset = Dataset(
data=train_list,
transform=self.transform_train,
)
self.valid_dataset = Dataset(
data=valid_list,
transform=self.transform_valid,
)
self.train_loader = DataLoader(
self.train_dataset,
batch_size=1,
shuffle=True,
num_workers=1,
)
self.valid_loader = DataLoader(
self.valid_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
)
# Set inferer and evaluation metric
self.inferer = SlidingWindowInferer(roi_size=self.infer_roi_size, sw_batch_size=4, overlap=0.25)
self.valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
| NVFlare-main | examples/advanced/prostate/prostate_3D/custom/learners/supervised_monai_prostate_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.shareable import make_reply
from nvflare.apis.signal import Signal
from nvflare.app_opt.pt.ditto import PTDittoHelper
class SupervisedPTDittoHelper(PTDittoHelper):
"""Helper to be used with Ditto components under supervised training specs."""
def __init__(self, criterion, model, optimizer, device, app_dir, ditto_lambda, model_epochs):
super().__init__(criterion, model, optimizer, device, app_dir, ditto_lambda, model_epochs)
def local_train(self, train_loader, model_global, abort_signal: Signal, writer):
# Train personal model for self.model_epochs, and keep track of curves
        # This part is task-dependent and needs customization
for epoch in range(self.model_epochs):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch + 1
for i, batch_data in enumerate(train_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
inputs = batch_data["image"].to(self.device)
labels = batch_data["label"].to(self.device)
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
                # add the Ditto proximal loss term, penalizing drift of the personalized model from the global model
loss_ditto = self.prox_criterion(self.model, model_global)
loss += loss_ditto
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
writer.add_scalar("train_loss_ditto", loss.item(), current_step)
| NVFlare-main | examples/advanced/prostate/prostate_3D/custom/helpers/supervised_pt_ditto.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import time
import torch
import torch.nn.functional as F
import tqdm
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score
from sklearn.multioutput import MultiOutputClassifier
from torch_geometric.data import Batch
from torch_geometric.datasets import PPI
from torch_geometric.loader import DataLoader, LinkNeighborLoader
from torch_geometric.nn import GraphSAGE
# (0) import nvflare client API
import nvflare.client as flare
# Create PPI dataset for training.
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data", "PPI")
train_dataset = PPI(path, split="train")
val_dataset = PPI(path, split="val")
test_dataset = PPI(path, split="test")
# Group all training graphs into a single graph to perform sampling:
train_data = Batch.from_data_list(train_dataset)
loader = LinkNeighborLoader(
train_data,
batch_size=2048,
shuffle=True,
neg_sampling_ratio=1.0,
num_neighbors=[10, 10],
num_workers=6,
persistent_workers=True,
)
print("finish setup train loader")
# Evaluation loaders (one datapoint corresponds to a graph)
train_loader = DataLoader(train_dataset, batch_size=2)
val_loader = DataLoader(val_dataset, batch_size=2)
test_loader = DataLoader(test_dataset, batch_size=2)
print("finish setup eval loaders")
n_iterations = len(train_loader)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = GraphSAGE(
in_channels=train_dataset.num_features,
hidden_channels=64,
num_layers=2,
out_channels=64,
)
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
print("finish setup model")
# (1) initializes NVFlare client API
flare.init()
print("finish init")
# (2) gets FLModel from NVFlare
input_model = flare.receive()
print("finish receive")
# (3) loads model from NVFlare
model.load_state_dict(input_model.params)
model.to(device)
print("finish load_state_dict")
def train():
model.train()
total_loss = total_examples = 0
for data in tqdm.tqdm(loader):
data = data.to(device)
optimizer.zero_grad()
h = model(data.x, data.edge_index)
h_src = h[data.edge_label_index[0]]
h_dst = h[data.edge_label_index[1]]
link_pred = (h_src * h_dst).sum(dim=-1) # Inner product.
loss = F.binary_cross_entropy_with_logits(link_pred, data.edge_label)
loss.backward()
optimizer.step()
total_loss += float(loss) * link_pred.numel()
total_examples += link_pred.numel()
return total_loss / total_examples
@torch.no_grad()
def encode(data_loader):
model.to(device)
model.eval()
xs, ys = [], []
for data in data_loader:
data = data.to(device)
xs.append(model(data.x, data.edge_index).cpu())
ys.append(data.y.cpu())
return torch.cat(xs, dim=0), torch.cat(ys, dim=0)
@torch.no_grad()
def test():
# Train classifier on training set:
x, y = encode(train_loader)
clf = MultiOutputClassifier(SGDClassifier(loss="log_loss", penalty="l2"))
clf.fit(x, y)
train_f1 = f1_score(y, clf.predict(x), average="micro")
# Evaluate on validation set:
x, y = encode(val_loader)
val_f1 = f1_score(y, clf.predict(x), average="micro")
# Evaluate on test set:
x, y = encode(test_loader)
test_f1 = f1_score(y, clf.predict(x), average="micro")
return train_f1, val_f1, test_f1
times = []
_, _, global_test_f1 = test()
print(f"Global Test F1: {global_test_f1:.4f}")
number_epochs = 60
# (optional) calculate total steps
steps = number_epochs * len(loader)
print(steps)
for epoch in range(1, number_epochs + 1):  # run exactly number_epochs epochs to match the step count above
start = time.time()
loss = train()
print(f"Epoch: {epoch:02d}, Loss: {loss:.4f}")
train_f1, val_f1, test_f1 = test()
print(f"Train F1: {train_f1:.4f}, Val F1: {val_f1:.4f}, " f"Test F1: {test_f1:.4f}")
times.append(time.time() - start)
print(f"Median time per epoch: {torch.tensor(times).median():.4f}s")
# (5) construct the FLModel to be returned to the NVFlare server
output_model = flare.FLModel(
params=model.cpu().state_dict(),
params_type="FULL",
metrics={"test_f1": global_test_f1},
meta={"NUM_STEPS_CURRENT_ROUND": steps},
)
# (6) send back model
flare.send(output_model)
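# Recap (illustrative, not executed): the Client API calls used above follow the general pattern
#
#   import nvflare.client as flare
#   flare.init()                      # initialize once per process
#   input_model = flare.receive()     # get the current global FLModel for this round
#   ...                               # local training / evaluation on input_model.params
#   flare.send(output_model)          # return the locally updated FLModel
#
# This script handles a single round; a multi-round client would repeat receive()/send(),
# e.g. inside a `while flare.is_running():` loop (assuming that helper is available in the
# installed NVFlare version).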
| NVFlare-main | examples/advanced/gnn-pt/jobs/gnn-pt/app/custom/graphsage.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner, api_command_wrapper
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--admin_dir", type=str, default="./admin/", help="Path to admin directory.")
parser.add_argument("--username", type=str, default="admin", help="Admin username.")
parser.add_argument("--job", type=str, default="prostate_fedavg", help="Path to job config.")
args = parser.parse_args()
assert os.path.isdir(args.admin_dir), f"admin directory does not exist at {args.admin_dir}"
# Initialize the runner
runner = FLAdminAPIRunner(
username=args.username,
admin_dir=args.admin_dir,
debug=False,
)
# Submit job
api_command_wrapper(runner.api.submit_job(args.job))
# finish
runner.api.logout()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/brats18/submit_job.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import torch
from monai.data import DataLoader, Dataset, load_decathlon_datalist
from monai.inferers import SlidingWindowInferer
from monai.metrics import DiceMetric
from monai.networks.nets.segresnet import SegResNet
from monai.transforms import (
Activations,
AsDiscrete,
Compose,
ConvertToMultiChannelBasedOnBratsClassesd,
DivisiblePadd,
EnsureChannelFirstd,
LoadImaged,
NormalizeIntensityd,
Orientationd,
Spacingd,
)
def main():
parser = argparse.ArgumentParser(description="Model Testing")
parser.add_argument("--model_path", type=str)
parser.add_argument("--dataset_base_dir", default="../dataset_brats18/dataset", type=str)
parser.add_argument("--datalist_json_path", default="../dataset_brats18/datalist/site-All.json", type=str)
args = parser.parse_args()
# Set basic settings and paths
dataset_base_dir = args.dataset_base_dir
datalist_json_path = args.datalist_json_path
model_path = args.model_path
infer_roi_size = (240, 240, 160)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set datalists
test_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="validation",
base_dir=dataset_base_dir,
)
print(f"Testing Size: {len(test_list)}")
# Network, optimizer, and loss
model = SegResNet(
blocks_down=[1, 2, 2, 4],
blocks_up=[1, 1, 1],
init_filters=16,
in_channels=4,
out_channels=3,
dropout_prob=0.2,
).to(device)
model_weights = torch.load(model_path)
model_weights = model_weights["model"]
model.load_state_dict(model_weights)
# Inferer, evaluation metric
inferer = SlidingWindowInferer(roi_size=infer_roi_size, sw_batch_size=1, overlap=0.5)
valid_metric = DiceMetric(include_background=True, reduction="mean")
transform = Compose(
[
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys="image"),
ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
Spacingd(
keys=["image", "label"],
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
DivisiblePadd(keys=["image", "label"], k=32),
Orientationd(keys=["image", "label"], axcodes="RAS"),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
]
)
transform_post = Compose([Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
# Set dataset
test_dataset = Dataset(data=test_list, transform=transform)
test_loader = DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
)
    # Evaluate
model.eval()
with torch.no_grad():
metric = 0
metric_tc = 0
metric_wt = 0
metric_et = 0
ct = 0
ct_tc = 0
ct_wt = 0
ct_et = 0
for i, batch_data in enumerate(test_loader):
images = batch_data["image"].to(device)
labels = batch_data["label"].to(device)
# Inference
outputs = inferer(images, model)
outputs = transform_post(outputs)
# Compute metric
metric_score = valid_metric(y_pred=outputs, y=labels)
if not np.isnan(metric_score[0][0].item()):
metric += metric_score[0][0].item()
ct += 1
metric_tc += metric_score[0][0].item()
ct_tc += 1
if not np.isnan(metric_score[0][1].item()):
metric += metric_score[0][1].item()
ct += 1
metric_wt += metric_score[0][1].item()
ct_wt += 1
if not np.isnan(metric_score[0][2].item()):
metric += metric_score[0][2].item()
ct += 1
metric_et += metric_score[0][2].item()
ct_et += 1
# compute mean dice over whole validation set
metric_tc /= ct_tc
metric_wt /= ct_wt
metric_et /= ct_et
metric /= ct
print(f"Test Dice: {metric:.4f}, Valid count: {ct}")
print(f"Test Dice TC: {metric_tc:.4f}, Valid count: {ct_tc}")
print(f"Test Dice WT: {metric_wt:.4f}, Valid count: {ct_wt}")
print(f"Test Dice ET: {metric_et:.4f}, Valid count: {ct_et}")
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/brats18/result_stat/brats_3d_test_only.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# poc workspace
client_results_root = "../workspace_brats/"
# All sites used the same validation set for Brats, so only 1 site's record is needed
site_num = 1
client_pre = "app_site-"
sites_fl = [str(site + 1) for site in range(site_num)]
# Central vs. FedAvg vs. FedAvg_DP
experiments = {
"brats_central": {"tag": "val_metric_global_model", "site": "All"},
"brats_fedavg": {"tag": "val_metric_global_model"},
"brats_fedavg_dp": {"tag": "val_metric_global_model"},
}
weight = 0.8
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
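# Example (illustrative): with weight=0.8, smooth([1.0, 0.0, 0.0], 0.8) returns [1.0, 0.8, 0.64];
# each output point is 0.8 * previous_smoothed + 0.2 * current_value.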
def read_eventfile(filepath, tags=["val_metric_global_model"]):
data = {}
for summary in tf.compat.v1.train.summary_iterator(filepath):
for v in summary.summary.value:
if v.tag in tags:
if v.tag in data.keys():
data[v.tag].append([summary.step, v.simple_value])
else:
data[v.tag] = [[summary.step, v.simple_value]]
return data
def add_eventdata(data, config, filepath, tag="val_metric_global_model"):
event_data = read_eventfile(filepath, tags=[tag])
assert len(event_data[tag]) > 0, f"No data for key {tag}"
metric = []
for e in event_data[tag]:
# print(e)
data["Config"].append(config)
data["Epoch"].append(e[0])
metric.append(e[1])
metric = smooth(metric, weight)
for entry in metric:
data["Dice"].append(entry)
print(f"added {len(event_data[tag])} entries for {tag}")
def main():
plt.figure()
num_site = len(sites_fl)
i = 1
# add event files
data = {"Config": [], "Epoch": [], "Dice": []}
for site in sites_fl:
# clear data for each site
data = {"Config": [], "Epoch": [], "Dice": []}
for config, exp in experiments.items():
spec_site = exp.get("site", None)
if spec_site is not None:
record_path = os.path.join(
client_results_root + config, "simulate_job", client_pre + spec_site, "events.*"
)
else:
record_path = os.path.join(client_results_root + config, "simulate_job", client_pre + site, "events.*")
eventfile = glob.glob(record_path, recursive=True)
print(record_path, len(eventfile))
assert len(eventfile) == 1, "No unique event file found!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
ax = plt.subplot(1, num_site, i)
ax.set_title(site)
sns.lineplot(x="Epoch", y="Dice", hue="Config", data=data)
# ax.set_xlim([0, 1000])
i = i + 1
plt.subplots_adjust(hspace=0.3)
plt.show()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/brats18/result_stat/plot_tensorboard_events.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# poc workspace
client_results_root = "../workspace_brats"
# All sites used the same validation set, so only 1 site's record is needed
site_num = 1
site_pre = "site-"
# Central vs. FedAvg vs. FedAvg_DP
experiments = {
"brats_central": {"tag": "val_metric_global_model", "site": "All"},
"brats_fedavg": {"tag": "val_metric_global_model"},
"brats_fedavg_dp": {"tag": "val_metric_global_model"},
}
weight = 0.8
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
def find_job_id(workdir, fl_app_name="prostate_central"):
"""Find the first matching experiment"""
target_path = os.path.join(workdir, "*", "fl_app.txt")
fl_app_files = glob.glob(target_path, recursive=True)
assert len(fl_app_files) > 0, f"No `fl_app.txt` files found in workdir={workdir}."
for fl_app_file in fl_app_files:
with open(fl_app_file, "r") as f:
_fl_app_name = f.read()
if fl_app_name == _fl_app_name: # alpha will be matched based on value in config file
job_id = os.path.basename(os.path.dirname(fl_app_file))
return job_id
raise ValueError(f"No job id found for fl_app_name={fl_app_name} in workdir={workdir}")
def read_eventfile(filepath, tags=["val_metric_global_model"]):
data = {}
for summary in tf.compat.v1.train.summary_iterator(filepath):
for v in summary.summary.value:
if v.tag in tags:
if v.tag in data.keys():
data[v.tag].append([summary.step, v.simple_value])
else:
data[v.tag] = [[summary.step, v.simple_value]]
return data
def add_eventdata(data, config, filepath, tag="val_metric_global_model"):
event_data = read_eventfile(filepath, tags=[tag])
assert len(event_data[tag]) > 0, f"No data for key {tag}"
metric = []
for e in event_data[tag]:
# print(e)
data["Config"].append(config)
data["Epoch"].append(e[0])
metric.append(e[1])
metric = smooth(metric, weight)
for entry in metric:
data["Dice"].append(entry)
print(f"added {len(event_data[tag])} entries for {tag}")
def main():
plt.figure()
i = 1
# add event files
data = {"Config": [], "Epoch": [], "Dice": []}
for site in range(site_num):
# clear data for each site
site = site + 1
data = {"Config": [], "Epoch": [], "Dice": []}
for config, exp in experiments.items():
job_id = find_job_id(workdir=client_results_root + "/site-1", fl_app_name=config)
print(f"Found run {job_id} for {config}")
spec_site = exp.get("site", None)
if spec_site is not None:
record_path = os.path.join(client_results_root, site_pre + spec_site, job_id, "*", "events.*")
else:
record_path = os.path.join(client_results_root, site_pre + str(site), job_id, "*", "events.*")
eventfile = glob.glob(record_path, recursive=True)
assert len(eventfile) == 1, "No unique event file found!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
ax = plt.subplot(1, site_num, i)
ax.set_title(site)
sns.lineplot(x="Epoch", y="Dice", hue="Config", data=data)
i = i + 1
plt.subplots_adjust(hspace=0.3)
plt.show()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/brats18/result_stat/plot_tensorboard_events_poc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def custom_client_datalist_json_path(datalist_json_path: str, client_id: str) -> str:
"""
Customize datalist_json_path for each client
Args:
datalist_json_path: root path containing all jsons
client_id: e.g., site-2
"""
# Customize datalist_json_path for each client
datalist_json_path_client = os.path.join(
datalist_json_path,
client_id + ".json",
)
return datalist_json_path_client
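# Example (illustrative): custom_client_datalist_json_path("/data/datalists", "site-2")
# returns "/data/datalists/site-2.json".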
| NVFlare-main | examples/advanced/brats18/pt/utils/custom_client_datalist_json_path.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import numpy as np
import torch
import torch.optim as optim
from monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist
from monai.inferers import SlidingWindowInferer
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.networks.nets.segresnet import SegResNet
from monai.transforms import (
Activations,
AsDiscrete,
Compose,
ConvertToMultiChannelBasedOnBratsClassesd,
DivisiblePadd,
EnsureChannelFirstd,
LoadImaged,
NormalizeIntensityd,
Orientationd,
RandFlipd,
RandScaleIntensityd,
RandShiftIntensityd,
RandSpatialCropd,
Spacingd,
)
from pt.learners.supervised_learner import SupervisedLearner
from pt.utils.custom_client_datalist_json_path import custom_client_datalist_json_path
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
class SupervisedMonaiBratsLearner(SupervisedLearner):
def __init__(
self,
train_config_filename,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""MONAI Learner for BraTS18 segmentation task.
It inherits from SupervisedLearner.
Args:
            train_config_filename: path to the config file; an additional argument used for config loading
aggregation_epochs: the number of training epochs for a round.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__(
aggregation_epochs=aggregation_epochs,
train_task_name=train_task_name,
)
self.train_config_filename = train_config_filename
self.config_info = None
def train_config(self, fl_ctx: FLContext):
"""MONAI traning configuration
Here, we use a json to specify the needed parameters
"""
# Load training configurations json
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())
train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)
if not os.path.isfile(train_config_file_path):
self.log_error(
fl_ctx,
f"Training configuration file does not exist at {train_config_file_path}",
)
with open(train_config_file_path) as file:
self.config_info = json.load(file)
# Get the config_info
self.lr = self.config_info["learning_rate"]
self.fedproxloss_mu = self.config_info["fedproxloss_mu"]
cache_rate = self.config_info["cache_dataset"]
dataset_base_dir = self.config_info["dataset_base_dir"]
datalist_json_path = self.config_info["datalist_json_path"]
self.roi_size = self.config_info.get("roi_size", (224, 224, 144))
self.infer_roi_size = self.config_info.get("infer_roi_size", (240, 240, 160))
# Get datalist json
datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)
# Set datalist
train_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="training",
base_dir=dataset_base_dir,
)
valid_list = load_decathlon_datalist(
data_list_file_path=datalist_json_path,
is_segmentation=True,
data_list_key="validation",
base_dir=dataset_base_dir,
)
self.log_info(
fl_ctx,
f"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}",
)
# Set the training-related context
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = SegResNet(
blocks_down=[1, 2, 2, 4],
blocks_up=[1, 1, 1],
init_filters=16,
in_channels=4,
out_channels=3,
dropout_prob=0.2,
).to(self.device)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=1e-5)
self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=100)
self.criterion = DiceLoss(
smooth_nr=0,
smooth_dr=1e-5,
squared_pred=True,
to_onehot_y=False,
sigmoid=True,
)
if self.fedproxloss_mu > 0:
self.log_info(fl_ctx, f"using FedProx loss with mu {self.fedproxloss_mu}")
self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)
self.transform_train = Compose(
[
# load Nifti image
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys="image"),
ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
Spacingd(
keys=["image", "label"],
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
Orientationd(keys=["image", "label"], axcodes="RAS"),
RandSpatialCropd(keys=["image", "label"], roi_size=self.roi_size, random_size=False),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=1),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=2),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
RandScaleIntensityd(keys="image", factors=0.1, prob=1.0),
RandShiftIntensityd(keys="image", offsets=0.1, prob=1.0),
]
)
self.transform_valid = Compose(
[
LoadImaged(keys=["image", "label"]),
EnsureChannelFirstd(keys="image"),
ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
Spacingd(
keys=["image", "label"],
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
DivisiblePadd(keys=["image", "label"], k=32),
Orientationd(keys=["image", "label"], axcodes="RAS"),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
]
)
self.transform_post = Compose([Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
# Set dataset
if cache_rate > 0.0:
self.train_dataset = CacheDataset(
data=train_list,
transform=self.transform_train,
cache_rate=cache_rate,
num_workers=1,
)
self.valid_dataset = CacheDataset(
data=valid_list,
transform=self.transform_valid,
cache_rate=cache_rate,
num_workers=1,
)
else:
self.train_dataset = Dataset(
data=train_list,
transform=self.transform_train,
)
self.valid_dataset = Dataset(
data=valid_list,
transform=self.transform_valid,
)
self.train_loader = DataLoader(
self.train_dataset,
batch_size=1,
shuffle=True,
num_workers=1,
)
self.valid_loader = DataLoader(
self.valid_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
)
# Set inferer and evaluation metric
self.inferer = SlidingWindowInferer(roi_size=self.infer_roi_size, sw_batch_size=1, overlap=0.5)
self.valid_metric = DiceMetric(include_background=True, reduction="mean")
    # BraTS has 3 output classes (TC/WT/ET), so the metric computation averages over the valid per-class scores
def local_valid(
self,
model,
valid_loader,
abort_signal: Signal,
tb_id=None,
record_epoch=None,
):
"""Typical validation logic
        Load data pairs from valid_loader: image / label
Compute outputs with self.model
Perform post transform (binarization, etc.)
Compute evaluation metric with self.valid_metric
Add score to tensorboard record with specified id
"""
model.eval()
with torch.no_grad():
metric = 0
ct = 0
for i, batch_data in enumerate(valid_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
val_images = batch_data["image"].to(self.device)
val_labels = batch_data["label"].to(self.device)
# Inference
val_outputs = self.inferer(val_images, model)
val_outputs = self.transform_post(val_outputs)
# Compute metric
metric_score = self.valid_metric(y_pred=val_outputs, y=val_labels)
for sub_region in range(3):
metric_score_single = metric_score[0][sub_region].item()
if not np.isnan(metric_score_single):
metric += metric_score_single
ct += 1
# compute mean dice over whole validation set
metric /= ct
# tensorboard record id, add to record if provided
if tb_id:
self.writer.add_scalar(tb_id, metric, record_epoch)
return metric
| NVFlare-main | examples/advanced/brats18/pt/learners/supervised_monai_brats_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from abc import abstractmethod
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
class SupervisedLearner(Learner):
def __init__(
self,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""Simple Supervised Trainer.
This provides the basic functionality of a local learner: perform before-train validation on
global model at the beginning of each round, perform local training, and send the updated weights.
        No model is saved locally; TensorBoard records are kept for the local loss and the global model validation score.
        Both FedAvg and FedProx are supported.
Args:
train_config_filename: directory of config file.
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.aggregation_epochs = aggregation_epochs
self.train_task_name = train_task_name
# Epoch counter
self.epoch_of_start_time = 0
self.epoch_global = 0
# FedProx related
self.fedproxloss_mu = 0.0
self.criterion_prox = None
def initialize(self, parts: dict, fl_ctx: FLContext):
# when a run starts, this is where the actual settings get initialized for trainer
# set the paths according to fl_ctx
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_dir = ws.get_app_dir(fl_ctx.get_job_id())
# get and print the args
fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
self.client_id = fl_ctx.get_identity_name()
self.log_info(
fl_ctx,
f"Client {self.client_id} initialized with args: \n {fl_args}",
)
# set local tensorboard writer for local validation score of global model
self.writer = SummaryWriter(app_dir)
# set the training-related contexts, this is task-specific
self.train_config(fl_ctx)
@abstractmethod
def train_config(self, fl_ctx: FLContext):
"""Traning configurations customized to individual tasks
This can be specified / loaded in any ways
as long as they are made available for further training and validation
some potential items include but not limited to:
self.lr
self.fedproxloss_mu
self.model
self.device
self.optimizer
self.criterion
self.transform_train
self.transform_valid
self.transform_post
self.train_loader
self.valid_loader
self.inferer
self.valid_metric
"""
raise NotImplementedError
@abstractmethod
def finalize(self, fl_ctx: FLContext):
# collect threads, close files here
pass
def local_train(
self,
fl_ctx,
train_loader,
model_global,
abort_signal: Signal,
):
"""Typical training logic
Total local epochs: self.aggregation_epochs
Load data pairs from train_loader: image / label
Compute outputs with self.model
Compute loss with self.criterion
Add fedprox loss
Update model
"""
for epoch in range(self.aggregation_epochs):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.log_info(
fl_ctx,
f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})",
)
for i, batch_data in enumerate(train_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
inputs = batch_data["image"].to(self.device)
labels = batch_data["label"].to(self.device)
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# FedProx loss term
if self.fedproxloss_mu > 0:
fed_prox_loss = self.criterion_prox(self.model, model_global)
loss += fed_prox_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
self.writer.add_scalar("train_loss", loss.item(), current_step)
def local_valid(
self,
model,
valid_loader,
abort_signal: Signal,
tb_id=None,
record_epoch=None,
):
"""Typical validation logic
        Load data pairs from valid_loader: image / label
Compute outputs with self.model
Perform post transform (binarization, etc.)
Compute evaluation metric with self.valid_metric
Add score to tensorboard record with specified id
"""
model.eval()
with torch.no_grad():
metric = 0
for i, batch_data in enumerate(valid_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
val_images = batch_data["image"].to(self.device)
val_labels = batch_data["label"].to(self.device)
# Inference
val_outputs = self.inferer(val_images, model)
val_outputs = self.transform_post(val_outputs)
# Compute metric
metric_score = self.valid_metric(y_pred=val_outputs, y=val_labels)
metric += metric_score.item()
# compute mean dice over whole validation set
metric /= len(valid_loader)
# tensorboard record id, add to record if provided
if tb_id:
                self.writer.add_scalar(tb_id, metric, record_epoch)
return metric
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""Typical training task pipeline with potential HE and fedprox functionalities
Get global model weights (potentially with HE)
Prepare for fedprox loss
Local training
Return updated weights (model_diff)
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for potential FedProx loss
if self.fedproxloss_mu > 0:
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
else:
model_global = None
# local train
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# flush the tb writer
self.writer.flush()
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
"""Typical validation task pipeline with potential HE functionality
Get global model weights (potentially with HE)
Validation on local data
Return validation score
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# validation on global model
model_owner = "global_model"
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
n_loaded = 0
for var_name in local_var_dict:
if var_name in model_keys:
weights = torch.as_tensor(global_weights[var_name], device=self.device)
try:
# update the local dict
local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
n_loaded += 1
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
# before_train_validate only, can extend to other validate types
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
# perform valid before local train
global_metric = self.local_valid(
self.model,
self.valid_loader,
abort_signal,
tb_id="val_metric_global_model",
record_epoch=self.epoch_global,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_metric_global_model ({model_owner}): {global_metric:.4f}")
# validation metrics will be averaged with weights at server end for best model record
metric_dxo = DXO(data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: global_metric}, meta={})
metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
return metric_dxo.to_shareable()
else:
return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | examples/advanced/brats18/pt/learners/supervised_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# simulator workspace
client_results_root = "./workspaces/xgboost_workspace_"
client_num_list = [5, 20]
client_pre = "app_site-"
centralized_path = "./workspaces/centralized_1_1/events.*"
# bagging and cyclic need different handling
experiments_bagging = {
5: {
"5_bagging_uniform_split_uniform_lr": {"tag": "AUC"},
"5_bagging_exponential_split_uniform_lr": {"tag": "AUC"},
"5_bagging_exponential_split_scaled_lr": {"tag": "AUC"},
},
20: {
"20_bagging_uniform_split_uniform_lr": {"tag": "AUC"},
"20_bagging_square_split_uniform_lr": {"tag": "AUC"},
"20_bagging_square_split_scaled_lr": {"tag": "AUC"},
},
}
experiments_cyclic = {
5: {
"5_cyclic_uniform_split_uniform_lr": {"tag": "AUC"},
"5_cyclic_exponential_split_uniform_lr": {"tag": "AUC"},
},
20: {
"20_cyclic_uniform_split_uniform_lr": {"tag": "AUC"},
"20_cyclic_square_split_uniform_lr": {"tag": "AUC"},
},
}
weight = 0.0
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
def read_eventfile(filepath, tags=["AUC"]):
data = {}
for summary in tf.compat.v1.train.summary_iterator(filepath):
for v in summary.summary.value:
if v.tag in tags:
if v.tag in data.keys():
data[v.tag].append([summary.step, v.simple_value])
else:
data[v.tag] = [[summary.step, v.simple_value]]
return data
def add_eventdata(data, config, filepath, tag="AUC"):
event_data = read_eventfile(filepath, tags=[tag])
assert len(event_data[tag]) > 0, f"No data for key {tag}"
metric = []
for e in event_data[tag]:
# print(e)
data["Config"].append(config)
data["Round"].append(e[0])
metric.append(e[1])
metric = smooth(metric, weight)
for entry in metric:
data["AUC"].append(entry)
print(f"added {len(event_data[tag])} entries for {tag}")
def main():
plt.figure()
for client_num in client_num_list:
plt.figure
plt.title(f"{client_num} client experiments")
# add event files
data = {"Config": [], "Round": [], "AUC": []}
# add centralized result
eventfile = glob.glob(centralized_path, recursive=True)
        assert len(eventfile) == 1, f"No unique event file found! {eventfile}"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, "centralized", eventfile, tag="AUC")
# pick first client for bagging experiments
site = 1
for config, exp in experiments_bagging[client_num].items():
record_path = os.path.join(client_results_root + config, "simulate_job", client_pre + str(site), "events.*")
eventfile = glob.glob(record_path, recursive=True)
assert len(eventfile) == 1, "No unique event file found!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
# Combine all clients' records for cyclic experiments
for site in range(1, client_num + 1):
for config, exp in experiments_cyclic[client_num].items():
record_path = os.path.join(
client_results_root + config, "simulate_job", client_pre + str(site), "events.*"
)
eventfile = glob.glob(record_path, recursive=True)
assert len(eventfile) == 1, f"No unique event file found under {record_path}!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
sns.lineplot(x="Round", y="AUC", hue="Config", data=data)
plt.show()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/xgboost/tree-based/utils/plot_tensorboard_events.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import xgboost as xgb
from nvflare.app_opt.xgboost.data_loader import XGBDataLoader
def _read_higgs_with_pandas(data_path, start: int, end: int):
data_size = end - start
data = pd.read_csv(data_path, header=None, skiprows=start, nrows=data_size)
data_num = data.shape[0]
# split to feature and label
x = data.iloc[:, 1:].copy()
y = data.iloc[:, 0].copy()
return x, y, data_num
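# Example (illustrative): _read_higgs_with_pandas("HIGGS.csv", start=0, end=1000) reads rows
# 0-999 of the CSV, returning the feature columns (all but the first) as x, the first column
# (the binary label) as y, and data_num=1000.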
class HIGGSDataLoader(XGBDataLoader):
def __init__(self, data_split_filename):
"""Reads HIGGS dataset and return XGB data matrix.
Args:
data_split_filename: file name to data splits
"""
self.data_split_filename = data_split_filename
def load_data(self, client_id: str):
with open(self.data_split_filename, "r") as file:
data_split = json.load(file)
data_path = data_split["data_path"]
data_index = data_split["data_index"]
# check if site_id and "valid" in the mapping dict
if client_id not in data_index.keys():
raise ValueError(
f"Data does not contain Client {client_id} split",
)
if "valid" not in data_index.keys():
raise ValueError(
"Data does not contain Validation split",
)
site_index = data_index[client_id]
valid_index = data_index["valid"]
# training
x_train, y_train, total_train_data_num = _read_higgs_with_pandas(
data_path=data_path, start=site_index["start"], end=site_index["end"]
)
dmat_train = xgb.DMatrix(x_train, label=y_train)
# validation
x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas(
data_path=data_path, start=valid_index["start"], end=valid_index["end"]
)
dmat_valid = xgb.DMatrix(x_valid, label=y_valid)
return dmat_train, dmat_valid
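# Illustrative usage (a sketch; the paths below are assumptions): the data split JSON read above
# is expected to look like
#
#   {
#     "data_path": "/tmp/nvflare/HIGGS.csv",
#     "data_index": {
#       "valid":  {"start": 0, "end": 1000000},
#       "site-1": {"start": 1000000, "end": 3000000}
#     }
#   }
#
# so that
#
#   loader = HIGGSDataLoader(data_split_filename="/tmp/nvflare/data_site-1.json")
#   dmat_train, dmat_valid = loader.load_data("site-1")
#
# returns one xgb.DMatrix for training and one for validation.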
| NVFlare-main | examples/advanced/xgboost/tree-based/jobs/bagging_base/app/custom/higgs_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import xgboost as xgb
from nvflare.app_opt.xgboost.data_loader import XGBDataLoader
def _read_higgs_with_pandas(data_path, start: int, end: int):
data_size = end - start
data = pd.read_csv(data_path, header=None, skiprows=start, nrows=data_size)
data_num = data.shape[0]
# split to feature and label
x = data.iloc[:, 1:].copy()
y = data.iloc[:, 0].copy()
return x, y, data_num
class HIGGSDataLoader(XGBDataLoader):
def __init__(self, data_split_filename):
"""Reads HIGGS dataset and return XGB data matrix.
Args:
data_split_filename: file name to data splits
"""
self.data_split_filename = data_split_filename
def load_data(self, client_id: str):
with open(self.data_split_filename, "r") as file:
data_split = json.load(file)
data_path = data_split["data_path"]
data_index = data_split["data_index"]
# check if site_id and "valid" in the mapping dict
if client_id not in data_index.keys():
raise ValueError(
f"Data does not contain Client {client_id} split",
)
if "valid" not in data_index.keys():
raise ValueError(
"Data does not contain Validation split",
)
site_index = data_index[client_id]
valid_index = data_index["valid"]
# training
x_train, y_train, total_train_data_num = _read_higgs_with_pandas(
data_path=data_path, start=site_index["start"], end=site_index["end"]
)
dmat_train = xgb.DMatrix(x_train, label=y_train)
# validation
x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas(
data_path=data_path, start=valid_index["start"], end=valid_index["end"]
)
dmat_valid = xgb.DMatrix(x_valid, label=y_valid)
return dmat_train, dmat_valid
| NVFlare-main | examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/custom/higgs_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import numpy as np
def data_split_args_parser():
parser = argparse.ArgumentParser(description="Generate data split for dataset")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--site_num", type=int, help="Total number of sites")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
parser.add_argument("--size_total", type=int, help="Total number of instances")
parser.add_argument(
"--size_valid", type=int, help="Validation size, the first N instances to be treated as validation data"
)
parser.add_argument(
"--split_method",
type=str,
default="uniform",
choices=["uniform", "linear", "square", "exponential"],
help="How to split the dataset",
)
parser.add_argument("--out_path", type=str, default="~/dataset", help="Output path for the data split json file")
return parser
def split_num_proportion(n, site_num, option: str):
split = []
if option == "uniform":
ratio_vec = np.ones(site_num)
elif option == "linear":
ratio_vec = np.linspace(1, site_num, num=site_num)
elif option == "square":
ratio_vec = np.square(np.linspace(1, site_num, num=site_num))
elif option == "exponential":
ratio_vec = np.exp(np.linspace(1, site_num, num=site_num))
else:
raise ValueError("Split method not implemented!")
total = sum(ratio_vec)
left = n
for site in range(site_num - 1):
x = int(n * ratio_vec[site] / total)
left = left - x
split.append(x)
split.append(left)
return split
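# Example (illustrative): split_num_proportion(100, 4, "linear") uses ratio vector [1, 2, 3, 4]
# and returns [10, 20, 30, 40]; the last site always absorbs any rounding remainder.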
def main():
parser = data_split_args_parser()
args = parser.parse_args()
json_data = {"data_path": args.data_path, "data_index": {"valid": {"start": 0, "end": args.size_valid}}}
site_size = split_num_proportion((args.size_total - args.size_valid), args.site_num, args.split_method)
for site in range(args.site_num):
site_id = args.site_name_prefix + str(site + 1)
idx_start = args.size_valid + sum(site_size[:site])
idx_end = args.size_valid + sum(site_size[: site + 1])
json_data["data_index"][site_id] = {"start": idx_start, "end": idx_end}
if not os.path.exists(args.out_path):
os.makedirs(args.out_path, exist_ok=True)
for site in range(args.site_num):
output_file = os.path.join(args.out_path, f"data_{args.site_name_prefix}{site + 1}.json")
with open(output_file, "w") as f:
json.dump(json_data, f, indent=4)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/xgboost/utils/prepare_data_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pathlib
import shutil
from nvflare.apis.fl_constant import JobConstants
JOB_CONFIGS_ROOT = "jobs"
MODE_ALGO_MAP = {"bagging": "tree-based", "cyclic": "tree-based", "histogram": "histogram-based"}
def job_config_args_parser():
parser = argparse.ArgumentParser(description="generate train configs for HIGGS dataset")
parser.add_argument(
"--data_root",
type=str,
default="/tmp/nvflare/xgboost_higgs_dataset",
help="Path to dataset config files for each site",
)
parser.add_argument("--site_num", type=int, default=5, help="Total number of sites")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
parser.add_argument("--round_num", type=int, default=100, help="Total number of training rounds")
parser.add_argument(
"--training_mode", type=str, default="bagging", choices=list(MODE_ALGO_MAP.keys()), help="Training mode"
)
parser.add_argument("--split_method", type=str, default="uniform", help="How to split the dataset")
parser.add_argument("--lr_mode", type=str, default="uniform", help="Whether to use uniform or scaled shrinkage")
parser.add_argument("--nthread", type=int, default=16, help="nthread for xgboost")
parser.add_argument(
"--tree_method", type=str, default="hist", help="tree_method for xgboost - use hist or gpu_hist for best perf"
)
return parser
def _read_json(filename):
if not os.path.isfile(filename):
raise ValueError(f"{filename} does not exist!")
with open(filename, "r") as f:
return json.load(f)
def _write_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=4)
def _get_job_name(args) -> str:
return (
"higgs_"
+ str(args.site_num)
+ "_"
+ args.training_mode
+ "_"
+ args.split_method
+ "_split"
+ "_"
+ args.lr_mode
+ "_lr"
)
def _get_data_split_name(args, site_name: str) -> str:
return os.path.join(args.data_root, f"{args.site_num}_{args.split_method}", f"data_{site_name}.json")
def _get_src_job_dir(training_mode):
base_job_map = {
"bagging": "bagging_base",
"cyclic": "cyclic_base",
"histogram": "base",
}
return pathlib.Path(MODE_ALGO_MAP[training_mode]) / JOB_CONFIGS_ROOT / base_job_map[training_mode]
def _gen_deploy_map(num_sites: int, site_name_prefix: str) -> dict:
deploy_map = {"app_server": ["server"]}
for i in range(1, num_sites + 1):
deploy_map[f"app_{site_name_prefix}{i}"] = [f"{site_name_prefix}{i}"]
return deploy_map
def _update_meta(meta: dict, args):
name = _get_job_name(args)
meta["name"] = name
meta["deploy_map"] = _gen_deploy_map(args.site_num, args.site_name_prefix)
meta["min_clients"] = args.site_num
def _get_lr_scale_from_split_json(data_split: dict):
split = {}
total_data_num = 0
for k, v in data_split["data_index"].items():
if k == "valid":
continue
data_num = int(v["end"] - v["start"])
total_data_num += data_num
split[k] = data_num
lr_scales = {}
for k in split:
lr_scales[k] = split[k] / total_data_num
return lr_scales
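# Example (illustrative): for a data split whose "data_index" holds site-1 with 1,000 training
# rows and site-2 with 3,000 (the "valid" entry is skipped), this returns
# {"site-1": 0.25, "site-2": 0.75}, i.e. each site's learning-rate scale is its share of the
# total training data.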
def _update_client_config(config: dict, args, lr_scale, site_name: str):
data_split_name = _get_data_split_name(args, site_name)
if args.training_mode == "bagging" or args.training_mode == "cyclic":
# update client config
config["components"][0]["args"]["data_split_filename"] = data_split_name
config["executors"][0]["executor"]["args"]["lr_scale"] = lr_scale
config["executors"][0]["executor"]["args"]["lr_mode"] = args.lr_mode
config["executors"][0]["executor"]["args"]["nthread"] = args.nthread
config["executors"][0]["executor"]["args"]["tree_method"] = args.tree_method
config["executors"][0]["executor"]["args"]["training_mode"] = args.training_mode
num_client_bagging = 1
if args.training_mode == "bagging":
num_client_bagging = args.site_num
config["executors"][0]["executor"]["args"]["num_client_bagging"] = num_client_bagging
else:
config["components"][0]["args"]["data_split_filename"] = data_split_name
config["executors"][0]["executor"]["args"]["xgb_params"]["nthread"] = args.nthread
config["executors"][0]["executor"]["args"]["xgb_params"]["tree_method"] = args.tree_method
def _update_server_config(config: dict, args):
if args.training_mode == "bagging":
config["workflows"][0]["args"]["num_rounds"] = args.round_num + 1
config["workflows"][0]["args"]["min_clients"] = args.site_num
elif args.training_mode == "cyclic":
config["workflows"][0]["args"]["num_rounds"] = int(args.round_num / args.site_num)
def _copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name):
dst_path = dst_job_path / dst_app_name / "custom"
os.makedirs(dst_path, exist_ok=True)
src_path = src_job_path / src_app_name / "custom"
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path, dirs_exist_ok=True)
def create_server_app(src_job_path, src_app_name, dst_job_path, site_name, args):
dst_app_name = f"app_{site_name}"
server_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.SERVER_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
_update_server_config(server_config, args)
server_config_filename = dst_config_path / JobConstants.SERVER_JOB_CONFIG
_write_json(server_config, server_config_filename)
def create_client_app(src_job_path, src_app_name, dst_job_path, site_name, args):
dst_app_name = f"app_{site_name}"
client_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.CLIENT_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
# get lr scale
data_split_name = _get_data_split_name(args, site_name)
data_split = _read_json(data_split_name)
lr_scales = _get_lr_scale_from_split_json(data_split)
# adjust file contents according to each job's specs
_update_client_config(client_config, args, lr_scales[site_name], site_name)
client_config_filename = dst_config_path / JobConstants.CLIENT_JOB_CONFIG
_write_json(client_config, client_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def main():
parser = job_config_args_parser()
args = parser.parse_args()
job_name = _get_job_name(args)
src_job_path = _get_src_job_dir(args.training_mode)
# create a new job
dst_job_path = pathlib.Path(MODE_ALGO_MAP[args.training_mode]) / JOB_CONFIGS_ROOT / job_name
if not os.path.exists(dst_job_path):
os.makedirs(dst_job_path)
# update meta
meta_config_dst = dst_job_path / JobConstants.META_FILE
meta_config = _read_json(src_job_path / JobConstants.META_FILE)
_update_meta(meta_config, args)
_write_json(meta_config, meta_config_dst)
# create server side app
create_server_app(
src_job_path=src_job_path, src_app_name="app", dst_job_path=dst_job_path, site_name="server", args=args
)
# create client side app
for i in range(1, args.site_num + 1):
create_client_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name=f"{args.site_name_prefix}{i}",
args=args,
)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/xgboost/utils/prepare_job_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import tempfile
import time
import pandas as pd
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from torch.utils.tensorboard import SummaryWriter
def xgboost_args_parser():
parser = argparse.ArgumentParser(description="Centralized XGBoost training with random forest options")
parser.add_argument("--data_path", type=str, default="./dataset/HIGGS_UCI.csv", help="path to dataset file")
parser.add_argument("--num_parallel_tree", type=int, default=1, help="num_parallel_tree for random forest setting")
parser.add_argument("--subsample", type=float, default=1, help="subsample for random forest setting")
parser.add_argument("--num_rounds", type=int, default=100, help="number of boosting rounds")
parser.add_argument("--workspace_root", type=str, default="workspaces", help="workspaces root")
parser.add_argument("--tree_method", type=str, default="hist", help="tree_method")
parser.add_argument("--train_in_one_session", action="store_true", help="whether to train in one session")
return parser
def prepare_higgs(data_path: str):
higgs = pd.read_csv(data_path, header=None)
print(higgs.info())
print(higgs.head())
total_data_num = higgs.shape[0]
print(f"Total data count: {total_data_num}")
# split to feature and label
X_higgs = higgs.iloc[:, 1:]
y_higgs = higgs.iloc[:, 0]
print(y_higgs.value_counts())
return X_higgs, y_higgs
def train_one_by_one(train_data, val_data, xgb_params, num_rounds, val_label, writer: SummaryWriter):
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_model_path = os.path.join(tmp_dir, "model.json")
# Round 0
print("Round: 0 Base ", end="")
bst = xgb.train(
xgb_params, train_data, num_boost_round=1, evals=[(val_data, "validate"), (train_data, "train")]
)
bst.save_model(tmp_model_path)
for r in range(1, num_rounds):
# Validate the last round's model
bst_last = xgb.Booster(xgb_params, model_file=tmp_model_path)
y_pred = bst_last.predict(val_data)
roc = roc_auc_score(val_label, y_pred)
print(f"Round: {bst_last.num_boosted_rounds()} model testing AUC {roc}")
writer.add_scalar("AUC", roc, r - 1)
# Train new model
print(f"Round: {r} Base ", end="")
bst = xgb.train(
xgb_params,
train_data,
num_boost_round=1,
xgb_model=tmp_model_path,
evals=[(val_data, "validate"), (train_data, "train")],
)
bst.save_model(tmp_model_path)
return bst
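def _demo_resume_single_round_training():
    """Hypothetical sketch (not part of the original script): the core pattern used by
    train_one_by_one above, i.e. boosting one round per session and resuming from the
    previously saved model via the xgb_model argument. A tiny synthetic dataset keeps
    the sketch self-contained; names prefixed with _demo are not from the original code.
    """
    import numpy as np
    rng = np.random.default_rng(0)
    x = rng.normal(size=(64, 4))
    y = (x[:, 0] > 0).astype(int)
    dtrain = xgb.DMatrix(x, label=y)
    params = {"objective": "binary:logistic", "eta": 0.1, "max_depth": 2}
    with tempfile.TemporaryDirectory() as tmp_dir:
        model_path = os.path.join(tmp_dir, "model.json")
        booster = xgb.train(params, dtrain, num_boost_round=1)
        booster.save_model(model_path)
        for _ in range(2):
            # xgb_model makes xgboost continue boosting on top of the saved trees
            booster = xgb.train(params, dtrain, num_boost_round=1, xgb_model=model_path)
            booster.save_model(model_path)
        # three single-round sessions -> three boosting rounds in total
        assert booster.num_boosted_rounds() == 3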
def get_training_parameters(args):
# use logistic regression loss for binary classification
# use auc as metric
param = {
"objective": "binary:logistic",
"eta": 0.1,
"max_depth": 8,
"eval_metric": "auc",
"nthread": 16,
"num_parallel_tree": args.num_parallel_tree,
"subsample": args.subsample,
"tree_method": args.tree_method,
}
return param
def main():
parser = xgboost_args_parser()
args = parser.parse_args()
# Specify training params
if args.train_in_one_session:
model_name = "centralized_simple_" + str(args.num_parallel_tree) + "_" + str(args.subsample)
else:
model_name = "centralized_" + str(args.num_parallel_tree) + "_" + str(args.subsample)
data_path = args.data_path
num_rounds = args.num_rounds
valid_num = 1000000
exp_root = os.path.join(args.workspace_root, model_name)
# Set mode file paths
model_path = os.path.join(exp_root, "model.json")
# Set tensorboard output
writer = SummaryWriter(exp_root)
# Load data
start = time.time()
X_higgs, y_higgs = prepare_higgs(data_path)
end = time.time()
lapse_time = end - start
print(f"Data loading time: {lapse_time}")
# construct training and validation xgboost DMatrix
dmat_higgs = xgb.DMatrix(X_higgs, label=y_higgs)
dmat_valid = dmat_higgs.slice(X_higgs.index[0:valid_num])
dmat_train = dmat_higgs.slice(X_higgs.index[valid_num:])
# setup parameters for xgboost
xgb_params = get_training_parameters(args)
# xgboost training
start = time.time()
if args.train_in_one_session:
bst = xgb.train(
xgb_params, dmat_train, num_boost_round=num_rounds, evals=[(dmat_valid, "validate"), (dmat_train, "train")]
)
else:
bst = train_one_by_one(
train_data=dmat_train,
val_data=dmat_valid,
xgb_params=xgb_params,
num_rounds=num_rounds,
val_label=y_higgs[0:valid_num],
writer=writer,
)
bst.save_model(model_path)
end = time.time()
lapse_time = end - start
print(f"Training time: {lapse_time}")
# test model
bst = xgb.Booster(xgb_params, model_file=model_path)
y_pred = bst.predict(dmat_valid)
roc = roc_auc_score(y_higgs[0:valid_num], y_pred)
print(f"Base model: {roc}")
writer.add_scalar("AUC", roc, num_rounds - 1)
writer.close()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/xgboost/utils/baseline_centralized.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import xgboost as xgb
from nvflare.app_opt.xgboost.data_loader import XGBDataLoader
def _read_higgs_with_pandas(data_path, start: int, end: int):
data_size = end - start
data = pd.read_csv(data_path, header=None, skiprows=start, nrows=data_size)
data_num = data.shape[0]
# split to feature and label
x = data.iloc[:, 1:].copy()
y = data.iloc[:, 0].copy()
return x, y, data_num
class HIGGSDataLoader(XGBDataLoader):
def __init__(self, data_split_filename):
"""Reads HIGGS dataset and return XGB data matrix.
Args:
data_split_filename: file name to data splits
"""
self.data_split_filename = data_split_filename
def load_data(self, client_id: str):
with open(self.data_split_filename, "r") as file:
data_split = json.load(file)
data_path = data_split["data_path"]
data_index = data_split["data_index"]
# check if site_id and "valid" in the mapping dict
if client_id not in data_index.keys():
raise ValueError(
f"Data does not contain Client {client_id} split",
)
if "valid" not in data_index.keys():
raise ValueError(
"Data does not contain Validation split",
)
site_index = data_index[client_id]
valid_index = data_index["valid"]
# training
x_train, y_train, total_train_data_num = _read_higgs_with_pandas(
data_path=data_path, start=site_index["start"], end=site_index["end"]
)
dmat_train = xgb.DMatrix(x_train, label=y_train)
# validation
x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas(
data_path=data_path, start=valid_index["start"], end=valid_index["end"]
)
dmat_valid = xgb.DMatrix(x_valid, label=y_valid)
return dmat_train, dmat_valid
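def _demo_higgs_data_loader():
    """Hypothetical sketch (not part of the original loader): writes a tiny CSV in
    HIGGS layout (label in column 0, features after it) plus a matching data-split
    JSON, then exercises HIGGSDataLoader.load_data for "site-1". All file names here
    are illustrative only.
    """
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        data_path = os.path.join(tmp_dir, "higgs.csv")
        with open(data_path, "w") as f:
            for i in range(10):
                f.write(f"{i % 2},{i * 0.1},{i * 0.2}\n")
        split_path = os.path.join(tmp_dir, "data_site-1.json")
        split = {
            "data_path": data_path,
            "data_index": {"valid": {"start": 0, "end": 4}, "site-1": {"start": 4, "end": 10}},
        }
        with open(split_path, "w") as f:
            json.dump(split, f)
        loader = HIGGSDataLoader(data_split_filename=split_path)
        dmat_train, dmat_valid = loader.load_data(client_id="site-1")
        # dmat_train.num_row() == 6, dmat_valid.num_row() == 4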
| NVFlare-main | examples/advanced/xgboost/histogram-based/jobs/base/app/custom/higgs_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import numpy as np
def data_split_args_parser():
parser = argparse.ArgumentParser(description="Generate data split for dataset")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--site_num", type=int, help="Total number of sites")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
parser.add_argument("--size_total", type=int, help="Total number of instances")
parser.add_argument(
"--size_valid", type=int, help="Validation size, the first N instances to be treated as validation data"
)
parser.add_argument(
"--split_method",
type=str,
default="uniform",
choices=["uniform", "linear", "square", "exponential"],
help="How to split the dataset",
)
parser.add_argument("--out_path", type=str, default="~/dataset", help="Output path for the data split json file")
return parser
def split_num_proportion(n, site_num, option: str):
split = []
if option == "uniform":
ratio_vec = np.ones(site_num)
elif option == "linear":
ratio_vec = np.linspace(1, site_num, num=site_num)
elif option == "square":
ratio_vec = np.square(np.linspace(1, site_num, num=site_num))
elif option == "exponential":
ratio_vec = np.exp(np.linspace(1, site_num, num=site_num))
else:
raise ValueError("Split method not implemented!")
total = sum(ratio_vec)
left = n
for site in range(site_num - 1):
x = int(n * ratio_vec[site] / total)
left = left - x
split.append(x)
split.append(left)
return split
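def _demo_split_num_proportion():
    """Hypothetical sketch (not part of the original utility): worked examples of how
    each split method distributes instances across sites. The last site always
    receives whatever remains after integer truncation.
    """
    # uniform: equal shares
    assert split_num_proportion(8, 4, "uniform") == [2, 2, 2, 2]
    # linear: shares proportional to 1, 2, 3, 4
    assert split_num_proportion(10, 4, "linear") == [1, 2, 3, 4]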
def main():
parser = data_split_args_parser()
args = parser.parse_args()
json_data = {"data_path": args.data_path, "data_index": {"valid": {"start": 0, "end": args.size_valid}}}
site_size = split_num_proportion((args.size_total - args.size_valid), args.site_num, args.split_method)
for site in range(args.site_num):
site_id = args.site_name_prefix + str(site + 1)
idx_start = args.size_valid + sum(site_size[:site])
idx_end = args.size_valid + sum(site_size[: site + 1])
json_data["data_index"][site_id] = {"start": idx_start, "end": idx_end}
if not os.path.exists(args.out_path):
os.makedirs(args.out_path, exist_ok=True)
for site in range(args.site_num):
output_file = os.path.join(args.out_path, f"data_{args.site_name_prefix}{site + 1}.json")
with open(output_file, "w") as f:
json.dump(json_data, f, indent=4)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/random_forest/utils/prepare_data_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pathlib
import shutil
from nvflare.apis.fl_constant import JobConstants
JOB_CONFIGS_ROOT = "jobs"
BASE_FOLDER = "random_forest_base"
def job_config_args_parser():
parser = argparse.ArgumentParser(description="generate train configs for HIGGS dataset")
parser.add_argument(
"--data_split_root",
type=str,
default="/tmp/nvflare/random_forest/HIGGS/data_splits",
help="Path to dataset config files for each site",
)
parser.add_argument("--site_num", type=int, default=5, help="Total number of sites")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
parser.add_argument("--num_local_parallel_tree", type=int, default=20, help="Number of local parallel trees")
parser.add_argument("--local_subsample", type=float, default=0.8, help="Local random forest subsample rate")
parser.add_argument("--split_method", type=str, default="uniform", help="How to split the dataset")
parser.add_argument("--lr_mode", type=str, default="uniform", help="Whether to use uniform or scaled shrinkage")
parser.add_argument("--nthread", type=int, default=16, help="nthread for xgboost")
parser.add_argument(
"--tree_method", type=str, default="hist", help="tree_method for xgboost - use hist or gpu_hist for best perf"
)
return parser
def _read_json(filename):
if not os.path.isfile(filename):
raise ValueError(f"{filename} does not exist!")
with open(filename, "r") as f:
return json.load(f)
def _write_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=4)
def _get_job_name(args) -> str:
return (
"higgs_"
+ str(args.site_num)
+ "_"
+ str(args.local_subsample)
+ "_"
+ args.split_method
+ "_split"
+ "_"
+ args.lr_mode
+ "_lr"
)
def _get_data_split_name(args, site_name: str) -> str:
return os.path.join(args.data_split_root, f"{args.site_num}_{args.split_method}", f"data_{site_name}.json")
def _gen_deploy_map(num_sites: int, site_name_prefix: str) -> dict:
deploy_map = {"app_server": ["server"]}
for i in range(1, num_sites + 1):
deploy_map[f"app_{site_name_prefix}{i}"] = [f"{site_name_prefix}{i}"]
return deploy_map
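def _demo_gen_deploy_map():
    """Hypothetical sketch (not part of the original utility): the deploy map assigns
    the server app to the server and one dedicated app to each numbered site.
    """
    assert _gen_deploy_map(2, "site-") == {
        "app_server": ["server"],
        "app_site-1": ["site-1"],
        "app_site-2": ["site-2"],
    }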
def _update_meta(meta: dict, args):
name = _get_job_name(args)
meta["name"] = name
meta["deploy_map"] = _gen_deploy_map(args.site_num, args.site_name_prefix)
meta["min_clients"] = args.site_num
def _get_lr_scale_from_split_json(data_split: dict):
split = {}
total_data_num = 0
for k, v in data_split["data_index"].items():
if k == "valid":
continue
data_num = int(v["end"] - v["start"])
total_data_num += data_num
split[k] = data_num
lr_scales = {}
for k in split:
lr_scales[k] = split[k] / total_data_num
return lr_scales
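def _demo_get_lr_scale():
    """Hypothetical sketch (not part of the original utility): with 100 training rows
    on site-1 and 300 on site-2, each site's shrinkage scale is its share of the total
    training data; the "valid" entry is excluded from the count.
    """
    split = {
        "data_index": {
            "valid": {"start": 0, "end": 50},
            "site-1": {"start": 50, "end": 150},
            "site-2": {"start": 150, "end": 450},
        }
    }
    assert _get_lr_scale_from_split_json(split) == {"site-1": 0.25, "site-2": 0.75}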
def _update_client_config(config: dict, args, lr_scale, site_name: str):
data_split_name = _get_data_split_name(args, site_name)
# update client config
config["executors"][0]["executor"]["args"]["data_split_filename"] = data_split_name
config["executors"][0]["executor"]["args"]["num_client_bagging"] = args.site_num
config["executors"][0]["executor"]["args"]["num_local_parallel_tree"] = args.num_local_parallel_tree
config["executors"][0]["executor"]["args"]["local_subsample"] = args.local_subsample
config["executors"][0]["executor"]["args"]["lr_scale"] = lr_scale
config["executors"][0]["executor"]["args"]["lr_mode"] = args.lr_mode
config["executors"][0]["executor"]["args"]["nthread"] = args.nthread
config["executors"][0]["executor"]["args"]["tree_method"] = args.tree_method
def _update_server_config(config: dict, args):
config["workflows"][0]["args"]["min_clients"] = args.site_num
def _copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name):
dst_path = dst_job_path / dst_app_name / "custom"
os.makedirs(dst_path, exist_ok=True)
src_path = src_job_path / src_app_name / "custom"
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path, dirs_exist_ok=True)
def create_server_app(src_job_path, src_app_name, dst_job_path, site_name, args):
dst_app_name = f"app_{site_name}"
server_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.SERVER_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
_update_server_config(server_config, args)
server_config_filename = dst_config_path / JobConstants.SERVER_JOB_CONFIG
_write_json(server_config, server_config_filename)
def create_client_app(src_job_path, src_app_name, dst_job_path, site_name, args):
dst_app_name = f"app_{site_name}"
client_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.CLIENT_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
# get lr scale
data_split_name = _get_data_split_name(args, site_name)
data_split = _read_json(data_split_name)
lr_scales = _get_lr_scale_from_split_json(data_split)
# adjust file contents according to each job's specs
_update_client_config(client_config, args, lr_scales[site_name], site_name)
client_config_filename = dst_config_path / JobConstants.CLIENT_JOB_CONFIG
_write_json(client_config, client_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def main():
parser = job_config_args_parser()
args = parser.parse_args()
job_name = _get_job_name(args)
src_job_path = pathlib.Path(JOB_CONFIGS_ROOT) / BASE_FOLDER
# create a new job
dst_job_path = pathlib.Path(JOB_CONFIGS_ROOT) / job_name
if not os.path.exists(dst_job_path):
os.makedirs(dst_job_path)
# update meta
meta_config_dst = dst_job_path / JobConstants.META_FILE
meta_config = _read_json(src_job_path / JobConstants.META_FILE)
_update_meta(meta_config, args)
_write_json(meta_config, meta_config_dst)
# create server side app
create_server_app(
src_job_path=src_job_path, src_app_name="app", dst_job_path=dst_job_path, site_name="server", args=args
)
# create client side app
for i in range(1, args.site_num + 1):
create_client_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name=f"{args.site_name_prefix}{i}",
args=args,
)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/random_forest/utils/prepare_job_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pandas as pd
import xgboost as xgb
from sklearn.metrics import roc_auc_score
def model_validation_args_parser():
parser = argparse.ArgumentParser(description="Validate model performance")
parser.add_argument(
"--data_path",
type=str,
help="Path to dataset file",
)
parser.add_argument(
"--model_path",
type=str,
help="Path to model file",
)
parser.add_argument(
"--size_valid", type=int, help="Validation size, the first N instances to be treated as validation data"
)
parser.add_argument(
"--num_trees",
type=int,
help="Total number of trees",
)
parser.add_argument(
"--tree_method", type=str, default="hist", help="tree_method for xgboost - use hist or gpu_hist for best perf"
)
return parser
def main():
parser = model_validation_args_parser()
args = parser.parse_args()
data_path = args.data_path
model_path = args.model_path
num_trees = args.num_trees
param = {}
param["objective"] = "binary:logistic"
param["eta"] = 0.1
param["max_depth"] = 8
param["eval_metric"] = "auc"
param["nthread"] = 16
param["num_parallel_tree"] = num_trees
# get validation data
size_valid = args.size_valid
data = pd.read_csv(data_path, header=None, nrows=size_valid)
# split to feature and label
X = data.iloc[:, 1:]
y = data.iloc[:, 0]
dmat = xgb.DMatrix(X, label=y)
# validate model performance
bst = xgb.Booster(param, model_file=model_path)
y_pred = bst.predict(dmat)
auc = roc_auc_score(y, y_pred)
print(f"AUC over first {size_valid} instances is: {auc}")
if __name__ == "__main__":
main()
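# Example invocation (illustrative only; adjust the paths and tree count to your
# own environment):
#   python3 model_validation.py \
#       --data_path /tmp/dataset/HIGGS_UCI.csv \
#       --model_path /tmp/workspace/model.json \
#       --size_valid 1000000 --num_trees 100 --tree_method hist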
| NVFlare-main | examples/advanced/random_forest/utils/model_validation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import xgboost as xgb
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.xgboost.tree_based.executor import FedXGBTreeExecutor
def _read_HIGGS_with_pandas(data_path, start: int, end: int):
data_size = end - start
data = pd.read_csv(data_path, header=None, skiprows=start, nrows=data_size)
data_num = data.shape[0]
# split to feature and label
x = data.iloc[:, 1:].copy()
y = data.iloc[:, 0].copy()
return x, y, data_num
class FedXGBTreeHiggsExecutor(FedXGBTreeExecutor):
def __init__(
self,
data_split_filename,
training_mode,
lr_scale,
num_client_bagging: int = 1,
lr_mode: str = "uniform",
local_model_path: str = "model.json",
global_model_path: str = "model_global.json",
learning_rate: float = 0.1,
objective: str = "binary:logistic",
num_local_parallel_tree: int = 1,
local_subsample: float = 0.8,
max_depth: int = 8,
eval_metric: str = "auc",
nthread: int = 16,
tree_method: str = "hist",
train_task_name: str = AppConstants.TASK_TRAIN,
):
super().__init__(
training_mode=training_mode,
num_client_bagging=num_client_bagging,
lr_scale=lr_scale,
lr_mode=lr_mode,
local_model_path=local_model_path,
global_model_path=global_model_path,
learning_rate=learning_rate,
objective=objective,
num_local_parallel_tree=num_local_parallel_tree,
local_subsample=local_subsample,
max_depth=max_depth,
eval_metric=eval_metric,
nthread=nthread,
tree_method=tree_method,
train_task_name=train_task_name,
)
self.data_split_filename = data_split_filename
def load_data(self):
with open(self.data_split_filename) as file:
data_split = json.load(file)
data_path = data_split["data_path"]
data_index = data_split["data_index"]
# check if site_id and "valid" in the mapping dict
if self.client_id not in data_index.keys():
raise ValueError(
f"Dict of data_index does not contain Client {self.client_id} split",
)
if "valid" not in data_index.keys():
raise ValueError(
"Dict of data_index does not contain Validation split",
)
site_index = data_index[self.client_id]
valid_index = data_index["valid"]
# training
X_train, y_train, total_train_data_num = _read_HIGGS_with_pandas(
data_path=data_path, start=site_index["start"], end=site_index["end"]
)
dmat_train = xgb.DMatrix(X_train, label=y_train)
# validation
X_valid, y_valid, total_valid_data_num = _read_HIGGS_with_pandas(
data_path=data_path, start=valid_index["start"], end=valid_index["end"]
)
dmat_valid = xgb.DMatrix(X_valid, label=y_valid)
return dmat_train, dmat_valid
| NVFlare-main | examples/advanced/random_forest/jobs/random_forest_base/app/custom/higgs_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nvflare.apis.filter import ContentBlockedException, Filter
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
log = logging.getLogger(__name__)
class TestFilter(Filter):
def __init__(self, name, block=False):
self.name = name
self.block = block
def process(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
if self.block:
log.info(f"Filter {self.name} blocked the content")
raise ContentBlockedException("Content blocked by filter " + self.name)
log.info(f"Filter {self.name} is invoked")
return shareable
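def _demo_test_filter():
    """Hypothetical sketch (not part of the original filter): a non-blocking filter
    returns the Shareable unchanged, while a blocking one raises
    ContentBlockedException so the content never reaches the peer.
    """
    data = Shareable()
    assert TestFilter(name="pass-through", block=False).process(data, fl_ctx=None) is data
    try:
        TestFilter(name="blocker", block=True).process(Shareable(), fl_ctx=None)
    except ContentBlockedException:
        pass  # expected: the content is rejected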
| NVFlare-main | examples/advanced/federated-policies/policies/site_b/custom/test_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import pandas as pd
def data_split_args_parser():
parser = argparse.ArgumentParser(description="Generate data split for dataset")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--num_clients", type=int, help="Total number of clients")
parser.add_argument("--random_seed", type=int, help="Random seed")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
return parser
def split_df_by_num(df, num=1):
df_len = df.shape[0]
df_1_len = num
idx = list(range(df_len))
np.random.shuffle(idx)
df_1 = df.iloc[idx[:df_1_len]]
df_2 = df.iloc[idx[df_1_len:]]
df_1.reset_index(drop=True, inplace=True)
df_2.reset_index(drop=True, inplace=True)
return df_1, df_2
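def _demo_split_df_by_num():
    """Hypothetical sketch (not part of the original utility): split_df_by_num carves
    `num` randomly chosen rows off a frame and returns the remainder, which is how
    main() peels off one client share at a time.
    """
    np.random.seed(0)
    df = pd.DataFrame({"text": ["a", "b", "c", "d", "e"], "labels": [0, 1, 0, 1, 0]})
    client_df, rest_df = split_df_by_num(df, num=2)
    assert client_df.shape[0] == 2 and rest_df.shape[0] == 3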
def main():
parser = data_split_args_parser()
args = parser.parse_args()
num_clients = args.num_clients
data_path = args.data_path
site_name_prefix = args.site_name_prefix
np.random.seed(args.random_seed)
for mode in ["train", "dev"]:
saved_name = "val" if mode == "dev" else mode
df = pd.read_csv(os.path.join(data_path, mode + ".csv"))
client_size = int(df.shape[0] / num_clients)
os.makedirs(f"{data_path}/{num_clients}_split", exist_ok=True)
for i in range(num_clients):
if i != num_clients - 1:
client_df, df = split_df_by_num(df, client_size)
else:
client_df = df
print(df.shape, client_df.shape)
            # save this client's shard for the current mode (train/val)
client_df.to_csv(f"{data_path}/{num_clients}_split/{site_name_prefix}{i + 1}_{saved_name}.csv")
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/nlp-ner/utils/data_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pandas as pd
import torch
from custom.models.nlp_models import BertModel, GPTModel
from custom.utils.data_sequence import DataSequence
from seqeval.metrics import classification_report
from torch.utils.data import DataLoader
os.environ["TOKENIZERS_PARALLELISM"] = "False"
def data_split_args_parser():
parser = argparse.ArgumentParser(description="Perform model testing by loading the best global model")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--model_path", type=str, help="Path to workspace server folder")
parser.add_argument("--num_labels", type=int, help="Number of labels for the candidate dataset")
parser.add_argument("--model_name", type=str, default="bert-base-uncased", help="Model name")
return parser
if __name__ == "__main__":
parser = data_split_args_parser()
args = parser.parse_args()
device = torch.device("cuda")
model_path = args.model_path
data_path = args.data_path
num_labels = args.num_labels
model_name = args.model_name
ignore_token = -100
df_test = pd.read_csv(os.path.join(data_path, "test.csv"))
# label and id conversion
labels = []
for x in df_test["labels"].values:
labels.extend(x.split(" "))
unique_labels = set(labels)
labels_to_ids = {k: v for v, k in enumerate(sorted(unique_labels))}
ids_to_labels = {v: k for v, k in enumerate(sorted(unique_labels))}
# model
if model_name == "bert-base-uncased":
model = BertModel(model_name=model_name, num_labels=num_labels).to(device)
elif model_name == "gpt2":
model = GPTModel(model_name=model_name, num_labels=num_labels).to(device)
else:
raise ValueError("model not supported")
model_weights = torch.load(os.path.join(model_path, "best_FL_global_model.pt"))
model.load_state_dict(state_dict=model_weights["model"])
tokenizer = model.tokenizer
# data
test_dataset = DataSequence(df_test, labels_to_ids, tokenizer=tokenizer, ignore_token=ignore_token)
test_loader = DataLoader(test_dataset, num_workers=4, batch_size=64, shuffle=False)
# validate
model.eval()
with torch.no_grad():
total_acc_test, total_loss_test, test_total = 0, 0, 0
test_y_pred, test_y_true = [], []
for test_data, test_label in test_loader:
test_label = test_label.to(device)
test_total += test_label.shape[0]
mask = test_data["attention_mask"].squeeze(1).to(device)
input_id = test_data["input_ids"].squeeze(1).to(device)
loss, logits = model(input_id, mask, test_label)
for i in range(logits.shape[0]):
# remove pad tokens
logits_clean = logits[i][test_label[i] != ignore_token]
label_clean = test_label[i][test_label[i] != ignore_token]
                # calculate acc and store prediction and true labels
predictions = logits_clean.argmax(dim=1)
acc = (predictions == label_clean).float().mean()
total_acc_test += acc.item()
test_y_pred.append([ids_to_labels[x.item()] for x in predictions])
test_y_true.append([ids_to_labels[x.item()] for x in label_clean])
# metric summary
summary = classification_report(y_true=test_y_true, y_pred=test_y_pred, zero_division=0)
print(summary)
| NVFlare-main | examples/advanced/nlp-ner/utils/ner_model_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def align_label(
texts_encoded,
labels_raw,
labels_to_ids,
ignore_token,
):
# generate label id vector for the network
# mark the tokens to be ignored
labels_aligned = []
# single sentence each time, so always use 0 index
# get the index mapping from token to word
# this can be dependent on the specific tokenizer
word_ids = texts_encoded.word_ids(batch_index=0)
previous_word_idx = None
for word_idx in word_ids:
if word_idx is None:
            # special tokens (word_idx is None) get the ignore label
labels_aligned.append(ignore_token)
elif word_idx != previous_word_idx:
# only label the first token of a word
labels_aligned.append(labels_to_ids[labels_raw[word_idx]])
else:
labels_aligned.append(ignore_token)
previous_word_idx = word_idx
return labels_aligned
class DataSequence(torch.utils.data.Dataset):
def __init__(self, df, labels_to_ids, tokenizer, ignore_token=-100, max_length=150):
# Raw texts and corresponding labels
texts_batch_raw = [i.split(" ") for i in df["text"].values.tolist()]
labels_batch_raw = [i.split(" ") for i in df["labels"].values.tolist()]
# Iterate through all cases
self.texts = []
self.labels = []
for batch_idx in range(len(texts_batch_raw)):
texts_raw = texts_batch_raw[batch_idx]
labels_raw = labels_batch_raw[batch_idx]
# Encode texts with tokenizer
texts_encoded = tokenizer.encode_plus(
texts_raw,
padding="max_length",
max_length=max_length,
add_special_tokens=True,
truncation=True,
is_split_into_words=True,
return_attention_mask=True,
return_tensors="pt",
)
labels_aligned = align_label(texts_encoded, labels_raw, labels_to_ids, ignore_token)
self.texts.append(texts_encoded)
self.labels.append(labels_aligned)
def __len__(self):
return len(self.labels)
def get_batch_data(self, idx):
return self.texts[idx]
def get_batch_labels(self, idx):
return torch.LongTensor(self.labels[idx])
def __getitem__(self, idx):
batch_data = self.get_batch_data(idx)
batch_labels = self.get_batch_labels(idx)
return batch_data, batch_labels
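def _demo_align_label():
    """Hypothetical sketch (not part of the original module): align_label keeps a
    label id only on the first sub-word token of each word and marks every other
    position (special tokens and word continuations) with ignore_token.
    _FakeEncoding is a stand-in for the tokenizer's BatchEncoding output.
    """
    class _FakeEncoding:
        def __init__(self, ids):
            self._ids = ids
        def word_ids(self, batch_index=0):
            return self._ids
    aligned = align_label(
        texts_encoded=_FakeEncoding([None, 0, 0, 1, None]),
        labels_raw=["B-LOC", "O"],
        labels_to_ids={"B-LOC": 0, "O": 1},
        ignore_token=-100,
    )
    assert aligned == [-100, 0, -100, 1, -100]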
| NVFlare-main | examples/advanced/nlp-ner/custom/utils/data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer
class BertModel(torch.nn.Module):
def __init__(self, model_name, num_labels):
super(BertModel, self).__init__()
self.num_labels = num_labels
self.model_name = model_name
self.model = AutoModelForTokenClassification.from_pretrained(
self.model_name, num_labels=self.num_labels, output_attentions=False, output_hidden_states=False
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
def forward(self, input_id, mask, label):
output = self.model(input_ids=input_id, attention_mask=mask, labels=label, return_dict=False)
return output
class GPTModel(torch.nn.Module):
def __init__(self, model_name, num_labels):
super(GPTModel, self).__init__()
self.num_labels = num_labels
self.model_name = model_name
self.model = AutoModelForTokenClassification.from_pretrained(
self.model_name,
num_labels=self.num_labels,
output_attentions=False,
output_hidden_states=False,
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name, add_prefix_space=True)
self.tokenizer.pad_token = self.tokenizer.eos_token
self.model.config.pad_token_id = self.model.config.eos_token_id
self.model.resize_token_embeddings(len(self.tokenizer))
def forward(self, input_id, mask, label):
output = self.model(input_ids=input_id, attention_mask=mask, labels=label, return_dict=False)
return output
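def _demo_token_classifier():
    """Hypothetical sketch (not part of the original module): both wrappers expose the
    same forward(input_id, mask, label) signature and return the (loss, logits) tuple
    of the underlying Hugging Face token-classification model. Instantiation downloads
    pretrained weights, so this is illustrative only.
    """
    model = BertModel(model_name="bert-base-uncased", num_labels=3)
    encoded = model.tokenizer("John lives in Paris", return_tensors="pt")
    labels = torch.zeros_like(encoded["input_ids"])
    loss, logits = model(encoded["input_ids"], encoded["attention_mask"], labels)
    # logits has shape (1, sequence_length, num_labels)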
| NVFlare-main | examples/advanced/nlp-ner/custom/models/nlp_models.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pandas as pd
import torch
from custom.models.nlp_models import BertModel, GPTModel
from custom.utils.data_sequence import DataSequence
from seqeval.metrics import classification_report
from torch.optim import AdamW
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class NLPLearner(Learner):
def __init__(
self,
data_path: str,
learning_rate: float = 1e-5,
batch_size: int = 32,
model_name: str = "bert-base-uncased",
num_labels: int = 3,
ignore_token: int = -100,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""Supervised NLP task Learner.
        This provides the basic functionality of a local learner for NLP models: it validates
        the received global model at the beginning of each round, performs local training,
        and sends back the updated weights. No model is saved locally; TensorBoard records
        are kept for the local training loss and the global model validation score.
Args:
            data_path: path to the dataset.
            learning_rate: local learning rate.
            batch_size: local batch size.
            model_name: the model name to be used in the pipeline.
            num_labels: number of labels for the model.
            ignore_token: the value used to mark padding / ignored tokens.
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__()
self.aggregation_epochs = aggregation_epochs
self.train_task_name = train_task_name
self.model_name = model_name
self.num_labels = num_labels
self.ignore_token = ignore_token
self.lr = learning_rate
self.bs = batch_size
self.data_path = data_path
# client ID
self.client_id = None
# Epoch counter
self.epoch_of_start_time = 0
self.epoch_global = 0
# Training-related
self.train_loader = None
self.valid_loader = None
self.optimizer = None
self.device = None
self.model = None
self.writer = None
self.best_metric = 0.0
self.labels_to_ids = None
self.ids_to_labels = None
def load_data(self):
df_train = pd.read_csv(os.path.join(self.data_path, self.client_id + "_train.csv"))
df_valid = pd.read_csv(os.path.join(self.data_path, self.client_id + "_val.csv"))
return df_train, df_valid
    def get_labels(self, df_train, fl_ctx: FLContext):
labels = []
for x in df_train["labels"].values:
labels.extend(x.split(" "))
unique_labels = set(labels)
# check label length
if len(unique_labels) != self.num_labels:
self.system_panic(
f"num_labels {self.num_labels} need to align with dataset, actual data {len(unique_labels)}!", fl_ctx
)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
self.labels_to_ids = {k: v for v, k in enumerate(sorted(unique_labels))}
self.ids_to_labels = {v: k for v, k in enumerate(sorted(unique_labels))}
def initialize(self, parts: dict, fl_ctx: FLContext):
# when a run starts, this is where the actual settings get initialized for trainer
# set the paths according to fl_ctx
engine = fl_ctx.get_engine()
ws = engine.get_workspace()
app_dir = ws.get_app_dir(fl_ctx.get_job_id())
# get and print the args
fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
self.client_id = fl_ctx.get_identity_name()
self.log_info(
fl_ctx,
f"Client {self.client_id} initialized with args: \n {fl_args}",
)
# set local tensorboard writer for local validation score of global model
self.writer = SummaryWriter(app_dir)
# set the training-related contexts, this is task-specific
# get data from csv files
self.log_info(fl_ctx, f"Reading data from {self.data_path}")
df_train, df_valid = self.load_data()
# get labels from data
        self.get_labels(df_train, fl_ctx)
# initialize model
self.log_info(
fl_ctx,
f"Creating model {self.model_name}",
)
if self.model_name == "bert-base-uncased":
self.model = BertModel(model_name=self.model_name, num_labels=self.num_labels)
elif self.model_name == "gpt2":
self.model = GPTModel(model_name=self.model_name, num_labels=self.num_labels)
else:
self.system_panic(f"Model {self.model} not supported!", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
tokenizer = self.model.tokenizer
# set data
train_dataset = DataSequence(df_train, self.labels_to_ids, tokenizer=tokenizer, ignore_token=self.ignore_token)
valid_dataset = DataSequence(df_valid, self.labels_to_ids, tokenizer=tokenizer, ignore_token=self.ignore_token)
self.train_loader = DataLoader(train_dataset, num_workers=2, batch_size=self.bs, shuffle=True)
self.valid_loader = DataLoader(valid_dataset, num_workers=2, batch_size=self.bs, shuffle=False)
self.log_info(
fl_ctx,
f"Training Size: {len(self.train_loader.dataset)}, Validation Size: {len(self.valid_loader.dataset)}",
)
# Set the training-related context
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.optimizer = AdamW(self.model.parameters(), lr=self.lr)
def local_train(
self,
fl_ctx,
train_loader,
abort_signal: Signal,
):
"""Typical training logic
Total local epochs: self.aggregation_epochs
Load data pairs from train_loader
Compute loss with self.model
Update model
"""
for epoch in range(self.aggregation_epochs):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.log_info(
fl_ctx,
f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})",
)
for i, batch_data in enumerate(train_loader):
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
mask = batch_data[0]["attention_mask"].squeeze(1).to(self.device)
input_id = batch_data[0]["input_ids"].squeeze(1).to(self.device)
train_label = batch_data[1].to(self.device)
# optimize
loss, logits = self.model(input_id, mask, train_label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
self.writer.add_scalar("train_loss", loss.item(), current_step)
def local_valid(
self,
valid_loader,
abort_signal: Signal,
tb_id_pre=None,
record_epoch=None,
):
"""Typical validation logic
        Load data pairs from valid_loader
Compute outputs with model
Compute evaluation metric with self.valid_metric
Add score to tensorboard record with specified id
"""
self.model.eval()
with torch.no_grad():
total_acc_val, total_loss_val, val_total = 0, 0, 0
val_y_pred, val_y_true = [], []
for val_data, val_label in valid_loader:
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
val_label = val_label.to(self.device)
val_total += val_label.shape[0]
mask = val_data["attention_mask"].squeeze(1).to(self.device)
input_id = val_data["input_ids"].squeeze(1).to(self.device)
# Inference
loss, logits = self.model(input_id, mask, val_label)
# Add items for metric computation
for i in range(logits.shape[0]):
# remove pad tokens
logits_clean = logits[i][val_label[i] != self.ignore_token]
label_clean = val_label[i][val_label[i] != self.ignore_token]
                    # calculate acc and store prediction and true labels
predictions = logits_clean.argmax(dim=1)
acc = (predictions == label_clean).float().mean()
total_acc_val += acc.item()
val_y_pred.append([self.ids_to_labels[x.item()] for x in predictions])
val_y_true.append([self.ids_to_labels[x.item()] for x in label_clean])
# compute metric
metric_dict = classification_report(y_true=val_y_true, y_pred=val_y_pred, output_dict=True, zero_division=0)
# tensorboard record id prefix, add to record if provided
if tb_id_pre:
self.writer.add_scalar(tb_id_pre + "_precision", metric_dict["macro avg"]["precision"], record_epoch)
self.writer.add_scalar(tb_id_pre + "_recall", metric_dict["macro avg"]["recall"], record_epoch)
self.writer.add_scalar(tb_id_pre + "_f1-score", metric_dict["macro avg"]["f1-score"], record_epoch)
return metric_dict["macro avg"]["f1-score"]
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""Typical training task pipeline
Get global model weights (potentially with HE)
Local training
Return updated weights (model_diff)
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# local train
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# flush the tb writer
self.writer.flush()
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
"""Typical validation task pipeline
Get global model weights (potentially with HE)
Validation on local data
Return validation F-1 score
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# validation on global model
model_owner = "global_model"
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
n_loaded = 0
for var_name in local_var_dict:
if var_name in model_keys:
weights = torch.as_tensor(global_weights[var_name], device=self.device)
try:
# update the local dict
local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
n_loaded += 1
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
# before_train_validate only, can extend to other validate types
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
# perform valid before local train
global_metric = self.local_valid(
self.valid_loader,
abort_signal,
tb_id_pre="val_global",
record_epoch=self.epoch_global,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_f1_global_model ({model_owner}): {global_metric:.4f}")
# validation metrics will be averaged with weights at server end for best model record
metric_dxo = DXO(
data_kind=DataKind.METRICS,
data={MetaKey.INITIAL_METRICS: global_metric},
meta={},
)
metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
return metric_dxo.to_shareable()
else:
return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | examples/advanced/nlp-ner/custom/learners/nlp_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import os
import shutil
def parse_args(prog_name: str):
_parser = argparse.ArgumentParser(description=prog_name)
_parser.add_argument(
"--prepare-data",
dest="prepare_data",
action="store_const",
const=prepare_data,
help="prepare data based on configuration",
)
_parser.add_argument(
"-d",
"--dest",
type=str,
nargs="?",
default="",
help="destination directory where the data to download to",
)
return _parser, _parser.parse_args()
def get_data_url() -> dict:
client_data = {
"site-1": "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
"site-2": "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
}
return client_data
def prepare_data(data_root_dir: str):
print(f"prepare data for data directory {data_root_dir}")
client_data_urls = get_data_url()
for client in client_data_urls:
client_data_dir = os.path.join(data_root_dir, client)
if not os.path.exists(client_data_dir):
os.makedirs(client_data_dir, exist_ok=True)
dest = os.path.join(client_data_dir, "data.csv")
if os.path.exists(dest):
print(f"\nremove existing data at {dest}")
shutil.rmtree(dest, ignore_errors=True)
print(f"\ndownload to {dest}")
url = client_data_urls[client]
import requests
with open(dest, "w") as f:
writer = csv.writer(f)
r = requests.get(url, allow_redirects=True)
for line in r.iter_lines():
writer.writerow(line.decode("utf-8").split(","))
print("\ndone with prepare data")
def main():
prog_name = "data_utils"
parser, args = parse_args(prog_name)
if args.prepare_data:
prepare_data(args.dest)
else:
parser.print_help()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/federated-statistics/df_stats/utils/prepare_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import numpy as np
import pandas as pd
from pandas.core.series import Series
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.statistics_spec import BinRange, Feature, Histogram, HistogramType, Statistics
from nvflare.app_common.statistics.numpy_utils import dtype_to_data_type, get_std_histogram_buckets
class DFStatistics(Statistics):
def __init__(self, data_path):
super().__init__()
self.data_root_dir = "/tmp/nvflare/df_stats/data"
self.data_path = data_path
self.data: Optional[Dict[str, pd.DataFrame]] = None
self.data_features = [
"Age",
"Workclass",
"fnlwgt",
"Education",
"Education-Num",
"Marital Status",
"Occupation",
"Relationship",
"Race",
"Sex",
"Capital Gain",
"Capital Loss",
"Hours per week",
"Country",
"Target",
]
self.skip_rows = {
"site-1": [],
"site-2": [0],
}
def load_data(self, fl_ctx: FLContext) -> Dict[str, pd.DataFrame]:
client_name = fl_ctx.get_identity_name()
self.log_info(fl_ctx, f"load data for client {client_name}")
try:
skip_rows = self.skip_rows[client_name]
data_path = f"{self.data_root_dir}/{fl_ctx.get_identity_name()}/{self.data_path}"
# example of load data from CSV
df: pd.DataFrame = pd.read_csv(
data_path, names=self.data_features, sep=r"\s*,\s*", skiprows=skip_rows, engine="python", na_values="?"
)
train = df.sample(frac=0.8, random_state=200) # random state is a seed value
test = df.drop(train.index).sample(frac=1.0)
self.log_info(fl_ctx, f"load data done for client {client_name}")
return {"train": train, "test": test}
except Exception as e:
raise Exception(f"Load data for client {client_name} failed! {e}")
def initialize(self, fl_ctx: FLContext):
self.data = self.load_data(fl_ctx)
if self.data is None:
raise ValueError("data is not loaded. make sure the data is loaded")
def features(self) -> Dict[str, List[Feature]]:
results: Dict[str, List[Feature]] = {}
for ds_name in self.data:
df = self.data[ds_name]
results[ds_name] = []
for feature_name in df:
data_type = dtype_to_data_type(df[feature_name].dtype)
results[ds_name].append(Feature(feature_name, data_type))
return results
def count(self, dataset_name: str, feature_name: str) -> int:
df: pd.DataFrame = self.data[dataset_name]
return df[feature_name].count()
def sum(self, dataset_name: str, feature_name: str) -> float:
df: pd.DataFrame = self.data[dataset_name]
return df[feature_name].sum().item()
def mean(self, dataset_name: str, feature_name: str) -> float:
count: int = self.count(dataset_name, feature_name)
sum_value: float = self.sum(dataset_name, feature_name)
return sum_value / count
def stddev(self, dataset_name: str, feature_name: str) -> float:
df = self.data[dataset_name]
return df[feature_name].std().item()
def variance_with_mean(
self, dataset_name: str, feature_name: str, global_mean: float, global_count: float
) -> float:
df = self.data[dataset_name]
tmp = (df[feature_name] - global_mean) * (df[feature_name] - global_mean)
variance = tmp.sum() / (global_count - 1)
return variance.item()
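    # Worked sketch (assumption, not from the original file): with local values
    # [1, 2, 3], global_mean=2.5 and global_count=6, this method returns
    # ((1 - 2.5)**2 + (2 - 2.5)**2 + (3 - 2.5)**2) / (6 - 1) = 2.75 / 5 = 0.55.
    # Dividing by the *global* count here means the per-client values can be summed
    # server-side to form the global variance (our reading of the aggregation step).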
def histogram(
self, dataset_name: str, feature_name: str, num_of_bins: int, global_min_value: float, global_max_value: float
) -> Histogram:
num_of_bins: int = num_of_bins
df = self.data[dataset_name]
feature: Series = df[feature_name]
flattened = feature.ravel()
flattened = flattened[flattened != np.array(None)]
buckets = get_std_histogram_buckets(flattened, num_of_bins, BinRange(global_min_value, global_max_value))
return Histogram(HistogramType.STANDARD, buckets)
def max_value(self, dataset_name: str, feature_name: str) -> float:
"""this is needed for histogram calculation, not used for reporting"""
df = self.data[dataset_name]
return df[feature_name].max()
def min_value(self, dataset_name: str, feature_name: str) -> float:
"""this is needed for histogram calculation, not used for reporting"""
df = self.data[dataset_name]
return df[feature_name].min()
| NVFlare-main | examples/advanced/federated-statistics/df_stats/jobs/df_stats/app/custom/df_statistics.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from df_stats.custom.df_statistics import DFStatistics
from nvflare.apis.fl_context import FLContext
class TestDFStatistics:
def setup_method(self) -> None:
# mock the load_data with fake data
self.local_stats_gen = DFStatistics(data_path="data.csv")
self.local_stats_gen.load_data = self.load_data
self.local_stats_gen.initialize(fl_ctx=None)
def teardown_method(self) -> None:
pass
def load_data(self, fl_ctx: FLContext = None):
# initialize list of lists
data = [["tom", 10, 4], ["nick", 15, 7], ["juli", 14, 8]]
train_df = pd.DataFrame(data, columns=["Name", "Age", "Edu"])
data = [["sam", 90, 20], ["jack", 75, 20], ["sara", 44, 13]]
test_df = pd.DataFrame(data, columns=["Name", "Age", "Edu"])
return {"train": train_df, "test": test_df}
def test_get_features(self):
fs = self.local_stats_gen.features()
assert len(fs.keys()) == 2
assert "train" in fs.keys()
assert "test" in fs.keys()
assert fs["train"] == fs["test"]
assert [f.feature_name for f in fs["train"]] == ["Name", "Age", "Edu"]
def test_get_count(self):
count = self.local_stats_gen.count("train", "Age")
assert count == 3
| NVFlare-main | examples/advanced/federated-statistics/df_stats/jobs/df_stats/app/custom/tests/df_statistics_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import os
import random
SEED = 0
def create_datasets(root, subdirs, extension, shuffle, seed):
random.seed(seed)
data_lists = []
for subdir in subdirs:
search_string = os.path.join(root, "**", subdir, "images", "*" + extension)
data_list = glob.glob(search_string, recursive=True)
assert (
len(data_list) > 0
), f"No images found using {search_string} for subdir '{subdir}' and extension '{extension}'!"
if shuffle:
random.shuffle(data_list)
data_lists.append(data_list)
return data_lists
def save_data_list(data, data_list_file, data_root, key="data"):
data_list = []
for d in data:
data_list.append({"image": d.replace(data_root + os.path.sep, "")})
os.makedirs(os.path.dirname(data_list_file), exist_ok=True)
with open(data_list_file, "w") as f:
json.dump({key: data_list}, f, indent=4)
print(f"Saved {len(data_list)} entries at {data_list_file}")
def prepare_data(
input_dir: str,
input_ext: str = ".png",
output_dir: str = "/tmp/nvflare/image_stats/data",
sub_dirs: str = "COVID,Lung_Opacity,Normal,Viral Pneumonia",
):
sub_dir_list = [sd for sd in sub_dirs.split(",")]
data_lists = create_datasets(root=input_dir, subdirs=sub_dir_list, extension=input_ext, shuffle=True, seed=SEED)
print(f"Created {len(data_lists)} data lists for {sub_dir_list}.")
site_id = 1
for subdir, data_list in zip(sub_dir_list, data_lists):
save_data_list(data_list, os.path.join(output_dir, f"site-{site_id}_{subdir}.json"), data_root=input_dir)
site_id += 1
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", type=str, required=True, help="Location of image files")
parser.add_argument("--input_ext", type=str, default=".png", help="Search extension")
parser.add_argument(
"--output_dir", type=str, default="/tmp/nvflare/image_stats/data", help="Output location of data lists"
)
parser.add_argument(
"--subdirs",
type=str,
default="COVID,Lung_Opacity,Normal,Viral Pneumonia",
help="A list of sub-folders to include.",
)
args = parser.parse_args()
assert "," in args.subdirs, "Expecting a comma separated list of subdirs names"
prepare_data(args.input_dir, args.input_ext, args.output_dir, args.subdirs)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/federated-statistics/image_stats/utils/prepare_data.py |
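# --- Hedged usage sketch (not part of the repository file above) ---
# Builds a throwaway directory tree matching the layout the script expects
# (<root>/<anything>/<subdir>/images/*.png) and runs prepare_data() on it.
# The import assumes this utils/ script is on PYTHONPATH; all paths below are
# illustrative assumptions, not real dataset locations.
import os
import tempfile

from prepare_data import prepare_data

fake_root = tempfile.mkdtemp(prefix="image_stats_demo_")
for subdir in ["COVID", "Lung_Opacity", "Normal", "Viral Pneumonia"]:
    image_dir = os.path.join(fake_root, "dataset", subdir, "images")
    os.makedirs(image_dir, exist_ok=True)
    # empty placeholder files are enough for the glob-based listing
    open(os.path.join(image_dir, "sample.png"), "wb").close()

prepare_data(input_dir=fake_root, output_dir=os.path.join(fake_root, "lists"))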
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from typing import Dict, List, Optional
import numpy as np
from monai.data import ITKReader, load_decathlon_datalist
from monai.transforms import LoadImage
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.statistics_spec import Bin, DataType, Feature, Histogram, HistogramType, Statistics
from nvflare.security.logging import secure_log_traceback
class ImageStatistics(Statistics):
def __init__(self, data_root: str = "/tmp/nvflare/image_stats/data", data_list_key: str = "data"):
"""local image statistics generator .
Args:
data_root: directory with local image data.
data_list_key: data list key to use.
Returns:
            a Shareable with the computed local statistics
"""
super().__init__()
self.data_list_key = data_list_key
self.data_root = data_root
self.data_list = None
self.client_name = None
self.loader = None
self.failure_images = 0
self.fl_ctx = None
def initialize(self, fl_ctx: FLContext):
self.fl_ctx = fl_ctx
self.client_name = fl_ctx.get_identity_name()
self.loader = LoadImage()
self.loader.register(ITKReader())
self._load_data_list(self.client_name, fl_ctx)
if self.data_list is None:
raise ValueError("data is not loaded. make sure the data is loaded")
def _load_data_list(self, client_name, fl_ctx: FLContext) -> bool:
dataset_json = glob.glob(os.path.join(self.data_root, client_name + "*.json"))
if len(dataset_json) != 1:
self.log_error(
fl_ctx, f"No unique matching dataset list found in {self.data_root} for client {client_name}"
)
return False
dataset_json = dataset_json[0]
self.log_info(fl_ctx, f"Reading data from {dataset_json}")
data_list = load_decathlon_datalist(
data_list_file_path=dataset_json, data_list_key=self.data_list_key, base_dir=self.data_root
)
self.data_list = {"train": data_list}
self.log_info(fl_ctx, f"Client {client_name} has {len(self.data_list)} images")
return True
def pre_run(
self,
statistics: List[str],
num_of_bins: Optional[Dict[str, Optional[int]]],
bin_ranges: Optional[Dict[str, Optional[List[float]]]],
):
return {}
def features(self) -> Dict[str, List[Feature]]:
return {"train": [Feature("intensity", DataType.FLOAT)]}
def count(self, dataset_name: str, feature_name: str) -> int:
image_paths = self.data_list[dataset_name]
return len(image_paths)
def failure_count(self, dataset_name: str, feature_name: str) -> int:
return self.failure_images
def histogram(
self, dataset_name: str, feature_name: str, num_of_bins: int, global_min_value: float, global_max_value: float
) -> Histogram:
histogram_bins: List[Bin] = []
histogram = np.zeros((num_of_bins,), dtype=np.int64)
bin_edges = []
for i, entry in enumerate(self.data_list[dataset_name]):
file = entry.get("image")
try:
img, meta = self.loader(file)
curr_histogram, bin_edges = np.histogram(
img, bins=num_of_bins, range=(global_min_value, global_max_value)
)
histogram += curr_histogram
bin_edges = bin_edges.tolist()
if i % 100 == 0:
self.logger.info(
f"{self.client_name}, adding {i + 1} of {len(self.data_list[dataset_name])}: {file}"
)
except Exception as e:
self.failure_images += 1
self.logger.critical(
f"Failed to load file {file} with exception: {e.__str__()}. " f"Skipping this image..."
)
if num_of_bins + 1 != len(bin_edges):
secure_log_traceback()
raise ValueError(
f"bin_edges size: {len(bin_edges)} is not matching with number of bins + 1: {num_of_bins + 1}"
)
for j in range(num_of_bins):
low_value = bin_edges[j]
high_value = bin_edges[j + 1]
bin_sample_count = histogram[j]
histogram_bins.append(Bin(low_value=low_value, high_value=high_value, sample_count=bin_sample_count))
return Histogram(HistogramType.STANDARD, histogram_bins)
| NVFlare-main | examples/advanced/federated-statistics/image_stats/jobs/image_stats/app/custom/image_statistics.py |
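# --- Hedged usage sketch (not part of the repository file above) ---
# A numpy-only illustration of the accumulation pattern in
# ImageStatistics.histogram(): every image is histogrammed against the same
# global (min, max) range, so per-image counts can simply be summed and the
# bin edges stay identical across images. The random "images" are stand-ins.
import numpy as np

rng = np.random.default_rng(0)
images = [rng.uniform(0, 255, size=(8, 8)) for _ in range(3)]
num_of_bins, global_min_value, global_max_value = 10, 0.0, 255.0

histogram = np.zeros((num_of_bins,), dtype=np.int64)
bin_edges = None
for img in images:
    counts, bin_edges = np.histogram(img, bins=num_of_bins, range=(global_min_value, global_max_value))
    histogram += counts

assert len(bin_edges) == num_of_bins + 1
print(histogram, histogram.sum())  # the total equals 3 * 8 * 8 = 192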
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from typing import Optional
import numpy as np
import pandas as pd
from sklearn import datasets
def sklearn_dataset_args_parser():
parser = argparse.ArgumentParser(description="Load sklearn data and save to csv")
parser.add_argument("--dataset_name", type=str, choices=["iris", "cancer"], help="Dataset name")
parser.add_argument("--randomize", type=int, help="Whether to randomize data sequence")
parser.add_argument("--out_path", type=str, help="Path to output data file")
return parser
def load_data(dataset_name: str = "iris"):
if dataset_name == "iris":
dataset = datasets.load_iris()
elif dataset_name == "cancer":
dataset = datasets.load_breast_cancer()
else:
raise ValueError("Dataset unknown!")
return dataset
def download_data(
output_dir: str,
dataset_name: str = "iris",
randomize: bool = False,
filename: Optional[str] = None,
file_format="csv",
):
# Load data
dataset = load_data(dataset_name)
x = dataset.data
y = dataset.target
if randomize:
np.random.seed(0)
idx_random = np.random.permutation(len(y))
x = x[idx_random, :]
y = y[idx_random]
data = np.column_stack((y, x))
df = pd.DataFrame(data=data)
# Check if the target folder exists,
# If not, create
if os.path.exists(output_dir) and not os.path.isdir(output_dir):
os.rmdir(output_dir)
os.makedirs(output_dir, exist_ok=True)
# Save to csv file
filename = filename if filename else f"{dataset_name}.csv"
if file_format == "csv":
file_path = os.path.join(output_dir, filename)
df.to_csv(file_path, sep=",", index=False, header=False)
else:
raise NotImplementedError
def main():
parser = argparse.ArgumentParser(description="Load sklearn data and save to csv")
parser.add_argument("--dataset_name", type=str, choices=["iris", "cancer"], help="Dataset name")
parser.add_argument("--randomize", type=int, help="Whether to randomize data sequence")
parser.add_argument("--out_path", type=str, help="Path to output data file")
args = parser.parse_args()
output_dir = os.path.dirname(args.out_path)
filename = os.path.basename(args.out_path)
download_data(output_dir, args.dataset_name, args.randomize, filename)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/sklearn-svm/utils/prepare_data.py |
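# --- Hedged usage sketch (not part of the repository file above) ---
# Writes the randomized breast-cancer dataset (label in the first column, no
# header row) to a made-up /tmp location using download_data() from the script
# above. The import assumes this utils/ directory is on PYTHONPATH.
from prepare_data import download_data

download_data(
    output_dir="/tmp/nvflare/sklearn_svm/data",
    dataset_name="cancer",
    randomize=True,
    filename="data.csv",
)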
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pathlib
import shutil
from enum import Enum
from typing import List
import numpy as np
from nvflare.apis.fl_constant import JobConstants
JOBS_ROOT = "jobs"
class SplitMethod(Enum):
UNIFORM = "uniform"
LINEAR = "linear"
SQUARE = "square"
EXPONENTIAL = "exponential"
def job_config_args_parser():
parser = argparse.ArgumentParser(description="generate train configs with data split")
parser.add_argument("--task_name", type=str, help="Task name for the config")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--site_num", type=int, help="Total number of sites")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
parser.add_argument(
"--data_size",
type=int,
default=0,
help="Total data size, use if specified, in order to use partial data"
"If not specified, use the full data size fetched from file.",
)
parser.add_argument(
"--valid_frac",
type=float,
help="Validation fraction of the total size, N = round(total_size* valid_frac), "
"the first N to be treated as validation data. "
"special case valid_frac = 1, where all data will be used"
"in validation, e.g. for evaluating unsupervised clustering with known ground truth label.",
)
parser.add_argument(
"--split_method",
type=str,
default="uniform",
choices=["uniform", "linear", "square", "exponential"],
help="How to split the dataset",
)
parser.add_argument(
"--backend",
type=str,
default="sklearn",
choices=["sklearn", "cuml"],
help="Backend library used",
)
return parser
def get_split_ratios(site_num: int, split_method: SplitMethod):
if split_method == SplitMethod.UNIFORM:
ratio_vec = np.ones(site_num)
elif split_method == SplitMethod.LINEAR:
ratio_vec = np.linspace(1, site_num, num=site_num)
elif split_method == SplitMethod.SQUARE:
ratio_vec = np.square(np.linspace(1, site_num, num=site_num))
elif split_method == SplitMethod.EXPONENTIAL:
ratio_vec = np.exp(np.linspace(1, site_num, num=site_num))
else:
raise ValueError(f"Split method {split_method.name} not implemented!")
return ratio_vec
def split_num_proportion(n, site_num, split_method: SplitMethod) -> List[int]:
split = []
ratio_vec = get_split_ratios(site_num, split_method)
total = sum(ratio_vec)
left = n
for site in range(site_num - 1):
x = int(n * ratio_vec[site] / total)
left = left - x
split.append(x)
split.append(left)
return split
def assign_data_index_to_sites(
data_size: int,
valid_fraction: float,
num_sites: int,
site_name_prefix: str,
split_method: SplitMethod = SplitMethod.UNIFORM,
) -> dict:
if valid_fraction > 1.0:
raise ValueError("validation percent should be less than or equal to 100% of the total data")
elif valid_fraction < 1.0:
valid_size = int(round(data_size * valid_fraction, 0))
train_size = data_size - valid_size
else:
valid_size = data_size
train_size = data_size
site_sizes = split_num_proportion(train_size, num_sites, split_method)
split_data_indices = {
"valid": {"start": 0, "end": valid_size},
}
for site in range(num_sites):
site_id = site_name_prefix + str(site + 1)
if valid_fraction < 1.0:
idx_start = valid_size + sum(site_sizes[:site])
idx_end = valid_size + sum(site_sizes[: site + 1])
else:
idx_start = sum(site_sizes[:site])
idx_end = sum(site_sizes[: site + 1])
split_data_indices[site_id] = {"start": idx_start, "end": idx_end}
return split_data_indices
def get_file_line_count(input_path: str) -> int:
count = 0
with open(input_path, "r") as fp:
for i, _ in enumerate(fp):
count += 1
return count
def split_data(
data_path: str,
site_num: int,
data_size: int,
valid_frac: float,
site_name_prefix: str = "site-",
split_method: SplitMethod = SplitMethod.UNIFORM,
):
size_total_file = get_file_line_count(data_path)
if data_size > 0:
if data_size > size_total_file:
raise ValueError("data_size should be less than or equal to the true data size")
else:
size_total = data_size
else:
size_total = size_total_file
site_indices = assign_data_index_to_sites(size_total, valid_frac, site_num, site_name_prefix, split_method)
return site_indices
def _read_json(filename):
if not os.path.isfile(filename):
raise ValueError(f"{filename} does not exist!")
with open(filename, "r") as f:
return json.load(f)
def _write_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=2)
def _get_job_name(args) -> str:
return args.task_name + "_" + str(args.site_num) + "_" + args.split_method + "_" + args.backend
def _gen_deploy_map(num_sites: int, site_name_prefix: str) -> dict:
deploy_map = {"app_server": ["server"]}
for i in range(1, num_sites + 1):
deploy_map[f"app_{site_name_prefix}{i}"] = [f"{site_name_prefix}{i}"]
return deploy_map
def _update_meta(meta: dict, args):
name = _get_job_name(args)
meta["name"] = name
meta["deploy_map"] = _gen_deploy_map(args.site_num, args.site_name_prefix)
meta["min_clients"] = args.site_num
def _update_client_config(config: dict, args, site_name: str, site_indices):
# update client config
# data path and training/validation row indices
config["components"][0]["args"]["backend"] = args.backend
config["components"][0]["args"]["data_path"] = args.data_path
config["components"][0]["args"]["train_start"] = site_indices[site_name]["start"]
config["components"][0]["args"]["train_end"] = site_indices[site_name]["end"]
config["components"][0]["args"]["valid_start"] = site_indices["valid"]["start"]
config["components"][0]["args"]["valid_end"] = site_indices["valid"]["end"]
def _update_server_config(config: dict, args):
config["min_clients"] = args.site_num
def _copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name):
dst_path = dst_job_path / dst_app_name / "custom"
os.makedirs(dst_path, exist_ok=True)
src_path = src_job_path / src_app_name / "custom"
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path, dirs_exist_ok=True)
def create_server_app(src_job_path, src_app_name, dst_job_path, site_name, args):
dst_app_name = f"app_{site_name}"
server_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.SERVER_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
_update_server_config(server_config, args)
server_config_filename = dst_config_path / JobConstants.SERVER_JOB_CONFIG
_write_json(server_config, server_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def create_client_app(src_job_path, src_app_name, dst_job_path, site_name, site_indices, args):
dst_app_name = f"app_{site_name}"
client_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.CLIENT_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
# adjust file contents according to each job's specs
_update_client_config(client_config, args, site_name, site_indices)
client_config_filename = dst_config_path / JobConstants.CLIENT_JOB_CONFIG
_write_json(client_config, client_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def main():
parser = job_config_args_parser()
args = parser.parse_args()
job_name = _get_job_name(args)
src_name = args.task_name + "_base"
src_job_path = pathlib.Path(JOBS_ROOT) / src_name
# create a new job
dst_job_path = pathlib.Path(JOBS_ROOT) / job_name
if not os.path.exists(dst_job_path):
os.makedirs(dst_job_path)
# update meta
meta_config_dst = dst_job_path / JobConstants.META_FILE
meta_config = _read_json(src_job_path / JobConstants.META_FILE)
_update_meta(meta_config, args)
_write_json(meta_config, meta_config_dst)
# create server side app
create_server_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name="server",
args=args,
)
# generate data split
site_indices = split_data(
args.data_path,
args.site_num,
args.data_size,
args.valid_frac,
args.site_name_prefix,
)
# create client side app
for i in range(1, args.site_num + 1):
create_client_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name=f"{args.site_name_prefix}{i}",
site_indices=site_indices,
args=args,
)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/sklearn-svm/utils/prepare_job_config.py |
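# --- Hedged usage sketch (not part of the repository file above) ---
# Quick look at how split_num_proportion() divides 100 training rows across
# 3 sites for two of the split methods defined above. The import assumes this
# utils/ script is on PYTHONPATH; the printed values are for illustration.
from prepare_job_config import SplitMethod, split_num_proportion

print(split_num_proportion(100, 3, SplitMethod.UNIFORM))  # [33, 33, 34]
print(split_num_proportion(100, 3, SplitMethod.LINEAR))   # [16, 33, 51]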
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
from sklearn.metrics import roc_auc_score
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_opt.sklearn.data_loader import load_data_for_range
from nvflare.fuel.utils.import_utils import optional_import
class SVMLearner(Learner):
def __init__(
self,
backend: str,
data_path: str,
train_start: int,
train_end: int,
valid_start: int,
valid_end: int,
):
super().__init__()
self.backend = backend
if self.backend == "sklearn":
self.svm_lib, flag = optional_import(module="sklearn.svm")
if not flag:
self.log_error(fl_ctx, "Can't import sklearn.svm")
return
elif self.backend == "cuml":
self.svm_lib, flag = optional_import(module="cuml.svm")
if not flag:
self.log_error(fl_ctx, "Can't import cuml.svm")
return
else:
self.system_panic(f"backend SVM library {self.backend} unknown!", fl_ctx)
self.data_path = data_path
self.train_start = train_start
self.train_end = train_end
self.valid_start = valid_start
self.valid_end = valid_end
self.train_data = None
self.valid_data = None
self.n_samples = None
self.svm = None
self.kernel = None
self.params = {}
def load_data(self) -> dict:
train_data = load_data_for_range(self.data_path, self.train_start, self.train_end)
valid_data = load_data_for_range(self.data_path, self.valid_start, self.valid_end)
return {"train": train_data, "valid": valid_data}
def initialize(self, parts: dict, fl_ctx: FLContext):
data = self.load_data()
self.train_data = data["train"]
self.valid_data = data["valid"]
# train data size, to be used for setting
# NUM_STEPS_CURRENT_ROUND for potential use in aggregation
self.n_samples = data["train"][-1]
# model will be created after receiving global parameter of kernel
def train(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
if curr_round == 0:
# only perform training on the first round
(x_train, y_train, train_size) = self.train_data
self.kernel = global_param["kernel"]
self.svm = self.svm_lib.SVC(kernel=self.kernel)
# train model
self.svm.fit(x_train, y_train)
# get support vectors
index = self.svm.support_
local_support_x = x_train[index]
local_support_y = y_train[index]
self.params = {"support_x": local_support_x, "support_y": local_support_y}
elif curr_round > 1:
self.system_panic("Federated SVM only performs training for one round, system exiting.", fl_ctx)
return self.params, self.svm
def validate(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
# local validation with global support vectors
# fit a standalone SVM with the global support vectors
svm_global = self.svm_lib.SVC(kernel=self.kernel)
support_x = global_param["support_x"]
support_y = global_param["support_y"]
svm_global.fit(support_x, support_y)
# validate global model
(x_valid, y_valid, valid_size) = self.valid_data
y_pred = svm_global.predict(x_valid)
auc = roc_auc_score(y_valid, y_pred)
self.log_info(fl_ctx, f"AUC {auc:.4f}")
metrics = {"AUC": auc}
return metrics, svm_global
def finalize(self, fl_ctx: FLContext) -> None:
# freeing resources in finalize
del self.train_data
del self.valid_data
self.log_info(fl_ctx, "Freed training resources")
| NVFlare-main | examples/advanced/sklearn-svm/jobs/sklearn_svm_base/app/custom/svm_learner.py |
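# --- Hedged usage sketch (not part of the repository files above) ---
# A standalone, single-process illustration of the idea behind SVMLearner and
# SVMAssembler: each "site" fits a local SVC, shares only its support vectors,
# and a global SVC is refit on the pooled support vectors. The synthetic data
# and the two-site split are made-up assumptions.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import SVC

x, y = make_classification(n_samples=200, n_features=5, random_state=0)
sites = [(x[:100], y[:100]), (x[100:], y[100:])]

support_x, support_y = [], []
for x_site, y_site in sites:
    svm_local = SVC(kernel="rbf")
    svm_local.fit(x_site, y_site)
    idx = svm_local.support_
    support_x.append(x_site[idx])
    support_y.append(y_site[idx])

svm_global = SVC(kernel="rbf")
svm_global.fit(np.concatenate(support_x), np.concatenate(support_y))
print("global support vector count:", len(svm_global.support_))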
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
from sklearn.svm import SVC
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.aggregators.assembler import Assembler
from nvflare.app_common.app_constant import AppConstants
class SVMAssembler(Assembler):
def __init__(self, kernel):
super().__init__(data_kind=DataKind.WEIGHTS)
# Record the global support vectors
# so that only 1 round of training is performed
self.support_x = None
self.support_y = None
self.kernel = kernel
def get_model_params(self, dxo: DXO):
data = dxo.data
return {"support_x": data["support_x"], "support_y": data["support_y"]}
def assemble(self, data: Dict[str, dict], fl_ctx: FLContext) -> DXO:
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
if current_round == 0:
# First round, collect all support vectors from clients
support_x = []
support_y = []
for client in self.collection:
client_model = self.collection[client]
support_x.append(client_model["support_x"])
support_y.append(client_model["support_y"])
global_x = np.concatenate(support_x)
global_y = np.concatenate(support_y)
# perform one round of SVM to produce global model
svm_global = SVC(kernel=self.kernel)
svm_global.fit(global_x, global_y)
# get global support vectors
index = svm_global.support_
self.support_x = global_x[index]
self.support_y = global_y[index]
params = {"support_x": self.support_x, "support_y": self.support_y}
dxo = DXO(data_kind=self.expected_data_kind, data=params)
return dxo
| NVFlare-main | examples/advanced/sklearn-svm/jobs/sklearn_svm_base/app/custom/svm_assembler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from typing import Optional
import numpy as np
import pandas as pd
from sklearn import datasets
def load_data(dataset_name: str = "iris"):
if dataset_name == "iris":
dataset = datasets.load_iris()
elif dataset_name == "cancer":
dataset = datasets.load_breast_cancer()
else:
raise ValueError("Dataset unknown!")
return dataset
def prepare_data(
output_dir: str,
dataset_name: str = "iris",
randomize: bool = False,
filename: Optional[str] = None,
file_format="csv",
):
# Load data
dataset = load_data(dataset_name)
x = dataset.data
y = dataset.target
if randomize:
np.random.seed(0)
idx_random = np.random.permutation(len(y))
x = x[idx_random, :]
y = y[idx_random]
data = np.column_stack((y, x))
df = pd.DataFrame(data=data)
# Check if the target folder exists,
# If not, create
if os.path.exists(output_dir) and not os.path.isdir(output_dir):
os.rmdir(output_dir)
os.makedirs(output_dir, exist_ok=True)
# Save to csv file
filename = filename if filename else f"{dataset_name}.csv"
if file_format == "csv":
file_path = os.path.join(output_dir, filename)
df.to_csv(file_path, sep=",", index=False, header=False)
else:
raise NotImplementedError
def main():
parser = argparse.ArgumentParser(description="Load sklearn data and save to csv")
parser.add_argument("--dataset_name", type=str, choices=["iris", "cancer"], help="Dataset name")
parser.add_argument("--randomize", type=int, help="Whether to randomize data sequence")
parser.add_argument("--out_path", type=str, help="Path to output data file")
args = parser.parse_args()
output_dir = os.path.dirname(args.out_path)
filename = os.path.basename(args.out_path)
prepare_data(output_dir, args.dataset_name, args.randomize, filename)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/sklearn-kmeans/utils/prepare_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pathlib
import shutil
from enum import Enum
from typing import List
import numpy as np
from nvflare.apis.fl_constant import JobConstants
JOBS_ROOT = "jobs"
class SplitMethod(Enum):
UNIFORM = "uniform"
LINEAR = "linear"
SQUARE = "square"
EXPONENTIAL = "exponential"
def job_config_args_parser():
parser = argparse.ArgumentParser(description="generate train configs with data split")
parser.add_argument("--task_name", type=str, help="Task name for the config")
parser.add_argument("--data_path", type=str, help="Path to data file")
parser.add_argument("--site_num", type=int, help="Total number of sites")
parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix")
parser.add_argument(
"--data_size",
type=int,
default=0,
help="Total data size, use if specified, in order to use partial data"
"If not specified, use the full data size fetched from file.",
)
parser.add_argument(
"--valid_frac",
type=float,
help="Validation fraction of the total size, N = round(total_size* valid_frac), "
"the first N to be treated as validation data. "
"special case valid_frac = 1, where all data will be used"
"in validation, e.g. for evaluating unsupervised clustering with known ground truth label.",
)
parser.add_argument(
"--split_method",
type=str,
default="uniform",
choices=["uniform", "linear", "square", "exponential"],
help="How to split the dataset",
)
return parser
def get_split_ratios(site_num: int, split_method: SplitMethod):
if split_method == SplitMethod.UNIFORM:
ratio_vec = np.ones(site_num)
elif split_method == SplitMethod.LINEAR:
ratio_vec = np.linspace(1, site_num, num=site_num)
elif split_method == SplitMethod.SQUARE:
ratio_vec = np.square(np.linspace(1, site_num, num=site_num))
elif split_method == SplitMethod.EXPONENTIAL:
ratio_vec = np.exp(np.linspace(1, site_num, num=site_num))
else:
raise ValueError(f"Split method {split_method.name} not implemented!")
return ratio_vec
def split_num_proportion(n, site_num, split_method: SplitMethod) -> List[int]:
split = []
ratio_vec = get_split_ratios(site_num, split_method)
total = sum(ratio_vec)
left = n
for site in range(site_num - 1):
x = int(n * ratio_vec[site] / total)
left = left - x
split.append(x)
split.append(left)
return split
def assign_data_index_to_sites(
data_size: int,
valid_fraction: float,
num_sites: int,
site_name_prefix: str,
split_method: SplitMethod = SplitMethod.UNIFORM,
) -> dict:
if valid_fraction > 1.0:
raise ValueError("validation percent should be less than or equal to 100% of the total data")
elif valid_fraction < 1.0:
valid_size = int(round(data_size * valid_fraction, 0))
train_size = data_size - valid_size
else:
valid_size = data_size
train_size = data_size
site_sizes = split_num_proportion(train_size, num_sites, split_method)
split_data_indices = {
"valid": {"start": 0, "end": valid_size},
}
for site in range(num_sites):
site_id = site_name_prefix + str(site + 1)
if valid_fraction < 1.0:
idx_start = valid_size + sum(site_sizes[:site])
idx_end = valid_size + sum(site_sizes[: site + 1])
else:
idx_start = sum(site_sizes[:site])
idx_end = sum(site_sizes[: site + 1])
split_data_indices[site_id] = {"start": idx_start, "end": idx_end}
return split_data_indices
def get_file_line_count(input_path: str) -> int:
count = 0
with open(input_path, "r") as fp:
for i, _ in enumerate(fp):
count += 1
return count
def split_data(
data_path: str,
site_num: int,
data_size: int,
valid_frac: float,
site_name_prefix: str = "site-",
split_method: SplitMethod = SplitMethod.UNIFORM,
):
size_total_file = get_file_line_count(data_path)
if data_size > 0:
if data_size > size_total_file:
raise ValueError("data_size should be less than or equal to the true data size")
else:
size_total = data_size
else:
size_total = size_total_file
site_indices = assign_data_index_to_sites(size_total, valid_frac, site_num, site_name_prefix, split_method)
return site_indices
def _read_json(filename):
if not os.path.isfile(filename):
raise ValueError(f"{filename} does not exist!")
with open(filename, "r") as f:
return json.load(f)
def _write_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=2)
def _get_job_name(args) -> str:
return args.task_name + "_" + str(args.site_num) + "_" + args.split_method
def _gen_deploy_map(num_sites: int, site_name_prefix: str) -> dict:
deploy_map = {"app_server": ["server"]}
for i in range(1, num_sites + 1):
deploy_map[f"app_{site_name_prefix}{i}"] = [f"{site_name_prefix}{i}"]
return deploy_map
def _update_meta(meta: dict, args):
name = _get_job_name(args)
meta["name"] = name
meta["deploy_map"] = _gen_deploy_map(args.site_num, args.site_name_prefix)
meta["min_clients"] = args.site_num
def _update_client_config(config: dict, args, site_name: str, site_indices):
# update client config
# data path and training/validation row indices
config["components"][0]["args"]["data_path"] = args.data_path
config["components"][0]["args"]["train_start"] = site_indices[site_name]["start"]
config["components"][0]["args"]["train_end"] = site_indices[site_name]["end"]
config["components"][0]["args"]["valid_start"] = site_indices["valid"]["start"]
config["components"][0]["args"]["valid_end"] = site_indices["valid"]["end"]
def _update_server_config(config: dict, args):
config["min_clients"] = args.site_num
def _copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name):
dst_path = dst_job_path / dst_app_name / "custom"
os.makedirs(dst_path, exist_ok=True)
src_path = src_job_path / src_app_name / "custom"
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path, dirs_exist_ok=True)
def create_server_app(src_job_path, src_app_name, dst_job_path, site_name, args):
dst_app_name = f"app_{site_name}"
server_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.SERVER_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
_update_server_config(server_config, args)
server_config_filename = dst_config_path / JobConstants.SERVER_JOB_CONFIG
_write_json(server_config, server_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def create_client_app(src_job_path, src_app_name, dst_job_path, site_name, site_indices, args):
dst_app_name = f"app_{site_name}"
client_config = _read_json(src_job_path / src_app_name / "config" / JobConstants.CLIENT_JOB_CONFIG)
dst_config_path = dst_job_path / dst_app_name / "config"
# make target config folders
if not os.path.exists(dst_config_path):
os.makedirs(dst_config_path)
# adjust file contents according to each job's specs
_update_client_config(client_config, args, site_name, site_indices)
client_config_filename = dst_config_path / JobConstants.CLIENT_JOB_CONFIG
_write_json(client_config, client_config_filename)
# copy custom file
_copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name)
def main():
parser = job_config_args_parser()
args = parser.parse_args()
job_name = _get_job_name(args)
src_name = args.task_name + "_base"
src_job_path = pathlib.Path(JOBS_ROOT) / src_name
# create a new job
dst_job_path = pathlib.Path(JOBS_ROOT) / job_name
if not os.path.exists(dst_job_path):
os.makedirs(dst_job_path)
# update meta
meta_config_dst = dst_job_path / JobConstants.META_FILE
meta_config = _read_json(src_job_path / JobConstants.META_FILE)
_update_meta(meta_config, args)
_write_json(meta_config, meta_config_dst)
# create server side app
create_server_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name="server",
args=args,
)
# generate data split
site_indices = split_data(
args.data_path,
args.site_num,
args.data_size,
args.valid_frac,
args.site_name_prefix,
)
# create client side app
for i in range(1, args.site_num + 1):
create_client_app(
src_job_path=src_job_path,
src_app_name="app",
dst_job_path=dst_job_path,
site_name=f"{args.site_name_prefix}{i}",
site_indices=site_indices,
args=args,
)
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/sklearn-kmeans/utils/prepare_job_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
from sklearn.cluster import KMeans, MiniBatchKMeans, kmeans_plusplus
from sklearn.metrics import homogeneity_score
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_opt.sklearn.data_loader import load_data_for_range
class KMeansLearner(Learner):
def __init__(
self,
data_path: str,
train_start: int,
train_end: int,
valid_start: int,
valid_end: int,
random_state: int = None,
max_iter: int = 1,
n_init: int = 1,
reassignment_ratio: int = 0,
):
super().__init__()
self.data_path = data_path
self.train_start = train_start
self.train_end = train_end
self.valid_start = valid_start
self.valid_end = valid_end
self.random_state = random_state
self.max_iter = max_iter
self.n_init = n_init
self.reassignment_ratio = reassignment_ratio
self.train_data = None
self.valid_data = None
self.n_samples = None
self.n_clusters = None
def load_data(self) -> dict:
train_data = load_data_for_range(self.data_path, self.train_start, self.train_end)
valid_data = load_data_for_range(self.data_path, self.valid_start, self.valid_end)
return {"train": train_data, "valid": valid_data}
def initialize(self, parts: dict, fl_ctx: FLContext):
data = self.load_data()
self.train_data = data["train"]
self.valid_data = data["valid"]
# train data size, to be used for setting
# NUM_STEPS_CURRENT_ROUND for potential use in aggregation
self.n_samples = data["train"][-1]
# note that the model needs to be created every round
# due to the available API for center initialization
def train(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
# get training data, note that clustering is unsupervised
# so only x_train will be used
(x_train, y_train, train_size) = self.train_data
if curr_round == 0:
# first round, compute initial center with kmeans++ method
# model will be None for this round
self.n_clusters = global_param["n_clusters"]
center_local, _ = kmeans_plusplus(x_train, n_clusters=self.n_clusters, random_state=self.random_state)
kmeans = None
params = {"center": center_local, "count": None}
else:
center_global = global_param["center"]
# following rounds, local training starting from global center
kmeans = MiniBatchKMeans(
n_clusters=self.n_clusters,
batch_size=self.n_samples,
max_iter=self.max_iter,
init=center_global,
n_init=self.n_init,
reassignment_ratio=self.reassignment_ratio,
random_state=self.random_state,
)
kmeans.fit(x_train)
center_local = kmeans.cluster_centers_
count_local = kmeans._counts
params = {"center": center_local, "count": count_local}
return params, kmeans
def validate(self, curr_round: int, global_param: Optional[dict], fl_ctx: FLContext) -> Tuple[dict, dict]:
# local validation with global center
# fit a standalone KMeans with just the given center
center_global = global_param["center"]
kmeans_global = KMeans(n_clusters=self.n_clusters, init=center_global, n_init=1)
kmeans_global.fit(center_global)
# get validation data, both x and y will be used
(x_valid, y_valid, valid_size) = self.valid_data
y_pred = kmeans_global.predict(x_valid)
homo = homogeneity_score(y_valid, y_pred)
self.log_info(fl_ctx, f"Homogeneity {homo:.4f}")
metrics = {"Homogeneity": homo}
return metrics, kmeans_global
def finalize(self, fl_ctx: FLContext) -> None:
# freeing resources in finalize
del self.train_data
del self.valid_data
self.log_info(fl_ctx, "Freed training resources")
| NVFlare-main | examples/advanced/sklearn-kmeans/jobs/sklearn_kmeans_base/app/custom/kmeans_learner.py |
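# --- Hedged usage sketch (not part of the repository file above) ---
# A standalone look at the two per-round paths in KMeansLearner.train():
# round 0 only computes initial centers with kmeans_plusplus(); later rounds
# run a single MiniBatchKMeans pass seeded with the global centers (here the
# same centers, for simplicity). The toy data and n_clusters are assumptions.
import numpy as np
from sklearn.cluster import MiniBatchKMeans, kmeans_plusplus

rng = np.random.default_rng(0)
x_train = rng.normal(size=(60, 4))
n_clusters = 3

# round 0: center initialization only, no fitted model yet
center_local, _ = kmeans_plusplus(x_train, n_clusters=n_clusters, random_state=0)

# round >= 1: one local mini-batch pass starting from the received centers
kmeans = MiniBatchKMeans(
    n_clusters=n_clusters,
    batch_size=len(x_train),
    max_iter=1,
    init=center_local,
    n_init=1,
    reassignment_ratio=0,
    random_state=0,
)
kmeans.fit(x_train)
print(kmeans.cluster_centers_.shape)  # (3, 4)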
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
from sklearn.cluster import KMeans
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.aggregators.assembler import Assembler
from nvflare.app_common.app_constant import AppConstants
class KMeansAssembler(Assembler):
def __init__(self):
super().__init__(data_kind=DataKind.WEIGHTS)
# Aggregator needs to keep record of historical
# center and count information for mini-batch kmeans
self.center = None
self.count = None
self.n_cluster = 0
def get_model_params(self, dxo: DXO):
data = dxo.data
return {"center": data["center"], "count": data["count"]}
def assemble(self, data: Dict[str, dict], fl_ctx: FLContext) -> DXO:
current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
if current_round == 0:
# First round, collect the information regarding n_feature and n_cluster
# Initialize the aggregated center and count to all zero
client_0 = list(self.collection.keys())[0]
self.n_cluster = self.collection[client_0]["center"].shape[0]
n_feature = self.collection[client_0]["center"].shape[1]
self.center = np.zeros([self.n_cluster, n_feature])
self.count = np.zeros([self.n_cluster])
# perform one round of KMeans over the submitted centers
# to be used as the original center points
# no count for this round
center_collect = []
for _, record in self.collection.items():
center_collect.append(record["center"])
centers = np.concatenate(center_collect)
kmeans_center_initial = KMeans(n_clusters=self.n_cluster)
kmeans_center_initial.fit(centers)
self.center = kmeans_center_initial.cluster_centers_
else:
# Mini-batch k-Means step to assemble the received centers
for center_idx in range(self.n_cluster):
centers_global_rescale = self.center[center_idx] * self.count[center_idx]
# Aggregate center, add new center to previous estimate, weighted by counts
for _, record in self.collection.items():
centers_global_rescale += record["center"][center_idx] * record["count"][center_idx]
self.count[center_idx] += record["count"][center_idx]
# Rescale to compute mean of all points (old and new combined)
alpha = 1 / self.count[center_idx]
centers_global_rescale *= alpha
# Update the global center
self.center[center_idx] = centers_global_rescale
params = {"center": self.center}
dxo = DXO(data_kind=self.expected_data_kind, data=params)
return dxo
| NVFlare-main | examples/advanced/sklearn-kmeans/jobs/sklearn_kmeans_base/app/custom/kmeans_assembler.py |
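# --- Hedged usage sketch (not part of the repository file above) ---
# A numpy-only illustration of the weighted center update that
# KMeansAssembler.assemble() applies for rounds > 0: the running global center
# is rescaled by its accumulated count, client centers are added weighted by
# their counts, and the sum is renormalized. All values are made up.
import numpy as np

global_center = np.array([1.0, 1.0])
global_count = 10.0
client_updates = [
    (np.array([2.0, 0.0]), 5.0),  # (local center, local count)
    (np.array([0.0, 2.0]), 5.0),
]

rescaled = global_center * global_count
for center, count in client_updates:
    rescaled += center * count
    global_count += count
global_center = rescaled / global_count
print(global_center)  # [1. 1.]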
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import torch
from pt.pt_constants import PTConstants
from pt.simple_network import SimpleNetwork
from torch import nn
from torch.optim import SGD
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.abstract.model import (
ModelLearnable,
ModelLearnableKey,
make_model_learnable,
model_learnable_to_dxo,
)
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class PTLearner(Learner):
def __init__(self, data_path="~/data", lr=0.01, epochs=5, exclude_vars=None, analytic_sender_id="analytic_sender"):
"""Simple PyTorch Learner that trains and validates a simple network on the CIFAR10 dataset.
Args:
lr (float, optional): Learning rate. Defaults to 0.01
epochs (int, optional): Epochs. Defaults to 5
exclude_vars (list): List of variables to exclude during model loading.
analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
"""
super().__init__()
self.writer = None
self.persistence_manager = None
self.default_train_conf = None
self.test_loader = None
self.test_data = None
self.n_iterations = None
self.train_loader = None
self.train_dataset = None
self.optimizer = None
self.loss = None
self.device = None
self.model = None
self.data_path = data_path
self.lr = lr
self.epochs = epochs
self.exclude_vars = exclude_vars
self.analytic_sender_id = analytic_sender_id
def initialize(self, parts: dict, fl_ctx: FLContext):
# Training setup
self.model = SimpleNetwork()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.loss = nn.CrossEntropyLoss()
self.optimizer = SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
# Create CIFAR10 dataset for training.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self.train_dataset = CIFAR10(root=self.data_path, transform=transforms, download=True, train=True)
self.train_loader = DataLoader(self.train_dataset, batch_size=32, shuffle=True)
self.n_iterations = len(self.train_loader)
# Create CIFAR10 dataset for validation.
self.test_data = CIFAR10(root=self.data_path, train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=32, shuffle=False)
# Set up the persistence manager to save PT model.
# The default training configuration is used by persistence manager in case no initial model is found.
self.default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self.default_train_conf
)
# metrics streaming setup
self.writer = parts.get(self.analytic_sender_id) # user configuration from config_fed_client.json
if not self.writer:
raise RuntimeError("missing Analytics Sender Component")
def train(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
params = {}
params.update({"learning_rate": self.lr, "momentum": 0.8, "loss": "CrossEntropyLoss"})
self.writer.log_params(params)
self.writer.set_tags({"client": fl_ctx.get_identity_name()})
# Get model weights
try:
dxo = from_shareable(data)
except:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
# Set the model weights
self.model.load_state_dict(state_dict=torch_weights)
self.local_train(fl_ctx, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self.save_local_model(fl_ctx)
# Get the new state dict and send as weights
new_weights = self.model.state_dict()
new_weights = {k: v.cpu().numpy() for k, v in new_weights.items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS, data=new_weights, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self.n_iterations}
)
return outgoing_dxo.to_shareable()
def local_train(self, fl_ctx, abort_signal):
# Basic training
for epoch in range(self.epochs):
self.model.train()
running_loss = 0.0
for i, batch in enumerate(self.train_loader):
if abort_signal.triggered:
return
images, labels = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
predictions = self.model(images)
cost = self.loss(predictions, labels)
cost.backward()
self.optimizer.step()
running_loss += cost.cpu().detach().numpy() / images.size()[0]
if i % 3000 == 0:
self.log_info(
fl_ctx, f"Epoch: {epoch}/{self.epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}"
)
running_loss = 0.0
self.writer.log_text(
f"last running_loss reset at '{len(self.train_loader) * epoch + i}' step",
"running_loss_reset.txt",
)
# Stream training loss at each step
current_step = len(self.train_loader) * epoch + i
self.writer.log_metrics({"train_loss": cost.item(), "running_loss": running_loss}, current_step)
# Stream validation accuracy at the end of each epoch
metric = self.local_validate(abort_signal)
self.writer.log_metric("validation_accuracy", metric, epoch)
def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable:
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_job_id())
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(
data=torch.load(model_path), default_train_conf=self.default_train_conf
)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self.exclude_vars)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
def validate(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
model_owner = "?"
try:
try:
dxo = from_shareable(data)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
if isinstance(dxo.data, ModelLearnable):
dxo.data = dxo.data[ModelLearnableKey.WEIGHTS]
# Extract weights and ensure they are tensor.
model_owner = data.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
self.model.load_state_dict(weights)
# Get validation accuracy
val_accuracy = self.local_validate(abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def local_validate(self, abort_signal):
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
def save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
| NVFlare-main | examples/advanced/experiment-tracking/pt/learner_with_mlflow.py |
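# --- Hedged usage sketch (not part of the repository file above) ---
# A minimal stand-in for the `analytic_sender` component that the PTLearner
# above expects in `parts`; it only needs the five methods the learner calls.
# This stub is an assumption for local experimentation, not an NVFlare API.
class PrintingWriter:
    def log_params(self, params: dict):
        print("params:", params)

    def set_tags(self, tags: dict):
        print("tags:", tags)

    def log_text(self, text: str, filename: str):
        print(f"text -> {filename}: {text}")

    def log_metrics(self, metrics: dict, step: int):
        print(f"step {step}:", metrics)

    def log_metric(self, name: str, value, step: int):
        print(f"step {step}: {name}={value}")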
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNetwork(nn.Module):
def __init__(self):
super(SimpleNetwork, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| NVFlare-main | examples/advanced/experiment-tracking/pt/simple_network.py |
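# --- Hedged usage sketch (not part of the repository file above) ---
# Shape check for SimpleNetwork on a CIFAR-10-sized batch: the two conv+pool
# stages take 32x32 inputs down to 16 feature maps of 5x5 (hence the 16 * 5 * 5
# flatten), and the head emits 10 class logits. The import assumes the
# example's pt/ package is on PYTHONPATH; batch size 4 is arbitrary.
import torch

from pt.simple_network import SimpleNetwork

net = SimpleNetwork()
dummy_batch = torch.randn(4, 3, 32, 32)
logits = net(dummy_batch)
print(logits.shape)  # torch.Size([4, 10])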
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import torch
from pt.pt_constants import PTConstants
from pt.simple_network import SimpleNetwork
from torch import nn
from torch.optim import SGD
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.abstract.model import (
ModelLearnable,
ModelLearnableKey,
make_model_learnable,
model_learnable_to_dxo,
)
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class PTLearner(Learner):
def __init__(
self,
data_path="/tmp/nvflare/tensorboard-streaming",
lr=0.01,
epochs=5,
exclude_vars=None,
analytic_sender_id="analytic_sender",
):
"""Simple PyTorch Learner that trains and validates a simple network on the CIFAR10 dataset.
Args:
lr (float, optional): Learning rate. Defaults to 0.01
epochs (int, optional): Epochs. Defaults to 5
exclude_vars (list): List of variables to exclude during model loading.
analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
"""
super().__init__()
self.writer = None
self.persistence_manager = None
self.default_train_conf = None
self.test_loader = None
self.test_data = None
self.n_iterations = None
self.train_loader = None
self.train_dataset = None
self.optimizer = None
self.loss = None
self.device = None
self.model = None
self.data_path = data_path
self.lr = lr
self.epochs = epochs
self.exclude_vars = exclude_vars
self.analytic_sender_id = analytic_sender_id
def initialize(self, parts: dict, fl_ctx: FLContext):
# Training setup
self.model = SimpleNetwork()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.loss = nn.CrossEntropyLoss()
self.optimizer = SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
# Create CIFAR10 dataset for training.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self.train_dataset = CIFAR10(root=self.data_path, transform=transforms, download=True, train=True)
self.train_loader = DataLoader(self.train_dataset, batch_size=32, shuffle=True)
self.n_iterations = len(self.train_loader)
# Create CIFAR10 dataset for validation.
self.test_data = CIFAR10(root=self.data_path, train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=32, shuffle=False)
# Set up the persistence manager to save PT model.
        # The default training configuration is used by the persistence manager in case no initial model is found.
self.default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self.default_train_conf
)
# Tensorboard streaming setup
self.writer = parts.get(self.analytic_sender_id) # user configuration from config_fed_client.json
if not self.writer: # else use local TensorBoard writer only
self.writer = SummaryWriter(fl_ctx.get_prop(FLContextKey.APP_ROOT))
def train(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# Get model weights
try:
dxo = from_shareable(data)
except:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
# Set the model weights
self.model.load_state_dict(state_dict=torch_weights)
self.local_train(fl_ctx, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self.save_local_model(fl_ctx)
# Get the new state dict and send as weights
new_weights = self.model.state_dict()
new_weights = {k: v.cpu().numpy() for k, v in new_weights.items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS, data=new_weights, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self.n_iterations}
)
return outgoing_dxo.to_shareable()
def local_train(self, fl_ctx, abort_signal):
# Basic training
for epoch in range(self.epochs):
self.model.train()
running_loss = 0.0
for i, batch in enumerate(self.train_loader):
if abort_signal.triggered:
return
images, labels = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
predictions = self.model(images)
cost = self.loss(predictions, labels)
cost.backward()
self.optimizer.step()
running_loss += cost.cpu().detach().numpy() / images.size()[0]
if i % 3000 == 0:
self.log_info(
fl_ctx, f"Epoch: {epoch}/{self.epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}"
)
running_loss = 0.0
# Stream training loss at each step
current_step = len(self.train_loader) * epoch + i
self.writer.add_scalar("train_loss", cost.item(), current_step)
# Stream validation accuracy at the end of each epoch
metric = self.local_validate(abort_signal)
self.writer.add_scalar("validation_accuracy", metric, epoch)
def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable:
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_job_id())
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(
data=torch.load(model_path), default_train_conf=self.default_train_conf
)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self.exclude_vars)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
def validate(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
model_owner = "?"
try:
try:
dxo = from_shareable(data)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
if isinstance(dxo.data, ModelLearnable):
dxo.data = dxo.data[ModelLearnableKey.WEIGHTS]
# Extract weights and ensure they are tensor.
model_owner = data.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
self.model.load_state_dict(weights)
# Get validation accuracy
val_accuracy = self.local_validate(abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def local_validate(self, abort_signal):
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
def save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
| NVFlare-main | examples/advanced/experiment-tracking/pt/learner_with_tb.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
import numpy
from pt.learner_with_tb import PTLearner
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
class TestPTLearner:
@patch.object(PTLearner, "save_local_model")
def test_train_empty_input(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
data = Shareable()
result = learner.train(data, fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(PTLearner, "save_local_model")
def test_train_with_empty_input(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
data = Shareable()
result = learner.train(data, fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(PTLearner, "save_local_model")
def test_train_with_invalid_data_kind(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(DataKind.WEIGHT_DIFF, data={"x": numpy.array([1, 2, 3])})
result = learner.train(dxo.to_shareable(), fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(PTLearner, "save_local_model")
def test_train(self, mock_save_local_model):
fl_ctx = FLContext()
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(data_kind=DataKind.WEIGHTS, data=learner.model.state_dict())
result = learner.train(dxo.to_shareable(), fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.OK
@patch.object(FLContext, "get_engine")
def test_validate_with_empty_input(self, mock_get_engine):
mock_get_engine.get_workspace = Mock()
fl_ctx = FLContext()
fl_ctx.set_prop(ReservedKey.RUN_NUM, 100)
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
data = Shareable()
result = learner.validate(data, fl_ctx=fl_ctx, abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(FLContext, "get_engine")
def test_validate_with_invalid_data_kind(self, mock_get_engine):
mock_get_engine.get_workspace = Mock()
fl_ctx = FLContext()
fl_ctx.set_prop(ReservedKey.RUN_NUM, 100)
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(DataKind.WEIGHT_DIFF, data={"x": numpy.array([1, 2, 3])})
result = learner.validate(dxo.to_shareable(), fl_ctx=fl_ctx, abort_signal=Signal())
assert result.get_return_code() == ReturnCode.BAD_TASK_DATA
@patch.object(FLContext, "get_engine")
def test_validate(self, mock_get_engine):
mock_get_engine.get_workspace = Mock()
fl_ctx = FLContext()
fl_ctx.set_prop(ReservedKey.RUN_NUM, 100)
learner = PTLearner(epochs=1)
learner.initialize(parts={}, fl_ctx=fl_ctx)
dxo = DXO(data_kind=DataKind.WEIGHTS, data=learner.model.state_dict())
        result = learner.validate(dxo.to_shareable(), fl_ctx=fl_ctx, abort_signal=Signal())
assert result.get_return_code() == ReturnCode.OK
| NVFlare-main | examples/advanced/experiment-tracking/pt/test_custom.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import torch
from pt.pt_constants import PTConstants
from pt.simple_network import SimpleNetwork
from torch import nn
from torch.optim import SGD
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.abstract.model import (
ModelLearnable,
ModelLearnableKey,
make_model_learnable,
model_learnable_to_dxo,
)
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.pt.pt_fed_utils import PTModelPersistenceFormatManager
class PTLearner(Learner):
def __init__(self, data_path="~/data", lr=0.01, epochs=5, exclude_vars=None, analytic_sender_id="analytic_sender"):
"""Simple PyTorch Learner that trains and validates a simple network on the CIFAR10 dataset.
Args:
            data_path (str, optional): Path where the CIFAR-10 data will be downloaded. Defaults to "~/data".
            lr (float, optional): Learning rate. Defaults to 0.01.
            epochs (int, optional): Epochs. Defaults to 5.
exclude_vars (list): List of variables to exclude during model loading.
analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
                Metrics are streamed as analytics events (this learner requires the sender to be configured). Defaults to "analytic_sender".
"""
super().__init__()
self.writer = None
self.persistence_manager = None
self.default_train_conf = None
self.test_loader = None
self.test_data = None
self.n_iterations = None
self.train_loader = None
self.train_dataset = None
self.optimizer = None
self.loss = None
self.device = None
self.model = None
self.data_path = data_path
self.lr = lr
self.epochs = epochs
self.exclude_vars = exclude_vars
self.analytic_sender_id = analytic_sender_id
def initialize(self, parts: dict, fl_ctx: FLContext):
# Training setup
self.model = SimpleNetwork()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.loss = nn.CrossEntropyLoss()
self.optimizer = SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
# Create CIFAR10 dataset for training.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self.train_dataset = CIFAR10(root=self.data_path, transform=transforms, download=True, train=True)
self.train_loader = DataLoader(self.train_dataset, batch_size=32, shuffle=True)
self.n_iterations = len(self.train_loader)
# Create CIFAR10 dataset for validation.
self.test_data = CIFAR10(root=self.data_path, train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=32, shuffle=False)
# Set up the persistence manager to save PT model.
        # The default training configuration is used by the persistence manager in case no initial model is found.
self.default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self.default_train_conf
)
self.writer = parts.get(self.analytic_sender_id) # user configuration from config_fed_client.json
if not self.writer:
raise RuntimeError("analytic_sender is not provided")
def train(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# Get model weights
try:
dxo = from_shareable(data)
except:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
# Set the model weights
self.model.load_state_dict(state_dict=torch_weights)
self.local_train(fl_ctx, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self.save_local_model(fl_ctx)
# Get the new state dict and send as weights
new_weights = self.model.state_dict()
new_weights = {k: v.cpu().numpy() for k, v in new_weights.items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS, data=new_weights, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self.n_iterations}
)
return outgoing_dxo.to_shareable()
def local_train(self, fl_ctx, abort_signal):
# Basic training
for epoch in range(self.epochs):
self.model.train()
running_loss = 0.0
for i, batch in enumerate(self.train_loader):
if abort_signal.triggered:
return
images, labels = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
predictions = self.model(images)
cost = self.loss(predictions, labels)
cost.backward()
self.optimizer.step()
running_loss += cost.cpu().detach().numpy() / images.size()[0]
if i % 3000 == 0:
self.log_info(
fl_ctx, f"Epoch: {epoch}/{self.epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}"
)
running_loss = 0.0
# Stream training loss at each step
current_step = len(self.train_loader) * epoch + i
self.writer.log({"train_loss": cost.item()}, current_step)
# Stream validation accuracy at the end of each epoch
metric = self.local_validate(abort_signal)
self.writer.log({"validation_accuracy": metric}, epoch)
def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable:
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_job_id())
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(
data=torch.load(model_path), default_train_conf=self.default_train_conf
)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self.exclude_vars)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
def validate(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
model_owner = "?"
try:
try:
dxo = from_shareable(data)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
if isinstance(dxo.data, ModelLearnable):
dxo.data = dxo.data[ModelLearnableKey.WEIGHTS]
# Extract weights and ensure they are tensor.
model_owner = data.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
self.model.load_state_dict(weights)
# Get validation accuracy
val_accuracy = self.local_validate(abort_signal)
self.writer.log({"validation_accuracy": val_accuracy})
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def local_validate(self, abort_signal):
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
def save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
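# --- Hedged sketch (editor's addition) ---
# Unlike the TensorBoard variant, this learner requires an analytics sender in
# `parts` and calls writer.log(<metrics dict>, <step>) on it (a WandB-style
# interface). A minimal stand-in with that interface, usable for local smoke
# tests via learner.initialize(parts={"analytic_sender": _PrintWriter()}, fl_ctx),
# might look like this; the class is illustrative and not part of NVFlare.
class _PrintWriter:
    """Minimal stand-in for the expected metrics writer interface (assumed)."""
    def log(self, metrics: dict, step: int = None):
        # mirrors the calls made in local_train() and validate() above
        print(f"step={step} metrics={metrics}")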
| NVFlare-main | examples/advanced/experiment-tracking/pt/learner_with_wandb.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PTConstants:
PTServerName = "server"
PTFileModelName = "FL_global_model.pt"
PTLocalModelName = "local_model.pt"
PTModelsDir = "models"
CrossValResultsJsonFilename = "cross_val_results.json"
| NVFlare-main | examples/advanced/experiment-tracking/pt/pt_constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
from splitnn.cifar10_vertical_data_splitter import Cifar10VerticalDataSplitter
from nvflare.apis.fl_constant import ReservedKey
from nvflare.apis.fl_context import FLContext
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--split_dir", type=str, default="/tmp/cifar10_vert_splits", help="output folder")
parser.add_argument("--overlap", type=int, default=10_000, help="number of overlapping samples")
args = parser.parse_args()
splitter = Cifar10VerticalDataSplitter(split_dir=args.split_dir, overlap=args.overlap)
# set up a dummy context for logging
fl_ctx = FLContext()
fl_ctx.set_prop(ReservedKey.IDENTITY_NAME, "local")
fl_ctx.set_prop(ReservedKey.RUN_NUM, "_")
splitter.split(fl_ctx) # will download to CIFAR10_ROOT defined in
# Cifar10DataSplitter
if __name__ == "__main__":
main()
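# Hedged usage note (editor's addition): a typical invocation might look like
#   python cifar10_split_data_vertical.py --split_dir /tmp/cifar10_vert_splits --overlap 10000
# which, via Cifar10VerticalDataSplitter.split(), writes site-1.npy, site-2.npy,
# overlap.npy and summary.txt under the chosen --split_dir.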
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/cifar10_split_data_vertical.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--config_file",
type=str,
default="./config_fed_client.json",
help="config file in JSON format",
)
parser.add_argument(
"--intersection_file",
type=str,
help="Intersection file with overlapping data indices",
)
args = parser.parse_args()
with open(args.config_file, "r") as f:
config = json.load(f)
config["INTERSECTION_FILE"] = args.intersection_file
with open(args.config_file, "w") as f:
json.dump(config, f, indent=4)
f.write("\n")
print(f"Modified {args.config_file} to use INTERSECTION_FILE={config['INTERSECTION_FILE']}")
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/set_intersection_file.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from psi.cifar10_local_psi import Cifar10LocalPSI
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/src/psi/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from typing import List
import numpy as np
from nvflare.app_common.psi.psi_spec import PSI
class Cifar10LocalPSI(PSI):
def __init__(self, psi_writer_id: str, data_path: str = "/tmp/data.csv"):
super().__init__(psi_writer_id)
self.data_path = data_path
self.data = {}
if not os.path.isfile(self.data_path):
raise RuntimeError(f"invalid data path {data_path}")
def load_items(self) -> List[str]:
_ext = os.path.splitext(self.data_path)[1]
items = np.load(self.data_path)
return [str(i) for i in items]
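# --- Hedged usage sketch (editor's addition) ---
# Cifar10LocalPSI expects `data_path` to point at a NumPy file of sample IDs,
# e.g. the site-1.npy produced by Cifar10VerticalDataSplitter. The snippet
# below writes a tiny stand-in file and checks that load_items() returns the
# IDs as strings; "psi_writer" is a placeholder component id.
if __name__ == "__main__":
    import tempfile
    tmp_path = os.path.join(tempfile.mkdtemp(), "site-1.npy")
    np.save(tmp_path, np.array([3, 1, 2]))
    local_psi = Cifar10LocalPSI(psi_writer_id="psi_writer", data_path=tmp_path)
    print(local_psi.load_items())  # ['3', '1', '2']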
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/src/psi/cifar10_local_psi.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pt.networks.cifar10_nets import ModerateCNN
class SplitNN(ModerateCNN):
def __init__(self, split_id):
super().__init__()
if split_id not in [0, 1]:
raise ValueError(f"Only supports split_id '0' or '1' but was {self.split_id}")
self.split_id = split_id
if self.split_id == 0:
self.split_forward = self.conv_layer
elif self.split_id == 1:
self.split_forward = self.fc_layer
else:
raise ValueError(f"Expected split_id to be '0' or '1' but was {self.split_id}")
def forward(self, x):
x = self.split_forward(x)
return x
def get_split_id(self):
return self.split_id
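# --- Hedged usage sketch (editor's addition) ---
# A rough illustration of how the two halves fit together, assuming ModerateCNN
# exposes `conv_layer` and `fc_layer` as used above and that the label side
# consumes the flattened conv features (as CIFAR10LearnerSplitNN does). Shapes
# are inspected at runtime rather than hard-coded.
if __name__ == "__main__":
    import torch
    data_side = SplitNN(split_id=0)   # convolutional front end (image site)
    label_side = SplitNN(split_id=1)  # fully connected classifier (label site)
    images = torch.randn(2, 3, 32, 32)       # fake CIFAR-10 batch
    activations = data_side(images)          # computed on the data site
    flat = activations.flatten(start_dim=1)  # what would be exchanged
    logits = label_side(flat)                # computed on the label site
    print(activations.shape, flat.shape, logits.shape)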
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/src/splitnn/split_nn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from timeit import default_timer as timer
import numpy as np
import torch
import torch.optim as optim
from splitnn.cifar10_splitnn_dataset import CIFAR10SplitNN
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from nvflare.apis.dxo import DXO, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.workflows.splitnn_workflow import SplitNNConstants, SplitNNDataKind
from nvflare.app_opt.pt.decomposers import TensorDecomposer
from nvflare.fuel.f3.stats_pool import StatsPoolManager
from nvflare.fuel.utils import fobs
class CIFAR10LearnerSplitNN(Learner):
def __init__(
self,
dataset_root: str = "./dataset",
intersection_file: str = None,
lr: float = 1e-2,
model: dict = None,
analytic_sender_id: str = "analytic_sender",
fp16: bool = True,
val_freq: int = 1000,
):
"""Simple CIFAR-10 Trainer for split learning.
Args:
dataset_root: directory with CIFAR-10 data.
intersection_file: Optional. intersection file specifying overlapping indices between both clients.
Defaults to `None`, i.e. the whole training dataset is used.
lr: learning rate.
model: Split learning model.
analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
fp16: If `True`, convert activations and gradients send between clients to `torch.float16`.
Reduces bandwidth needed for communication but might impact model accuracy.
val_freq: how often to perform validation in rounds. Defaults to 1000. No validation if <= 0.
"""
super().__init__()
self.dataset_root = dataset_root
self.intersection_file = intersection_file
self.lr = lr
self.model = model
self.analytic_sender_id = analytic_sender_id
self.fp16 = fp16
self.val_freq = val_freq
self.target_names = None
self.app_root = None
self.current_round = None
self.num_rounds = None
self.batch_size = None
self.writer = None
self.client_name = None
self.other_client = None
self.device = None
self.optimizer = None
self.criterion = None
self.transform_train = None
self.transform_valid = None
self.train_dataset = None
self.valid_dataset = None
self.split_id = None
self.train_activations = None
self.train_batch_indices = None
self.train_size = 0
self.val_loss = []
self.val_labels = []
self.val_pred_labels = []
self.compute_stats_pool = None
# use FOBS serializing/deserializing PyTorch tensors
fobs.register(TensorDecomposer)
def _get_model(self, fl_ctx: FLContext):
"""Get model from client config. Modelled after `PTFileModelPersistor`."""
if isinstance(self.model, str):
# treat it as model component ID
model_component_id = self.model
engine = fl_ctx.get_engine()
self.model = engine.get_component(model_component_id)
if not self.model:
self.log_error(fl_ctx, f"cannot find model component '{model_component_id}'")
return
if self.model and isinstance(self.model, dict):
# try building the model
try:
engine = fl_ctx.get_engine()
# use provided or default optimizer arguments and add the model parameters
if "args" not in self.model:
self.model["args"] = {}
self.model = engine.build_component(self.model)
except Exception as e:
self.system_panic(
f"Exception while parsing `model`: " f"{self.model} with Exception {e}",
fl_ctx,
)
return
if self.model and not isinstance(self.model, torch.nn.Module):
self.system_panic(f"expect model to be torch.nn.Module but got {type(self.model)}: {self.model}", fl_ctx)
return
if self.model is None:
self.system_panic(f"Model wasn't built correctly! It is {self.model}", fl_ctx)
return
self.log_info(fl_ctx, f"Running model {self.model}")
def initialize(self, parts: dict, fl_ctx: FLContext):
t_start = timer()
self._get_model(fl_ctx=fl_ctx)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = self.model.to(self.device)
self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
self.criterion = torch.nn.CrossEntropyLoss()
self.transform_train = transforms.Compose(
[
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Pad(4, padding_mode="reflect"),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
self.transform_valid = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
self.client_name = fl_ctx.get_identity_name()
self.split_id = self.model.get_split_id()
self.log_info(fl_ctx, f"Running `split_id` {self.split_id} on site `{self.client_name}`")
if self.split_id == 0: # data side
data_returns = "image"
elif self.split_id == 1: # label side
data_returns = "label"
else:
raise ValueError(f"Expected split_id to be '0' or '1' but was {self.split_id}")
if self.intersection_file is not None:
_intersect_indices = np.loadtxt(self.intersection_file)
else:
_intersect_indices = None
self.train_dataset = CIFAR10SplitNN(
root=self.dataset_root,
train=True,
download=True,
transform=self.transform_train,
returns=data_returns,
intersect_idx=_intersect_indices,
)
self.valid_dataset = CIFAR10SplitNN(
root=self.dataset_root,
train=False,
download=False,
transform=self.transform_valid,
returns=data_returns,
intersect_idx=None, # TODO: support validation intersect indices
)
self.train_size = len(self.train_dataset)
if self.train_size <= 0:
raise ValueError(f"Expected train dataset size to be larger zero but got {self.train_size}")
self.log_info(fl_ctx, f"Training with {self.train_size} overlapping indices of {self.train_dataset.orig_size}.")
# Select local TensorBoard writer or event-based writer for streaming
if self.split_id == 1: # metrics can only be computed for client with labels
self.writer = parts.get(self.analytic_sender_id) # user configured config_fed_client.json for streaming
if not self.writer: # use local TensorBoard writer only
self.writer = SummaryWriter(self.app_root)
# register aux message handlers
engine = fl_ctx.get_engine()
if self.split_id == 1:
engine.register_aux_message_handler(
topic=SplitNNConstants.TASK_TRAIN_LABEL_STEP, message_handle_func=self._aux_train_label_side
)
engine.register_aux_message_handler(
topic=SplitNNConstants.TASK_VALID_LABEL_STEP, message_handle_func=self._aux_val_label_side
)
self.log_debug(fl_ctx, f"Registered aux message handlers for split_id {self.split_id}")
self.compute_stats_pool = StatsPoolManager.add_time_hist_pool(
"Compute_Time", "Compute time in secs", scope=self.client_name
)
self.compute_stats_pool.record_value(category="initialize", value=timer() - t_start)
""" training steps """
def _train_step_data_side(self, batch_indices):
t_start = timer()
self.model.train()
inputs = self.train_dataset.get_batch(batch_indices)
inputs = inputs.to(self.device)
self.train_activations = self.model.forward(inputs) # keep on site-1
self.compute_stats_pool.record_value(category="_train_step_data_side", value=timer() - t_start)
return self.train_activations.detach().requires_grad_() # x to be sent to other client
def _val_step_data_side(self, batch_indices):
t_start = timer()
self.model.eval()
inputs = self.valid_dataset.get_batch(batch_indices)
inputs = inputs.to(self.device)
_val_activations = self.model.forward(inputs) # keep on site-1
self.compute_stats_pool.record_value(category="_val_step_data_side", value=timer() - t_start)
return _val_activations.detach().flatten(start_dim=1, end_dim=-1) # x to be sent to other client
def _train_step_label_side(self, batch_indices, activations, fl_ctx: FLContext):
t_start = timer()
self.model.train()
self.optimizer.zero_grad()
labels = self.train_dataset.get_batch(batch_indices)
labels = labels.to(self.device)
if self.fp16:
activations = activations.type(torch.float32) # return to default pytorch precision
activations = activations.to(self.device)
activations.requires_grad_(True)
pred = self.model.forward(activations)
loss = self.criterion(pred, labels)
loss.backward()
_, pred_labels = torch.max(pred, 1)
acc = (pred_labels == labels).sum() / len(labels)
if self.current_round % 100 == 0:
self.log_info(
fl_ctx,
f"Round {self.current_round}/{self.num_rounds} train_loss: {loss.item():.4f}, train_accuracy: {acc.item():.4f}",
)
if self.writer:
self.writer.add_scalar("train_loss", loss, self.current_round)
self.writer.add_scalar("train_accuracy", acc, self.current_round)
self.optimizer.step()
self.compute_stats_pool.record_value(category="_train_step_label_side", value=timer() - t_start)
if not isinstance(activations.grad, torch.Tensor):
raise ValueError("No valid gradients available!")
# gradient to be returned to other client
if self.fp16:
return activations.grad.type(torch.float16)
else:
return activations.grad
def _val_step_label_side(self, batch_indices, activations, fl_ctx: FLContext):
t_start = timer()
self.model.eval()
labels = self.valid_dataset.get_batch(batch_indices)
labels = labels.to(self.device)
if self.fp16:
activations = activations.type(torch.float32) # return to default pytorch precision
activations = activations.to(self.device)
pred = self.model.forward(activations)
loss = self.criterion(pred, labels)
self.val_loss.append(loss.unsqueeze(0)) # unsqueeze needed for later concatenation
_, pred_labels = torch.max(pred, 1)
self.val_pred_labels.extend(pred_labels.unsqueeze(0))
self.val_labels.extend(labels.unsqueeze(0))
self.compute_stats_pool.record_value(category="_val_step_label_side", value=timer() - t_start)
def _log_validation(self, fl_ctx: FLContext):
if len(self.val_loss) > 0:
loss = torch.mean(torch.cat(self.val_loss))
_val_pred_labels = torch.cat(self.val_pred_labels)
_val_labels = torch.cat(self.val_labels)
acc = (_val_pred_labels == _val_labels).sum() / len(_val_labels)
self.log_info(
fl_ctx,
f"Round {self.current_round}/{self.num_rounds} val_loss: {loss.item():.4f}, val_accuracy: {acc.item():.4f}",
)
if self.writer:
self.writer.add_scalar("val_loss", loss, self.current_round)
self.writer.add_scalar("val_accuracy", acc, self.current_round)
self.val_loss = []
self.val_labels = []
self.val_pred_labels = []
def _backward_step_data_side(self, gradient, fl_ctx: FLContext):
t_start = timer()
self.model.train()
self.optimizer.zero_grad()
if self.fp16:
gradient = gradient.type(torch.float32) # return to default pytorch precision
gradient = gradient.to(self.device)
self.train_activations.backward(gradient=gradient.reshape(self.train_activations.shape))
self.optimizer.step()
self.log_debug(
fl_ctx, f"{self.client_name} runs model with `split_id` {self.split_id} for backward step on data side."
)
self.compute_stats_pool.record_value(category="_backward_step_data_side", value=timer() - t_start)
def _train_forward_backward_data_side(self, fl_ctx: FLContext, gradient=None) -> Shareable:
t_start = timer()
# combine forward and backward on data client
# 1. perform backward step if gradients provided
if gradient is not None:
result_backward = self._backward_data_side(gradient, fl_ctx=fl_ctx)
assert (
result_backward.get_return_code() == ReturnCode.OK
), f"Backward step failed with return code {result_backward.get_return_code()}"
# 2. compute activations
activations = self._train_data_side(fl_ctx=fl_ctx)
self.compute_stats_pool.record_value(category="_train_forward_backward_data_side", value=timer() - t_start)
return activations.flatten(start_dim=1, end_dim=-1) # keep batch dim
def _train_data_side(self, fl_ctx: FLContext) -> Shareable:
t_start = timer()
if self.split_id != 0:
raise ValueError(
f"Expected `split_id` 0. It doesn't make sense to run `_train_data_side` with `split_id` {self.split_id}"
)
self.log_debug(fl_ctx, f"Train data side in round {self.current_round} of {self.num_rounds} rounds.")
act = self._train_step_data_side(batch_indices=self.train_batch_indices)
self.log_debug(
fl_ctx, f"{self.client_name} finished model with `split_id` {self.split_id} for train on data side."
)
self.compute_stats_pool.record_value(category="_train_data_side", value=timer() - t_start)
self.log_debug(fl_ctx, f"Sending train data activations: {type(act)}")
if self.fp16:
return act.type(torch.float16)
else:
return act
def _aux_train_label_side(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
"""train aux message handler"""
t_start = timer()
if self.split_id != 1:
raise ValueError(
f"Expected `split_id` 1. It doesn't make sense to run `_aux_train_label_side` with `split_id` {self.split_id}"
)
self.current_round = request.get_header(AppConstants.CURRENT_ROUND)
self.num_rounds = request.get_header(AppConstants.NUM_ROUNDS)
self.log_debug(fl_ctx, f"Train label in round {self.current_round} of {self.num_rounds} rounds.")
dxo = from_shareable(request)
if dxo.data_kind != SplitNNDataKind.ACTIVATIONS:
raise ValueError(f"Expected data kind {SplitNNDataKind.ACTIVATIONS} but received {dxo.data_kind}")
batch_indices = dxo.get_meta_prop(SplitNNConstants.BATCH_INDICES)
if batch_indices is None:
raise ValueError("No batch indices in DXO!")
activations = dxo.data.get(SplitNNConstants.DATA)
if activations is None:
raise ValueError("No activations in DXO!")
gradient = self._train_step_label_side(
batch_indices=batch_indices, activations=fobs.loads(activations), fl_ctx=fl_ctx
)
self.log_debug(fl_ctx, "_aux_train_label_side finished.")
return_shareable = DXO(
data={SplitNNConstants.DATA: fobs.dumps(gradient)}, data_kind=SplitNNDataKind.GRADIENT
).to_shareable()
self.compute_stats_pool.record_value(category="_aux_train_label_side", value=timer() - t_start)
self.log_debug(fl_ctx, f"Sending train label return_shareable: {type(return_shareable)}")
return return_shareable
def _aux_val_label_side(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
"""validation aux message handler"""
t_start = timer()
if self.split_id != 1:
raise ValueError(
f"Expected `split_id` 1. It doesn't make sense to run `_aux_train_label_side` with `split_id` {self.split_id}"
)
val_round = request.get_header(AppConstants.CURRENT_ROUND)
val_num_rounds = request.get_header(AppConstants.NUM_ROUNDS)
self.log_debug(fl_ctx, f"Validate label in round {self.current_round} of {self.num_rounds} rounds.")
dxo = from_shareable(request)
if dxo.data_kind != SplitNNDataKind.ACTIVATIONS:
raise ValueError(f"Expected data kind {SplitNNDataKind.ACTIVATIONS} but received {dxo.data_kind}")
batch_indices = dxo.get_meta_prop(SplitNNConstants.BATCH_INDICES)
if batch_indices is None:
raise ValueError("No batch indices in DXO!")
activations = dxo.data.get(SplitNNConstants.DATA)
if activations is None:
raise ValueError("No activations in DXO!")
self._val_step_label_side(batch_indices=batch_indices, activations=fobs.loads(activations), fl_ctx=fl_ctx)
if val_round == val_num_rounds - 1:
self._log_validation(fl_ctx)
self.compute_stats_pool.record_value(category="_aux_val_label_side", value=timer() - t_start)
return make_reply(ReturnCode.OK)
def _backward_data_side(self, gradient, fl_ctx: FLContext) -> Shareable:
t_start = timer()
if self.split_id != 0:
raise ValueError(
f"Expected `split_id` 0. It doesn't make sense to run `_backward_data_side` with `split_id` {self.split_id}"
)
self._backward_step_data_side(gradient=fobs.loads(gradient), fl_ctx=fl_ctx)
self.log_debug(fl_ctx, "_backward_data_side finished.")
self.compute_stats_pool.record_value(category="_backward_data_side", value=timer() - t_start)
return make_reply(ReturnCode.OK)
# Model initialization task (one time only in beginning)
def init_model(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
t_start = timer()
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
n_loaded = 0
for var_name in local_var_dict:
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
n_loaded += 1
except Exception as e:
raise ValueError(f"Convert weight from {var_name} failed.") from e
self.model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError("No global weights loaded!")
self.compute_stats_pool.record_value(category="init_model", value=timer() - t_start)
self.log_info(fl_ctx, "init_model finished.")
return make_reply(ReturnCode.OK)
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Main training logic."""
        t_start = timer()
engine = fl_ctx.get_engine()
self.num_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
if not self.num_rounds:
raise ValueError("No number of rounds available.")
self.batch_size = shareable.get_header(SplitNNConstants.BATCH_SIZE)
self.target_names = np.asarray(
shareable.get_header(SplitNNConstants.TARGET_NAMES)
) # convert to array for string matching below
self.other_client = self.target_names[self.target_names != self.client_name][0]
self.log_info(fl_ctx, f"Starting training of {self.num_rounds} rounds with batch size {self.batch_size}")
gradients = None # initial gradients
for _curr_round in range(self.num_rounds):
self.current_round = _curr_round
if self.split_id != 0:
continue # only run this logic on first site
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_debug(fl_ctx, f"Starting current round={self.current_round} of {self.num_rounds}.")
            self.train_batch_indices = np.random.randint(0, self.train_size, self.batch_size)
# Site-1 image forward & backward (from 2nd round)
fl_ctx.set_prop(AppConstants.CURRENT_ROUND, self.current_round, private=True, sticky=False)
activations = self._train_forward_backward_data_side(fl_ctx, gradients)
# Site-2 label loss & backward
dxo = DXO(data={SplitNNConstants.DATA: fobs.dumps(activations)}, data_kind=SplitNNDataKind.ACTIVATIONS)
dxo.set_meta_prop(SplitNNConstants.BATCH_INDICES, self.train_batch_indices)
data_shareable = dxo.to_shareable()
data_shareable.set_header(AppConstants.CURRENT_ROUND, self.current_round)
data_shareable.set_header(AppConstants.NUM_ROUNDS, self.num_rounds)
data_shareable.add_cookie(AppConstants.CONTRIBUTION_ROUND, self.current_round)
# send to other side
result = engine.send_aux_request(
targets=self.other_client,
topic=SplitNNConstants.TASK_TRAIN_LABEL_STEP,
request=data_shareable,
timeout=SplitNNConstants.TIMEOUT,
fl_ctx=fl_ctx,
)
shareable = result.get(self.other_client)
if shareable is not None:
dxo = from_shareable(shareable)
if dxo.data_kind != SplitNNDataKind.GRADIENT:
raise ValueError(f"Expected data kind {SplitNNDataKind.GRADIENT} but received {dxo.data_kind}")
gradients = dxo.data.get(SplitNNConstants.DATA)
else:
raise ValueError(f"No message returned from {self.other_client}!")
self.log_debug(fl_ctx, f"Ending current round={self.current_round}.")
if self.val_freq > 0:
if _curr_round % self.val_freq == 0:
self._validate(fl_ctx)
self.compute_stats_pool.record_value(category="train", value=timer() - t_start)
return make_reply(ReturnCode.OK)
def _validate(self, fl_ctx: FLContext):
t_start = timer()
engine = fl_ctx.get_engine()
idx = np.arange(len(self.valid_dataset))
n_batches = int(np.ceil(len(self.valid_dataset) / self.batch_size))
for _val_round, _val_batch_indices in enumerate(np.array_split(idx, n_batches)):
activations = self._val_step_data_side(batch_indices=_val_batch_indices)
# Site-2 label loss & accuracy
dxo = DXO(data={SplitNNConstants.DATA: fobs.dumps(activations)}, data_kind=SplitNNDataKind.ACTIVATIONS)
dxo.set_meta_prop(SplitNNConstants.BATCH_INDICES, _val_batch_indices)
data_shareable = dxo.to_shareable()
data_shareable.set_header(AppConstants.CURRENT_ROUND, _val_round)
data_shareable.set_header(AppConstants.NUM_ROUNDS, n_batches)
data_shareable.add_cookie(AppConstants.CONTRIBUTION_ROUND, _val_round)
# send to other side to validate
engine.send_aux_request(
targets=self.other_client,
topic=SplitNNConstants.TASK_VALID_LABEL_STEP,
request=data_shareable,
timeout=SplitNNConstants.TIMEOUT,
fl_ctx=fl_ctx,
)
self.compute_stats_pool.record_value(category="_validate", value=timer() - t_start)
self.log_debug(fl_ctx, "finished validation.")
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/src/splitnn/cifar10_learner_splitnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from splitnn.cifar10_learner_splitnn import CIFAR10LearnerSplitNN
from splitnn.cifar10_splitnn_dataset import CIFAR10SplitNN
from splitnn.cifar10_vertical_data_splitter import Cifar10VerticalDataSplitter
from splitnn.split_nn import SplitNN
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/src/splitnn/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import numpy as np
from pt.utils.cifar10_data_utils import get_site_class_summary, load_cifar10_data
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
class Cifar10VerticalDataSplitter(FLComponent):
def __init__(self, split_dir: str = None, overlap: int = 10_000, seed: int = 0):
super().__init__()
self.split_dir = split_dir
self.overlap = overlap
self.seed = seed
if self.split_dir is None:
raise ValueError("You need to define a valid `split_dir` when splitting the data.")
if overlap <= 0:
raise ValueError(f"Alpha should be larger 0 but was {overlap}!")
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.split(fl_ctx)
def split(self, fl_ctx: FLContext):
np.random.seed(self.seed)
self.log_info(fl_ctx, f"Partition CIFAR-10 dataset into vertically with {self.overlap} overlapping samples.")
site_idx, class_sum = self._split_data()
# write to files
if not os.path.isdir(self.split_dir):
os.makedirs(self.split_dir)
sum_file_name = os.path.join(self.split_dir, "summary.txt")
with open(sum_file_name, "w") as sum_file:
sum_file.write("Class counts for overlap: \n")
sum_file.write(json.dumps(class_sum))
for _site, _idx in site_idx.items():
site_file_name = os.path.join(self.split_dir, f"{_site}.npy")
self.log_info(fl_ctx, f"save {site_file_name}")
np.save(site_file_name, _idx)
def _split_data(self):
train_label = load_cifar10_data()
n_samples = len(train_label)
if self.overlap > n_samples:
raise ValueError(
f"Chosen overlap of {self.overlap} is larger than " f"train dataset with {n_samples} entries."
)
sample_idx = np.arange(0, n_samples)
overlap_idx = np.random.choice(sample_idx, size=np.int64(self.overlap), replace=False)
remain_idx = list(set(sample_idx) - set(overlap_idx))
idx_1 = np.concatenate((overlap_idx, np.array(remain_idx)))
# adding n_samples to remain_idx of site-2 to make sure no overlap
# with idx_1
idx_2 = np.concatenate((overlap_idx, np.array(remain_idx) + n_samples))
# shuffle indexes again for client sites to simulate real world
# scenario
np.random.shuffle(idx_1)
np.random.shuffle(idx_2)
site_idx = {"overlap": overlap_idx, "site-1": idx_1, "site-2": idx_2}
# collect class summary
class_sum = get_site_class_summary(train_label, {"overlap": overlap_idx})
return site_idx, class_sum
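# --- Hedged sketch of the index bookkeeping (editor's addition) ---
# _split_data() above needs the real CIFAR-10 labels; this miniature version
# reproduces only the index logic on 10 fake samples with an overlap of 4, so
# the output structure (shared indices plus disjoint remainders) is easy to
# inspect without downloading anything.
if __name__ == "__main__":
    np.random.seed(0)
    n_samples, overlap = 10, 4
    sample_idx = np.arange(0, n_samples)
    overlap_idx = np.random.choice(sample_idx, size=np.int64(overlap), replace=False)
    remain_idx = list(set(sample_idx) - set(overlap_idx))
    idx_1 = np.concatenate((overlap_idx, np.array(remain_idx)))
    idx_2 = np.concatenate((overlap_idx, np.array(remain_idx) + n_samples))  # offset keeps non-overlap parts disjoint
    print("overlap:", overlap_idx)
    print("site-1 :", idx_1)
    print("site-2 :", idx_2)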
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/src/splitnn/cifar10_vertical_data_splitter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torchvision import datasets
class CIFAR10SplitNN(object): # TODO: use torch.utils.data.Dataset with batch sampling
def __init__(self, root, train=True, transform=None, download=False, returns="all", intersect_idx=None):
"""CIFAR-10 dataset with index to extract a mini-batch based on given batch indices
Useful for SplitNN training
Args:
root: data root
train: whether to use the training or validation split (default: True)
transform: image transforms
download: whether to download the data (default: False)
returns: specify which data the client has
            intersect_idx: indices of samples intersecting between both
                participating sites. If provided, only that subset is used;
                otherwise the whole set is used. Intersection indices will be
                sorted to ensure that data is aligned on both sites.
Returns:
A PyTorch dataset
"""
self.root = root
self.train = train
self.transform = transform
self.download = download
self.returns = returns
self.intersect_idx = intersect_idx
self.orig_size = 0
if self.intersect_idx is not None:
self.intersect_idx = np.sort(self.intersect_idx).astype(np.int64)
self.data, self.target = self.__build_cifar_subset__()
def __build_cifar_subset__(self):
# if intersect index provided, extract subset, otherwise use the whole
# set
        cifar_dataobj = datasets.CIFAR10(root=self.root, train=self.train, transform=self.transform, download=self.download)
data = cifar_dataobj.data
target = np.array(cifar_dataobj.targets)
self.orig_size = len(data)
if self.intersect_idx is not None:
data = data[self.intersect_idx]
target = target[self.intersect_idx]
return data, target
def __getitem__(self, index):
img, target = self.data[index], self.target[index]
if self.transform is not None:
img = self.transform(img)
return img, target
    # TODO: this can probably be made more efficient using batch_sampler
def get_batch(self, batch_indices):
img_batch = []
target_batch = []
for idx in batch_indices:
img, target = self.__getitem__(idx)
img_batch.append(img)
target_batch.append(torch.tensor(target, dtype=torch.long))
img_batch = torch.stack(img_batch, dim=0)
target_batch = torch.stack(target_batch, dim=0)
if self.returns == "all":
return img_batch, target_batch
elif self.returns == "image":
return img_batch
elif self.returns == "label":
return target_batch
else:
raise ValueError(f"Expected `returns` to be 'all', 'image', or 'label', but got '{self.returns}'")
def __len__(self):
return len(self.data)
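# --- Hedged usage sketch (editor's addition) ---
# Minimal example of batch extraction by explicit indices, as the split-learning
# learner does; `returns` controls which half of the data a site sees. The
# "./dataset" root is illustrative and CIFAR-10 is downloaded there if missing.
if __name__ == "__main__":
    from torchvision import transforms
    to_tensor = transforms.ToTensor()
    image_site = CIFAR10SplitNN(root="./dataset", train=True, download=True, transform=to_tensor, returns="image")
    label_site = CIFAR10SplitNN(root="./dataset", train=True, download=False, transform=to_tensor, returns="label")
    batch_indices = [0, 5, 7]
    print(image_site.get_batch(batch_indices).shape)  # torch.Size([3, 3, 32, 32])
    print(label_site.get_batch(batch_indices))        # tensor with the 3 class labels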
| NVFlare-main | examples/advanced/vertical_federated_learning/cifar10-splitnn/src/splitnn/cifar10_splitnn_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from typing import List
import pandas as pd
from nvflare.app_common.psi.psi_spec import PSI
class LocalPSI(PSI):
def __init__(self, psi_writer_id: str, data_root_dir: str = "/tmp/nvflare/psi/data"):
super().__init__(psi_writer_id)
self.data_root_dir = data_root_dir
self.data = {}
def load_items(self) -> List[str]:
site = self.fl_ctx.get_identity_name()
data_path = os.path.join(self.data_root_dir, site, "data.csv")
if os.path.isfile(data_path):
df = pd.read_csv(data_path)
else:
raise RuntimeError(f"invalid data path {data_path}")
        # Important: the PSI algorithm requires that the items are unique
items = df.email_address.to_list()
return items
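# --- Hedged usage sketch (editor's addition) ---
# LocalPSI reads <data_root_dir>/<site name>/data.csv and uses the
# `email_address` column as the PSI items. This builds a toy layout for a
# hypothetical site "site-1"; in a real job the site name comes from
# fl_ctx.get_identity_name(), so load_items() is not called directly here.
if __name__ == "__main__":
    root = "/tmp/nvflare/psi/data"
    os.makedirs(os.path.join(root, "site-1"), exist_ok=True)
    toy = pd.DataFrame({"email_address": ["[email protected]", "[email protected]"]})
    toy.to_csv(os.path.join(root, "site-1", "data.csv"), index=False)
    print(pd.read_csv(os.path.join(root, "site-1", "data.csv")).email_address.to_list())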
| NVFlare-main | examples/advanced/psi/user_email_match/jobs/user_email_match/app/custom/local_psi.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import uuid
from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner, api_command_wrapper
def read_json(filename):
assert os.path.isfile(filename), f"{filename} does not exist!"
with open(filename, "r") as f:
return json.load(f)
def write_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=4)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--admin_dir", type=str, default="./admin/", help="Path to admin directory.")
parser.add_argument("--username", type=str, default="[email protected]", help="Admin username.")
parser.add_argument("--job", type=str, default="cifar10_fedavg", help="Path to job")
parser.add_argument("--poc", action="store_true", help="Whether admin does not use SSL.")
parser.add_argument("--central", action="store_true", help="Whether we assume all data is centralized.")
parser.add_argument(
"--train_split_root", type=str, default="/tmp/cifar10_splits", help="Location where to save data splits."
)
parser.add_argument(
"--alpha",
type=float,
default=0.0,
help="Value controls the degree of heterogeneity. "
"Lower values of alpha means higher heterogeneity."
"Values of <= 0. means no data sampling. "
"Assumes central training.",
)
args = parser.parse_args()
assert os.path.isdir(args.admin_dir), f"admin directory does not exist at {args.admin_dir}"
# Initialize the runner
runner = FLAdminAPIRunner(
username=args.username,
admin_dir=args.admin_dir,
poc=args.poc,
debug=False,
)
# update alpha and split data dir
job_name = os.path.basename(args.job)
client_config_filename = os.path.join(args.job, job_name, "config", "config_fed_client.json")
server_config_filename = os.path.join(args.job, job_name, "config", "config_fed_server.json")
meta_config_filename = os.path.join(args.job, "meta.json")
if args.alpha > 0.0:
client_config = read_json(client_config_filename)
server_config = read_json(server_config_filename)
meta_config = read_json(meta_config_filename)
print(f"Set alpha to {args.alpha}")
token = str(uuid.uuid4())
job_name = f"{job_name}_alpha{args.alpha}"
server_config["alpha"] = args.alpha
meta_config["name"] = job_name
split_dir = os.path.join(args.train_split_root, f"{job_name}_{token}")
print(f"Set train split root to {split_dir}")
server_config["TRAIN_SPLIT_ROOT"] = split_dir
client_config["TRAIN_SPLIT_ROOT"] = split_dir
write_json(client_config, client_config_filename)
write_json(server_config, server_config_filename)
write_json(meta_config, meta_config_filename)
else:
print("Assuming centralized training.")
# Submit job
api_command_wrapper(runner.api.submit_job(args.job))
# finish
runner.api.logout()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/cifar10/cifar10-real-world/submit_job.py |
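For reference, a hedged sketch of submitting a job programmatically with the same admin API calls used above; the username, workspace, and job paths are placeholders for a provisioned setup.
from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner, api_command_wrapper

runner = FLAdminAPIRunner(
    username="<admin-user-email>",                      # placeholder
    admin_dir="./workspaces/secure_workspace/admin",    # placeholder
    poc=False,
    debug=False,
)
api_command_wrapper(runner.api.submit_job("./jobs/cifar10_fedavg"))  # placeholder job folder
runner.api.logout()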
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
# secure workspace
client_results_root = "./workspaces/secure_workspace/site-1"
download_dir = "./workspaces/secure_workspace/[email protected]/transfer"
# poc workspace
# client_results_root = "./workspaces/poc_workspace/site-1"
# download_dir = "./workspaces/poc_workspace/admin/transfer"
# 4.1 Central vs. FedAvg
experiments = {
"cifar10_fedavg_stream_tb": {"tag": "val_acc_global_model", "alpha": 1.0},
"cifar10_fedavg_he": {"tag": "val_acc_global_model", "alpha": 1.0},
}
add_cross_site_val = True
def find_job_id(workdir, fl_app_name="cifar10_fedavg", alpha=None):
"""Find the first matching experiment"""
# TODO: return several experiment job_ids with matching settings
fl_app_files = glob.glob(os.path.join(workdir, "**", "fl_app.txt"), recursive=True)
assert len(fl_app_files) > 0, f"No `fl_app.txt` files found in workdir={workdir}."
for fl_app_file in fl_app_files:
with open(fl_app_file, "r") as f:
_fl_app_name = f.read()
if fl_app_name == _fl_app_name: # alpha will be matched based on value in config file
job_id = os.path.basename(
os.path.dirname(os.path.dirname(os.path.join(fl_app_file)))
) # skip "workspace" subfolder
if alpha is not None:
config_fed_server_file = glob.glob(
os.path.join(os.path.dirname(fl_app_file), "**", "config_fed_server.json"), recursive=True
)
assert (
len(config_fed_server_file) == 1
), f"No unique server config found in {os.path.dirname(fl_app_file)}"
with open(config_fed_server_file[0], "r") as f:
server_config = json.load(f)
_alpha = server_config["alpha"]
if _alpha == alpha:
return job_id
else:
return job_id
raise ValueError(f"No job id found for fl_app_name={fl_app_name} in workdir={workdir}")
def read_eventfile(filepath, tags=["val_acc_global_model"]):
data = {}
for summary in tf.compat.v1.train.summary_iterator(filepath):
for v in summary.summary.value:
if v.tag in tags:
# print(v.tag, summary.step, v.simple_value)
if v.tag in data.keys():
data[v.tag].append([summary.step, v.simple_value])
else:
data[v.tag] = [[summary.step, v.simple_value]]
return data
def add_eventdata(data, config, filepath, tag="val_acc_global_model"):
event_data = read_eventfile(filepath, tags=[tag])
assert len(event_data[tag]) > 0, f"No data for key {tag}"
# print(event_data)
for e in event_data[tag]:
# print(e)
data["Config"].append(config)
data["Step"].append(e[0])
data["Accuracy"].append(e[1])
print(f"added {len(event_data[tag])} entries for {tag}")
def main():
data = {"Config": [], "Step": [], "Accuracy": []}
if add_cross_site_val:
xsite_keys = ["SRV_FL_global_model.pt", "SRV_best_FL_global_model.pt"]
xsite_data = {"Config": []}
for k in xsite_keys:
xsite_data.update({k: []})
else:
xsite_data = None
xsite_keys = None
# add event files
for config, exp in experiments.items():
config_name = config.split(" ")[0]
alpha = exp.get("alpha", None)
job_id = find_job_id(workdir=download_dir, fl_app_name=config_name, alpha=alpha)
print(f"Found run {job_id} for {config_name} with alpha={alpha}")
eventfile = glob.glob(os.path.join(client_results_root, job_id, "**", "events.*"), recursive=True)
assert len(eventfile) == 1, "No unique event file found!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
if add_cross_site_val:
xsite_file = glob.glob(os.path.join(download_dir, job_id, "**", "cross_val_results.json"), recursive=True)
assert len(xsite_file) == 1, "No unique x-site file found!"
with open(xsite_file[0], "r") as f:
xsite_results = json.load(f)
xsite_data["Config"].append(config)
for k in xsite_keys:
try:
xsite_data[k].append(xsite_results["site-1"][k]["val_accuracy"])
except Exception as e:
raise ValueError(f"No val_accuracy for {k} in {xsite_file}!")
print("Training TB data:")
print(pd.DataFrame(data))
if xsite_data:
print("Cross-site val data:")
print(pd.DataFrame(xsite_data))
sns.lineplot(x="Step", y="Accuracy", hue="Config", data=data)
plt.show()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/cifar10/cifar10-real-world/figs/plot_tensorboard_events.py |
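The plotting script above relies on TensorBoard event files. A self-contained sketch of the same parsing idea: iterate an event file and collect (step, value) pairs for one scalar tag. The file path is a placeholder.
import tensorflow as tf

def read_scalar(filepath, tag="val_acc_global_model"):
    points = []
    for summary in tf.compat.v1.train.summary_iterator(filepath):
        for v in summary.summary.value:
            if v.tag == tag:
                points.append((summary.step, v.simple_value))
    return points

# points = read_scalar("/path/to/events.out.tfevents.XXXX")  # placeholder path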
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Dirichlet sampling strategy for creating a heterogeneous partition is adopted
# from FedMA (https://github.com/IBM/FedMA).
# MIT License
# Copyright (c) 2020 International Business Machines
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import torchvision.datasets as datasets
CIFAR10_ROOT = "/tmp/cifar10" # will be used for all CIFAR-10 experiments
def load_cifar10_data():
# load data
train_dataset = datasets.CIFAR10(root=CIFAR10_ROOT, train=True, download=True)
# only training label is needed for doing split
train_label = np.array(train_dataset.targets)
return train_label
def get_site_class_summary(train_label, site_idx):
class_sum = {}
for site, data_idx in site_idx.items():
unq, unq_cnt = np.unique(train_label[data_idx], return_counts=True)
tmp = {int(unq[i]): int(unq_cnt[i]) for i in range(len(unq))}
class_sum[site] = tmp
return class_sum
def main():
load_cifar10_data()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/cifar10/pt/utils/cifar10_data_utils.py |
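A quick, self-contained check of get_site_class_summary() on synthetic labels, so it runs without downloading CIFAR-10. The import assumes the package layout shown in the file path above.
import numpy as np
from pt.utils.cifar10_data_utils import get_site_class_summary

train_label = np.array([0, 0, 1, 1, 2, 2, 2, 3])
site_idx = {"site-1": [0, 2, 4, 6], "site-2": [1, 3, 5, 7]}
print(get_site_class_summary(train_label, site_idx))
# {'site-1': {0: 1, 1: 1, 2: 2}, 'site-2': {0: 1, 1: 1, 2: 1, 3: 1}}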
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Dirichlet sampling strategy for creating a heterogeneous partition is adopted
# from FedMA (https://github.com/IBM/FedMA).
# MIT License
# Copyright (c) 2020 International Business Machines
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import os
import numpy as np
from pt.utils.cifar10_data_utils import get_site_class_summary, load_cifar10_data
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
class Cifar10DataSplitter(FLComponent):
def __init__(self, split_dir: str = None, num_sites: int = 8, alpha: float = 0.5, seed: int = 0):
super().__init__()
self.split_dir = split_dir
self.num_sites = num_sites
self.alpha = alpha
self.seed = seed
if self.split_dir is None:
raise ValueError("You need to define a valid `split_dir` for splitting the data.")
if not os.path.isabs(self.split_dir):
raise ValueError("`split_dir` needs to be absolute path.")
if alpha < 0.0:
raise ValueError(f"Alpha should be larger or equal 0.0 but was" f" {alpha}!")
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.split(fl_ctx)
def split(self, fl_ctx: FLContext):
np.random.seed(self.seed)
self.log_info(
fl_ctx,
f"Partition CIFAR-10 dataset into {self.num_sites} sites with Dirichlet sampling under alpha {self.alpha}",
)
site_idx, class_sum = self._partition_data()
# write to files
if not os.path.isdir(self.split_dir):
os.makedirs(self.split_dir)
sum_file_name = os.path.join(self.split_dir, "summary.txt")
with open(sum_file_name, "w") as sum_file:
sum_file.write(f"Number of clients: {self.num_sites} \n")
sum_file.write(f"Dirichlet sampling parameter: {self.alpha} \n")
sum_file.write("Class counts for each client: \n")
sum_file.write(json.dumps(class_sum))
site_file_path = os.path.join(self.split_dir, "site-")
for site in range(self.num_sites):
site_file_name = site_file_path + str(site + 1) + ".npy"
np.save(site_file_name, np.array(site_idx[site]))
def _partition_data(self):
train_label = load_cifar10_data()
min_size = 0
K = 10
N = train_label.shape[0]
site_idx = {}
# split
while min_size < 10:
idx_batch = [[] for _ in range(self.num_sites)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(train_label == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(self.alpha, self.num_sites))
# Balance
proportions = np.array(
[p * (len(idx_j) < N / self.num_sites) for p, idx_j in zip(proportions, idx_batch)]
)
proportions = proportions / proportions.sum()
proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
# shuffle
for j in range(self.num_sites):
np.random.shuffle(idx_batch[j])
site_idx[j] = idx_batch[j]
# collect class summary
class_sum = get_site_class_summary(train_label, site_idx)
return site_idx, class_sum
| NVFlare-main | examples/advanced/cifar10/pt/utils/cifar10_data_splitter.py |
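The core of Cifar10DataSplitter is the Dirichlet sampling loop in _partition_data(). Below is a standalone sketch of that loop on synthetic labels, so it can be run without NVFlare or the CIFAR-10 download; the balancing trick and the minimum-size retry follow the code above.
import numpy as np

def dirichlet_partition(labels, num_sites=4, alpha=0.5, min_size_required=10, seed=0):
    np.random.seed(seed)
    n, num_classes = len(labels), len(np.unique(labels))
    min_size = 0
    while min_size < min_size_required:
        idx_batch = [[] for _ in range(num_sites)]
        for k in range(num_classes):
            idx_k = np.where(labels == k)[0]
            np.random.shuffle(idx_k)
            proportions = np.random.dirichlet(np.repeat(alpha, num_sites))
            # keep sites that already exceed the average size from absorbing more samples
            proportions = np.array([p * (len(idx_j) < n / num_sites) for p, idx_j in zip(proportions, idx_batch)])
            proportions = proportions / proportions.sum()
            cut_points = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
            idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, cut_points))]
        min_size = min(len(idx_j) for idx_j in idx_batch)
    return {j: idx_batch[j] for j in range(num_sites)}

labels = np.random.randint(0, 10, size=5000)
site_idx = dirichlet_partition(labels, num_sites=4, alpha=0.5)
print({site: len(idx) for site, idx in site_idx.items()})  # smaller alpha -> more skewed sites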
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torchvision import datasets
class CIFAR10_Idx(torch.utils.data.Dataset):
def __init__(self, root, data_idx=None, train=True, transform=None, download=False):
"""CIFAR-10 dataset with index to extract subset
Args:
root: data root
data_idx: to specify the data for a particular client site.
If index provided, extract subset, otherwise use the whole set
train: whether to use the training or validation split (default: True)
transform: image transforms
download: whether to download the data (default: False)
Returns:
A PyTorch dataset
"""
self.root = root
self.data_idx = data_idx
self.train = train
self.transform = transform
self.download = download
self.data, self.target = self.__build_cifar_subset__()
def __build_cifar_subset__(self):
# if index provided, extract subset, otherwise use the whole set
cifar_dataobj = datasets.CIFAR10(self.root, train=self.train, transform=self.transform, download=self.download)
data = cifar_dataobj.data
target = np.array(cifar_dataobj.targets)
if self.data_idx is not None:
data = data[self.data_idx]
target = target[self.data_idx]
return data, target
def __getitem__(self, index):
img, target = self.data[index], self.target[index]
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.data)
| NVFlare-main | examples/advanced/cifar10/pt/utils/cifar10_dataset.py |
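A usage sketch for CIFAR10_Idx mirroring how the learners below construct their per-site training set; the split file path is a placeholder and the CIFAR-10 data is assumed to be downloaded already.
import numpy as np
from torchvision import transforms
from pt.utils.cifar10_dataset import CIFAR10_Idx

site_idx = np.load("/tmp/cifar10_splits/site-1.npy").tolist()  # placeholder split file
train_set = CIFAR10_Idx(
    root="/tmp/cifar10",
    data_idx=site_idx,
    train=True,
    download=False,
    transform=transforms.ToTensor(),
)
print(len(train_set))  # size of this site's subset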
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from typing import Union
import numpy as np
import torch
import torch.optim as optim
from pt.networks.cifar10_nets import ModerateCNN
from pt.utils.cifar10_data_utils import CIFAR10_ROOT
from pt.utils.cifar10_dataset import CIFAR10_Idx
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
from nvflare.apis.fl_constant import FLMetaKey, ReturnCode
from nvflare.app_common.abstract.fl_model import FLModel, ParamsType
from nvflare.app_common.abstract.model_learner import ModelLearner
from nvflare.app_common.app_constant import AppConstants, ModelName, ValidateType
from nvflare.app_common.utils.fl_model_utils import FLModelUtils
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
class CIFAR10ModelLearner(ModelLearner): # does not support CIFAR10ScaffoldLearner
def __init__(
self,
train_idx_root: str = "./dataset",
aggregation_epochs: int = 1,
lr: float = 1e-2,
fedproxloss_mu: float = 0.0,
central: bool = False,
analytic_sender_id: str = "analytic_sender",
batch_size: int = 64,
num_workers: int = 0,
):
"""Simple CIFAR-10 Trainer.
Args:
train_idx_root: directory with site training indices for CIFAR-10 data.
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
lr: local learning rate. Float number. Defaults to 1e-2.
fedproxloss_mu: weight for FedProx loss. Float number. Defaults to 0.0 (no FedProx).
central: Bool. Whether to simulate central training. Default False.
analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
batch_size: batch size for training and validation.
num_workers: number of workers for data loaders.
Returns:
an FLModel with the updated local model differences after running `train()`, the metrics after `validate()`,
or the best local model depending on the specified task.
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.train_idx_root = train_idx_root
self.aggregation_epochs = aggregation_epochs
self.lr = lr
self.fedproxloss_mu = fedproxloss_mu
self.best_acc = 0.0
self.central = central
self.batch_size = batch_size
self.num_workers = num_workers
self.analytic_sender_id = analytic_sender_id
# Epoch counter
self.epoch_of_start_time = 0
self.epoch_global = 0
# following will be created in initialize() or later
self.local_model_file = None
self.best_local_model_file = None
self.writer = None
self.device = None
self.model = None
self.optimizer = None
self.criterion = None
self.criterion_prox = None
self.transform_train = None
self.transform_valid = None
self.train_dataset = None
self.valid_dataset = None
self.train_loader = None
self.valid_loader = None
def initialize(self):
"""
Note: this code assumes a FL simulation setting
Datasets will be initialized in train() and validate() when calling self._create_datasets()
as we need to make sure that the server has already downloaded and split the data.
"""
# when the run starts, this is where the actual settings get initialized for trainer
self.info(
f"Client {self.site_name} initialized at \n {self.app_root} \n with args: {self.args}",
)
self.local_model_file = os.path.join(self.app_root, "local_model.pt")
self.best_local_model_file = os.path.join(self.app_root, "best_local_model.pt")
# Select local TensorBoard writer or event-based writer for streaming
self.writer = self.get_component(
self.analytic_sender_id
) # user configured config_fed_client.json for streaming
if not self.writer: # use local TensorBoard writer only
self.writer = SummaryWriter(self.app_root)
# set the training-related parameters
# can be replaced by a config-style block
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = ModerateCNN().to(self.device)
self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
self.criterion = torch.nn.CrossEntropyLoss()
if self.fedproxloss_mu > 0:
self.info(f"using FedProx loss with mu {self.fedproxloss_mu}")
self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)
self.transform_train = transforms.Compose(
[
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Pad(4, padding_mode="reflect"),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
self.transform_valid = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
def _create_datasets(self):
"""To be called only after Cifar10DataSplitter downloaded the data and computed splits"""
if self.train_dataset is None or self.train_loader is None:
if not self.central:
# Set datalist, here the path and filename are hard-coded, can also be fed as an argument
site_idx_file_name = os.path.join(self.train_idx_root, self.site_name + ".npy")
self.info(f"IndexList Path: {site_idx_file_name}")
if os.path.exists(site_idx_file_name):
self.info("Loading subset index")
site_idx = np.load(site_idx_file_name).tolist() # TODO: get from server?
else:
self.stop_task(f"No subset index found! File {site_idx_file_name} does not exist!")
return
self.info(f"Client subset size: {len(site_idx)}")
else:
site_idx = None # use whole training dataset if self.central=True
self.train_dataset = CIFAR10_Idx(
root=CIFAR10_ROOT,
data_idx=site_idx,
train=True,
download=False,
transform=self.transform_train,
)
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers
)
if self.valid_dataset is None or self.valid_loader is None:
self.valid_dataset = datasets.CIFAR10(
root=CIFAR10_ROOT,
train=False,
download=False,
transform=self.transform_valid,
)
self.valid_loader = torch.utils.data.DataLoader(
self.valid_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers
)
def finalize(self):
# collect threads, close files here
pass
def local_train(self, train_loader, model_global, val_freq: int = 0):
for epoch in range(self.aggregation_epochs):
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.info(f"Local epoch {self.site_name}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})")
avg_loss = 0.0
for i, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.to(self.device), labels.to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# FedProx loss term
if self.fedproxloss_mu > 0:
fed_prox_loss = self.criterion_prox(self.model, model_global)
loss += fed_prox_loss
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
avg_loss += loss.item()
self.writer.add_scalar("train_loss", avg_loss / len(train_loader), current_step)
if val_freq > 0 and epoch % val_freq == 0:
acc = self.local_valid(self.valid_loader, tb_id="val_acc_local_model")
if acc > self.best_acc:
self.best_acc = acc
self.save_model(is_best=True)
def save_model(self, is_best=False):
# save model
model_weights = self.model.state_dict()
save_dict = {"model_weights": model_weights, "epoch": self.epoch_global}
if is_best:
save_dict.update({"best_acc": self.best_acc})
torch.save(save_dict, self.best_local_model_file)
else:
torch.save(save_dict, self.local_model_file)
def train(self, model: FLModel) -> Union[str, FLModel]:
self._create_datasets()
# get round information
self.info(f"Current/Total Round: {self.current_round + 1}/{self.total_rounds}")
self.info(f"Client identity: {self.site_name}")
# update local model weights with received weights
global_weights = model.params
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError(f"Convert weight from {var_name} failed") from e
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.info(f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for potential FedProx loss or SCAFFOLD
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
# local train
self.local_train(
train_loader=self.train_loader,
model_global=model_global,
val_freq=1 if self.central else 0,
)
self.epoch_of_start_time += self.aggregation_epochs
# perform valid after local train
acc = self.local_valid(self.valid_loader, tb_id="val_acc_local_model")
self.info(f"val_acc_local_model: {acc:.4f}")
# save model
self.save_model(is_best=False)
if acc > self.best_acc:
self.best_acc = acc
self.save_model(is_best=True)
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.stop_task(f"{name} weights became NaN...")
return ReturnCode.EXECUTION_EXCEPTION
# return an FLModel containing the model differences
fl_model = FLModel(params_type=ParamsType.DIFF, params=model_diff)
FLModelUtils.set_meta_prop(fl_model, FLMetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.info("Local epochs finished. Returning FLModel")
return fl_model
def get_model(self, model_name: str) -> Union[str, FLModel]:
# Retrieve the best local model saved during training.
if model_name == ModelName.BEST_MODEL:
try:
# load model to cpu as server might or might not have a GPU
model_data = torch.load(self.best_local_model_file, map_location="cpu")
except Exception as e:
raise ValueError("Unable to load best model") from e
# Create FLModel from model data.
if model_data:
# convert weights to numpy to support FOBS
model_weights = model_data["model_weights"]
for k, v in model_weights.items():
model_weights[k] = v.numpy()
return FLModel(params_type=ParamsType.FULL, params=model_weights)
else:
# Set return code.
self.error(f"best local model not found at {self.best_local_model_file}.")
return ReturnCode.EXECUTION_RESULT_ERROR
else:
raise ValueError(f"Unknown model_type: {model_name}") # Raised errors are caught in LearnerExecutor class.
def local_valid(self, valid_loader, tb_id=None):
self.model.eval()
with torch.no_grad():
correct, total = 0, 0
for _i, (inputs, labels) in enumerate(valid_loader):
inputs, labels = inputs.to(self.device), labels.to(self.device)
outputs = self.model(inputs)
_, pred_label = torch.max(outputs.data, 1)
total += inputs.data.size()[0]
correct += (pred_label == labels.data).sum().item()
metric = correct / float(total)
if tb_id:
self.writer.add_scalar(tb_id, metric, self.epoch_global)
return metric
def validate(self, model: FLModel) -> Union[str, FLModel]:
self._create_datasets()
# get validation information
self.info(f"Client identity: {self.site_name}")
# update local model weights with received weights
global_weights = model.params
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
n_loaded = 0
for var_name in local_var_dict:
if var_name in model_keys:
weights = torch.as_tensor(global_weights[var_name], device=self.device)
try:
# update the local dict
local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
n_loaded += 1
except Exception as e:
raise ValueError(f"Convert weight from {var_name} failed") from e
self.model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
# get validation meta info
validate_type = FLModelUtils.get_meta_prop(
model, FLMetaKey.VALIDATE_TYPE, ValidateType.MODEL_VALIDATE
) # TODO: enable model.get_meta_prop(...)
model_owner = self.get_shareable_header(AppConstants.MODEL_OWNER)
# perform valid
train_acc = self.local_valid(
self.train_loader,
tb_id="train_acc_global_model" if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE else None,
)
self.info(f"training acc ({model_owner}): {train_acc:.4f}")
val_acc = self.local_valid(
self.valid_loader,
tb_id="val_acc_global_model" if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE else None,
)
self.info(f"validation acc ({model_owner}): {val_acc:.4f}")
self.info("Evaluation finished. Returning result")
if val_acc > self.best_acc:
self.best_acc = val_acc
self.save_model(is_best=True)
val_results = {"train_accuracy": train_acc, "val_accuracy": val_acc}
return FLModel(metrics=val_results)
| NVFlare-main | examples/advanced/cifar10/pt/learners/cifar10_model_learner.py |
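CIFAR10ModelLearner.train() returns a DIFF-type FLModel, i.e. local minus global weights as float32 numpy arrays. A minimal sketch of that delta computation in isolation:
import numpy as np
import torch

def compute_model_diff(local_state_dict, global_weights):
    # local_state_dict: torch state_dict; global_weights: dict of numpy arrays (already reshaped)
    model_diff = {}
    for name, global_w in global_weights.items():
        if name not in local_state_dict:
            continue
        local_w = local_state_dict[name].cpu().numpy()
        model_diff[name] = np.subtract(local_w, global_w, dtype=np.float32)
        if np.any(np.isnan(model_diff[name])):
            raise ValueError(f"{name} weights became NaN")
    return model_diff

# toy check
global_weights = {"fc.weight": np.zeros((2, 2), dtype=np.float32)}
local_state = {"fc.weight": torch.ones(2, 2)}
print(compute_model_diff(local_state, global_weights)["fc.weight"])  # all ones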
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import numpy as np
import torch
import torch.optim as optim
from pt.networks.cifar10_nets import ModerateCNN
from pt.utils.cifar10_data_utils import CIFAR10_ROOT
from pt.utils.cifar10_dataset import CIFAR10_Idx
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ModelName, ValidateType
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
class CIFAR10Learner(Learner): # also supports CIFAR10ScaffoldLearner
def __init__(
self,
train_idx_root: str = "./dataset",
aggregation_epochs: int = 1,
lr: float = 1e-2,
fedproxloss_mu: float = 0.0,
central: bool = False,
analytic_sender_id: str = "analytic_sender",
batch_size: int = 64,
num_workers: int = 0,
):
"""Simple CIFAR-10 Trainer.
Args:
train_idx_root: directory with site training indices for CIFAR-10 data.
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
lr: local learning rate. Float number. Defaults to 1e-2.
fedproxloss_mu: weight for FedProx loss. Float number. Defaults to 0.0 (no FedProx).
central: Bool. Whether to simulate central training. Default False.
analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
batch_size: batch size for training and validation.
num_workers: number of workers for data loaders.
Returns:
a Shareable with the updated local model after running `execute()`
or the best local model depending on the specified task.
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.train_idx_root = train_idx_root
self.aggregation_epochs = aggregation_epochs
self.lr = lr
self.fedproxloss_mu = fedproxloss_mu
self.best_acc = 0.0
self.central = central
self.batch_size = batch_size
self.num_workers = num_workers
self.writer = None
self.analytic_sender_id = analytic_sender_id
# Epoch counter
self.epoch_of_start_time = 0
self.epoch_global = 0
# following will be created in initialize() or later
self.app_root = None
self.client_id = None
self.local_model_file = None
self.best_local_model_file = None
self.writer = None
self.device = None
self.model = None
self.optimizer = None
self.criterion = None
self.criterion_prox = None
self.transform_train = None
self.transform_valid = None
self.train_dataset = None
self.valid_dataset = None
self.train_loader = None
self.valid_loader = None
def initialize(self, parts: dict, fl_ctx: FLContext):
"""
Note: this code assumes a FL simulation setting
Datasets will be initialized in train() and validate() when calling self._create_datasets()
as we need to make sure that the server has already downloaded and split the data.
"""
# when the run starts, this is where the actual settings get initialized for trainer
# Set the paths according to fl_ctx
self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
self.client_id = fl_ctx.get_identity_name()
self.log_info(
fl_ctx,
f"Client {self.client_id} initialized at \n {self.app_root} \n with args: {fl_args}",
)
self.local_model_file = os.path.join(self.app_root, "local_model.pt")
self.best_local_model_file = os.path.join(self.app_root, "best_local_model.pt")
# Select local TensorBoard writer or event-based writer for streaming
self.writer = parts.get(self.analytic_sender_id) # user configured config_fed_client.json for streaming
if not self.writer: # use local TensorBoard writer only
self.writer = SummaryWriter(self.app_root)
# set the training-related parameters
# can be replaced by a config-style block
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = ModerateCNN().to(self.device)
self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
self.criterion = torch.nn.CrossEntropyLoss()
if self.fedproxloss_mu > 0:
self.log_info(fl_ctx, f"using FedProx loss with mu {self.fedproxloss_mu}")
self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)
self.transform_train = transforms.Compose(
[
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Pad(4, padding_mode="reflect"),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
self.transform_valid = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
),
]
)
def _create_datasets(self, fl_ctx: FLContext):
"""To be called only after Cifar10DataSplitter downloaded the data and computed splits"""
if self.train_dataset is None or self.train_loader is None:
if not self.central:
# Set datalist, here the path and filename are hard-coded, can also be fed as an argument
site_idx_file_name = os.path.join(self.train_idx_root, self.client_id + ".npy")
self.log_info(fl_ctx, f"IndexList Path: {site_idx_file_name}")
if os.path.exists(site_idx_file_name):
self.log_info(fl_ctx, "Loading subset index")
site_idx = np.load(site_idx_file_name).tolist() # TODO: get from fl_ctx/shareable?
else:
self.system_panic(f"No subset index found! File {site_idx_file_name} does not exist!", fl_ctx)
return
self.log_info(fl_ctx, f"Client subset size: {len(site_idx)}")
else:
site_idx = None # use whole training dataset if self.central=True
self.train_dataset = CIFAR10_Idx(
root=CIFAR10_ROOT,
data_idx=site_idx,
train=True,
download=False,
transform=self.transform_train,
)
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers
)
if self.valid_dataset is None or self.valid_loader is None:
self.valid_dataset = datasets.CIFAR10(
root=CIFAR10_ROOT,
train=False,
download=False,
transform=self.transform_valid,
)
self.valid_loader = torch.utils.data.DataLoader(
self.valid_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers
)
def finalize(self, fl_ctx: FLContext):
# collect threads, close files here
pass
def local_train(self, fl_ctx, train_loader, model_global, abort_signal: Signal, val_freq: int = 0):
for epoch in range(self.aggregation_epochs):
if abort_signal.triggered:
return
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.log_info(fl_ctx, f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})")
avg_loss = 0.0
for i, (inputs, labels) in enumerate(train_loader):
if abort_signal.triggered:
return
inputs, labels = inputs.to(self.device), labels.to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# FedProx loss term
if self.fedproxloss_mu > 0:
fed_prox_loss = self.criterion_prox(self.model, model_global)
loss += fed_prox_loss
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
avg_loss += loss.item()
self.writer.add_scalar("train_loss", avg_loss / len(train_loader), current_step)
if val_freq > 0 and epoch % val_freq == 0:
acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model", fl_ctx=fl_ctx)
if acc > self.best_acc:
self.best_acc = acc
self.save_model(is_best=True)
def save_model(self, is_best=False):
# save model
model_weights = self.model.state_dict()
save_dict = {"model_weights": model_weights, "epoch": self.epoch_global}
if is_best:
save_dict.update({"best_acc": self.best_acc})
torch.save(save_dict, self.best_local_model_file)
else:
torch.save(save_dict, self.local_model_file)
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self._create_datasets(fl_ctx)
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError(f"Convert weight from {var_name} failed") from e
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for potential FedProx loss or SCAFFOLD
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
# local train
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
val_freq=1 if self.central else 0,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# perform valid after local train
acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model", fl_ctx=fl_ctx)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_acc_local_model: {acc:.4f}")
# save model
self.save_model(is_best=False)
if acc > self.best_acc:
self.best_acc = acc
self.save_model(is_best=True)
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable:
# Retrieve the best local model saved during training.
if model_name == ModelName.BEST_MODEL:
model_data = None
try:
# load model to cpu as server might or might not have a GPU
model_data = torch.load(self.best_local_model_file, map_location="cpu")
except Exception as e:
raise ValueError("Unable to load best model") from e
# Create DXO and shareable from model data.
if model_data:
# convert weights to numpy to support FOBS
model_weights = model_data["model_weights"]
for k, v in model_weights.items():
model_weights[k] = v.numpy()
dxo = DXO(data_kind=DataKind.WEIGHTS, data=model_weights)
return dxo.to_shareable()
else:
# Set return code.
self.log_error(fl_ctx, f"best local model not found at {self.best_local_model_file}.")
return make_reply(ReturnCode.EXECUTION_RESULT_ERROR)
else:
raise ValueError(f"Unknown model_type: {model_name}") # Raised errors are caught in LearnerExecutor class.
def local_valid(self, valid_loader, abort_signal: Signal, tb_id=None, fl_ctx=None):
self.model.eval()
with torch.no_grad():
correct, total = 0, 0
for _i, (inputs, labels) in enumerate(valid_loader):
if abort_signal.triggered:
return None
inputs, labels = inputs.to(self.device), labels.to(self.device)
outputs = self.model(inputs)
_, pred_label = torch.max(outputs.data, 1)
total += inputs.data.size()[0]
correct += (pred_label == labels.data).sum().item()
metric = correct / float(total)
if tb_id:
self.writer.add_scalar(tb_id, metric, self.epoch_global)
return metric
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
self._create_datasets(fl_ctx)
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get validation information
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
model_owner = shareable.get(ReservedHeaderKey.HEADERS).get(AppConstants.MODEL_OWNER)
if model_owner:
self.log_info(fl_ctx, f"Evaluating model from {model_owner} on {fl_ctx.get_identity_name()}")
else:
model_owner = "global_model" # evaluating global model during training
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
n_loaded = 0
for var_name in local_var_dict:
if var_name in model_keys:
weights = torch.as_tensor(global_weights[var_name], device=self.device)
try:
# update the local dict
local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
n_loaded += 1
except Exception as e:
raise ValueError(f"Convert weight from {var_name} failed") from e
self.model.load_state_dict(local_var_dict)
if n_loaded == 0:
raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
# perform valid before local train
global_acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_global_model", fl_ctx=fl_ctx)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_acc_global_model ({model_owner}): {global_acc}")
return DXO(data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: global_acc}, meta={}).to_shareable()
elif validate_type == ValidateType.MODEL_VALIDATE:
# perform valid
train_acc = self.local_valid(self.train_loader, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"training acc ({model_owner}): {train_acc}")
val_acc = self.local_valid(self.valid_loader, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"validation acc ({model_owner}): {val_acc}")
self.log_info(fl_ctx, "Evaluation finished. Returning shareable")
val_results = {"train_accuracy": train_acc, "val_accuracy": val_acc}
metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results)
return metric_dxo.to_shareable()
else:
return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | examples/advanced/cifar10/pt/learners/cifar10_learner.py |
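CIFAR10Learner exchanges weights through DXO objects packed into Shareables. A small sketch of that round trip, using only calls that already appear above:
import numpy as np
from nvflare.apis.dxo import DXO, DataKind, from_shareable

weights = {"fc.weight": np.ones((2, 2), dtype=np.float32)}
shareable = DXO(data_kind=DataKind.WEIGHTS, data=weights).to_shareable()

dxo = from_shareable(shareable)
assert dxo.data_kind == DataKind.WEIGHTS
print(dxo.data["fc.weight"].shape)  # (2, 2)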
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from pt.learners.cifar10_learner import CIFAR10Learner
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AlgorithmConstants, AppConstants
from nvflare.app_opt.pt.scaffold import PTScaffoldHelper, get_lr_values
class CIFAR10ScaffoldLearner(CIFAR10Learner):
def __init__(
self,
train_idx_root: str = "./dataset",
aggregation_epochs: int = 1,
lr: float = 1e-2,
fedproxloss_mu: float = 0.0,
central: bool = False,
analytic_sender_id: str = "analytic_sender",
batch_size: int = 64,
num_workers: int = 0,
):
"""Simple Scaffold CIFAR-10 Trainer.
Implements the training algorithm proposed in
Karimireddy et al. "SCAFFOLD: Stochastic Controlled Averaging for Federated Learning"
(https://arxiv.org/abs/1910.06378) using functions implemented in `PTScaffoldHelper` class.
Args:
train_idx_root: directory with site training indices for CIFAR-10 data.
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
lr: local learning rate. Float number. Defaults to 1e-2.
fedproxloss_mu: weight for FedProx loss. Float number. Defaults to 0.0 (no FedProx).
central: Bool. Whether to simulate central training. Default False.
analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
batch_size: batch size for training and validation.
num_workers: number of workers for data loaders.
Returns:
a Shareable with the updated local model after running `execute()`
or the best local model depending on the specified task.
"""
CIFAR10Learner.__init__(
self,
train_idx_root=train_idx_root,
aggregation_epochs=aggregation_epochs,
lr=lr,
fedproxloss_mu=fedproxloss_mu,
central=central,
analytic_sender_id=analytic_sender_id,
batch_size=batch_size,
num_workers=num_workers,
)
self.scaffold_helper = PTScaffoldHelper()
def initialize(self, parts: dict, fl_ctx: FLContext):
# Initialize super class and SCAFFOLD
CIFAR10Learner.initialize(self, parts=parts, fl_ctx=fl_ctx)
self.scaffold_helper.init(model=self.model)
def local_train(self, fl_ctx, train_loader, model_global, abort_signal: Signal, val_freq: int = 0):
# local_train with SCAFFOLD steps
c_global_para, c_local_para = self.scaffold_helper.get_params()
for epoch in range(self.aggregation_epochs):
if abort_signal.triggered:
return
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.log_info(fl_ctx, f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})")
for i, (inputs, labels) in enumerate(train_loader):
if abort_signal.triggered:
return
inputs, labels = inputs.to(self.device), labels.to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# FedProx loss term
if self.fedproxloss_mu > 0:
fed_prox_loss = self.criterion_prox(self.model, model_global)
loss += fed_prox_loss
loss.backward()
self.optimizer.step()
# SCAFFOLD step
curr_lr = get_lr_values(self.optimizer)[0]
self.scaffold_helper.model_update(
model=self.model, curr_lr=curr_lr, c_global_para=c_global_para, c_local_para=c_local_para
)
current_step = epoch_len * self.epoch_global + i
self.writer.add_scalar("train_loss", loss.item(), current_step)
if val_freq > 0 and epoch % val_freq == 0:
acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model", fl_ctx=fl_ctx)
if acc > self.best_acc:
self.save_model(is_best=True)
# Update the SCAFFOLD terms
self.scaffold_helper.terms_update(
model=self.model,
curr_lr=curr_lr,
c_global_para=c_global_para,
c_local_para=c_local_para,
model_global=model_global,
)
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# return DXO with extra control differences for SCAFFOLD
dxo_collection = from_shareable(shareable)
if dxo_collection.data_kind != DataKind.COLLECTION:
self.log_error(
fl_ctx,
f"SCAFFOLD learner expected shareable to contain a collection of two DXOs "
f"but got data kind {dxo_collection.data_kind}.",
)
return make_reply(ReturnCode.ERROR)
dxo_global_weights = dxo_collection.data.get(AppConstants.MODEL_WEIGHTS)
dxo_global_ctrl_weights = dxo_collection.data.get(AlgorithmConstants.SCAFFOLD_CTRL_GLOBAL)
if dxo_global_ctrl_weights is None:
self.log_error(fl_ctx, "DXO collection doesn't contain the SCAFFOLD controls!")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# convert to tensor and load into c_global model
global_ctrl_weights = dxo_global_ctrl_weights.data
for k in global_ctrl_weights.keys():
global_ctrl_weights[k] = torch.as_tensor(global_ctrl_weights[k])
self.scaffold_helper.load_global_controls(weights=global_ctrl_weights)
# modify shareable to only contain global weights
shareable = dxo_global_weights.update_shareable(shareable) # TODO: add set_dxo() method to Shareable
# local training
result_shareable = super().train(shareable, fl_ctx, abort_signal)
if result_shareable.get_return_code() == ReturnCode.OK:
# get DXO with weight updates from local training
dxo_weights_diff = from_shareable(result_shareable)
# Create a DXO collection with weights and scaffold controls
dxo_weights_diff_ctrl = DXO(data_kind=DataKind.WEIGHT_DIFF, data=self.scaffold_helper.get_delta_controls())
# add same num steps as for model weights
dxo_weights_diff_ctrl.set_meta_prop(
MetaKey.NUM_STEPS_CURRENT_ROUND, dxo_weights_diff.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND)
)
collection_data = {
AppConstants.MODEL_WEIGHTS: dxo_weights_diff,
AlgorithmConstants.SCAFFOLD_CTRL_DIFF: dxo_weights_diff_ctrl,
}
dxo = DXO(data_kind=DataKind.COLLECTION, data=collection_data)
return dxo.to_shareable()
else:
return result_shareable
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
dxo = from_shareable(shareable)
# If collection, extract only global weights to validate
if dxo.data_kind == DataKind.COLLECTION:
# create a new shareable with only model weights
shareable = copy.deepcopy(shareable) # TODO: Is this the best way?
dxo_global_weights = dxo.data.get(AppConstants.MODEL_WEIGHTS)
shareable = dxo_global_weights.update_shareable(shareable) # TODO: add set_dxo() method to Shareable
return super().validate(shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
| NVFlare-main | examples/advanced/cifar10/pt/learners/cifar10_scaffold_learner.py |
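The SCAFFOLD learner above delegates the control-variate bookkeeping to PTScaffoldHelper. For intuition, here is a plain-PyTorch sketch of the correction applied after each optimizer step, y <- y - lr * (c_global - c_local), from Karimireddy et al.; it illustrates the rule only and is not NVFlare's implementation.
import torch

def scaffold_correct_(model, curr_lr, c_global, c_local):
    # apply the SCAFFOLD drift correction in place after optimizer.step()
    with torch.no_grad():
        for name, param in model.named_parameters():
            param.sub_(curr_lr * (c_global[name] - c_local[name]))

# toy usage with zero controls (a no-op correction)
model = torch.nn.Linear(4, 2)
c_global = {n: torch.zeros_like(p) for n, p in model.named_parameters()}
c_local = {n: torch.zeros_like(p) for n, p in model.named_parameters()}
scaffold_correct_(model, curr_lr=0.01, c_global=c_global, c_local=c_local)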
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Source of SimpleCNN and ModerateCNN: https://github.com/IBM/FedMA/blob/master/model.py,
# SimpleCNN is also from https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# MIT License
# Copyright (c) 2020 International Business Machines
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleCNN(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class ModerateCNN(nn.Module):
def __init__(self):
super(ModerateCNN, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.05),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
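        # For 32x32 inputs, the three 2x2 max-pools above reduce the spatial size to 4x4, so the flattened feature size entering fc_layer is 256 * 4 * 4 = 4096.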
self.fc_layer = nn.Sequential(
nn.Dropout(p=0.1),
# nn.Linear(4096, 1024),
nn.Linear(4096, 512),
nn.ReLU(inplace=True),
# nn.Linear(1024, 512),
nn.Linear(512, 512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.1),
nn.Linear(512, 10),
)
def forward(self, x):
x = self.conv_layer(x)
x = x.view(x.size(0), -1)
x = self.fc_layer(x)
return x
| NVFlare-main | examples/advanced/cifar10/pt/networks/cifar10_nets.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import uuid
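# Example invocation (hypothetical paths, for illustration only):
#   python set_alpha.py --job jobs/cifar10_fedavg --alpha 0.5 --train_split_root /tmp/cifar10_splits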
def read_json(filename):
assert os.path.isfile(filename), f"{filename} does not exist!"
with open(filename, "r") as f:
return json.load(f)
def write_json(data, filename):
with open(filename, "w") as f:
json.dump(data, f, indent=4)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--job", type=str, default="cifar10_fedavg", help="Path to job.")
parser.add_argument("--poc", action="store_true", help="Whether admin uses POC mode.")
parser.add_argument(
"--train_split_root", type=str, default="/tmp/cifar10_splits", help="Location where to save data splits."
)
parser.add_argument(
"--alpha",
type=float,
default=0.0,
help="Value controls the degree of heterogeneity. "
"Lower values of alpha means higher heterogeneity."
"Values of <= 0. means no data sampling. "
"Assumes central training.",
)
args = parser.parse_args()
# update alpha and split data dir
job_name = os.path.basename(args.job)
client_config_filename = os.path.join(args.job, job_name, "config", "config_fed_client.json")
server_config_filename = os.path.join(args.job, job_name, "config", "config_fed_server.json")
meta_config_filename = os.path.join(args.job, "meta.json")
if args.alpha > 0.0:
client_config = read_json(client_config_filename)
server_config = read_json(server_config_filename)
meta_config = read_json(meta_config_filename)
print(f"Set alpha to {args.alpha}")
token = str(uuid.uuid4())
job_name = f"{job_name}_alpha{args.alpha}"
server_config["alpha"] = args.alpha
meta_config["name"] = job_name
split_dir = os.path.join(args.train_split_root, f"{job_name}_{token}")
print(f"Set train split root to {split_dir}")
server_config["TRAIN_SPLIT_ROOT"] = split_dir
client_config["TRAIN_SPLIT_ROOT"] = split_dir
write_json(client_config, client_config_filename)
write_json(server_config, server_config_filename)
write_json(meta_config, meta_config_filename)
print(f"Updated {meta_config_filename} to alpha={args.alpha}")
else:
print("Assuming centralized training.")
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/cifar10/cifar10-sim/set_alpha.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
# secure workspace
client_results_root = "/tmp/nvflare/sim_cifar10"
# poc workspace
# client_results_root = "./workspaces/poc_workspace/site-1"
# download_dir = "./workspaces/poc_workspace/admin/transfer"
# 4.1 Central vs. FedAvg
experiments = {
"cifar10_central": {"tag": "val_acc_local_model"},
"cifar10_fedavg": {"tag": "val_acc_global_model", "alpha": 1.0},
}
# # 4.2 Impact of client data heterogeneity
# experiments = {"cifar10_fedavg (alpha=1.0)": {"tag": "val_acc_global_model", "alpha": 1.0},
# "cifar10_fedavg (alpha=0.5)": {"tag": "val_acc_global_model", "alpha": 0.5},
# "cifar10_fedavg (alpha=0.3)": {"tag": "val_acc_global_model", "alpha": 0.3},
# "cifar10_fedavg (alpha=0.1)": {"tag": "val_acc_global_model", "alpha": 0.1}
# }
# # 4.3 FedProx vs. FedOpt vs. SCAFFOLD
# experiments = {"cifar10_fedavg": {"tag": "val_acc_global_model", "alpha": 0.1},
# "cifar10_fedprox": {"tag": "val_acc_global_model", "alpha": 0.1},
# "cifar10_fedopt": {"tag": "val_acc_global_model", "alpha": 0.1},
# "cifar10_scaffold": {"tag": "val_acc_global_model", "alpha": 0.1}
# }
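# when True, also read the final cross-site validation accuracies from each run's cross_val_results.json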
add_cross_site_val = True
def read_eventfile(filepath, tags=["val_acc_global_model"]):
data = {}
for summary in tf.compat.v1.train.summary_iterator(filepath):
for v in summary.summary.value:
if v.tag in tags:
# print(v.tag, summary.step, v.simple_value)
if v.tag in data.keys():
data[v.tag].append([summary.step, v.simple_value])
else:
data[v.tag] = [[summary.step, v.simple_value]]
return data
def add_eventdata(data, config, filepath, tag="val_acc_global_model"):
event_data = read_eventfile(filepath, tags=[tag])
assert len(event_data[tag]) > 0, f"No data for key {tag}"
# print(event_data)
for e in event_data[tag]:
# print(e)
data["Config"].append(config)
data["Step"].append(e[0])
data["Accuracy"].append(e[1])
print(f"added {len(event_data[tag])} entries for {tag}")
def main():
data = {"Config": [], "Step": [], "Accuracy": []}
if add_cross_site_val:
xsite_keys = ["SRV_FL_global_model.pt", "SRV_best_FL_global_model.pt"]
xsite_data = {"Config": []}
for k in xsite_keys:
xsite_data.update({k: []})
else:
xsite_data = None
xsite_keys = None
# add event files
for config, exp in experiments.items():
config_name = config.split(" ")[0]
alpha = exp.get("alpha", None)
if alpha:
config_name = config_name + f"*alpha{alpha}"
eventfile = glob.glob(
os.path.join(client_results_root, config_name, "**", "app_site-1", "events.*"), recursive=True
)
assert len(eventfile) == 1, f"No unique event file found in {os.path.join(client_results_root, config_name)}!"
eventfile = eventfile[0]
print("adding", eventfile)
add_eventdata(data, config, eventfile, tag=exp["tag"])
if add_cross_site_val:
xsite_file = glob.glob(
os.path.join(client_results_root, config_name, "**", "cross_val_results.json"), recursive=True
)
assert len(xsite_file) == 1, "No unique x-site file found!"
with open(xsite_file[0], "r") as f:
xsite_results = json.load(f)
xsite_data["Config"].append(config)
for k in xsite_keys:
try:
xsite_data[k].append(xsite_results["site-1"][k]["val_accuracy"])
except Exception as e:
raise ValueError(f"No val_accuracy for {k} in {xsite_file}!")
print("Training TB data:")
print(pd.DataFrame(data))
if xsite_data:
print("Cross-site val data:")
print(pd.DataFrame(xsite_data))
sns.lineplot(x="Step", y="Accuracy", hue="Config", data=data)
plt.show()
if __name__ == "__main__":
main()
| NVFlare-main | examples/advanced/cifar10/cifar10-sim/figs/plot_tensorboard_events.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
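# Small MNIST-style classifier used by the hello-tf2 example: flatten a 28x28 input,
# one dense hidden layer with dropout, then 10 output logits.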
class Net(tf.keras.Model):
def __init__(self):
super().__init__()
self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
self.dense1 = tf.keras.layers.Dense(128, activation="relu")
self.dropout = tf.keras.layers.Dropout(0.2)
self.dense2 = tf.keras.layers.Dense(10)
def call(self, x):
x = self.flatten(x)
x = self.dense1(x)
x = self.dropout(x)
x = self.dense2(x)
return x
| NVFlare-main | examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/tf2_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tensorflow as tf
from tf2_net import Net
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants
from nvflare.fuel.utils import fobs
class TF2ModelPersistor(ModelPersistor):
def __init__(self, save_name="tf2_model.fobs"):
super().__init__()
self.save_name = save_name
def _initialize(self, fl_ctx: FLContext):
# get save path from FLContext
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
env = None
run_args = fl_ctx.get_prop(FLContextKey.ARGS)
if run_args:
env_config_file_name = os.path.join(app_root, run_args.env)
if os.path.exists(env_config_file_name):
try:
with open(env_config_file_name) as file:
env = json.load(file)
                except Exception:
self.system_panic(
reason="error opening env config file {}".format(env_config_file_name), fl_ctx=fl_ctx
)
return
if env is not None:
if env.get("APP_CKPT_DIR", None):
fl_ctx.set_prop(AppConstants.LOG_DIR, env["APP_CKPT_DIR"], private=True, sticky=True)
if env.get("APP_CKPT") is not None:
fl_ctx.set_prop(
AppConstants.CKPT_PRELOAD_PATH,
env["APP_CKPT"],
private=True,
sticky=True,
)
log_dir = fl_ctx.get_prop(AppConstants.LOG_DIR)
if log_dir:
self.log_dir = os.path.join(app_root, log_dir)
else:
self.log_dir = app_root
self._fobs_save_path = os.path.join(self.log_dir, self.save_name)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
fl_ctx.sync_sticky()
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
"""Initializes and loads the Model.
Args:
fl_ctx: FLContext
Returns:
Model object
"""
if os.path.exists(self._fobs_save_path):
self.logger.info("Loading server weights")
with open(self._fobs_save_path, "rb") as f:
model_learnable = fobs.load(f)
else:
self.logger.info("Initializing server model")
network = Net()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
network.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
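            # run the model once on a symbolic Keras input so its layers are built and get_weights() returns initialized values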
_ = network(tf.keras.Input(shape=(28, 28)))
var_dict = {network.get_layer(index=key).name: value for key, value in enumerate(network.get_weights())}
model_learnable = make_model_learnable(var_dict, dict())
return model_learnable
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
self._initialize(fl_ctx)
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
"""Saves model.
Args:
model_learnable: ModelLearnable object
fl_ctx: FLContext
"""
model_learnable_info = {k: str(type(v)) for k, v in model_learnable.items()}
self.logger.info(f"Saving aggregated server weights: \n {model_learnable_info}")
with open(self._fobs_save_path, "wb") as f:
fobs.dump(model_learnable, f)
| NVFlare-main | examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/tf2_model_persistor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.filter import Filter
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
class ExcludeVars(Filter):
"""
Exclude/Remove variables from Sharable
Args:
exclude_vars: if not specified (None), all layers are being encrypted;
if list of variable/layer names, only specified variables are excluded;
if string containing regular expression (e.g. "conv"), only matched variables are being excluded.
"""
def __init__(self, exclude_vars=None):
super().__init__()
self.exclude_vars = exclude_vars
self.skip = False
if self.exclude_vars is not None:
if not (isinstance(self.exclude_vars, list) or isinstance(self.exclude_vars, str)):
self.skip = True
self.logger.debug("Need to provide a list of layer names or a string for regex matching")
return
if isinstance(self.exclude_vars, list):
for var in self.exclude_vars:
if not isinstance(var, str):
self.skip = True
self.logger.debug("encrypt_layers needs to be a list of layer names to encrypt.")
return
self.logger.debug(f"Excluding {self.exclude_vars} from shareable")
elif isinstance(self.exclude_vars, str):
self.exclude_vars = re.compile(self.exclude_vars) if self.exclude_vars else None
if self.exclude_vars is None:
self.skip = True
                self.logger.debug(f'Excluding layers matching regex "{self.exclude_vars}"')
else:
self.logger.debug("Not excluding anything")
self.skip = True
def process(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable:
self.log_debug(fl_ctx, "inside filter")
if self.skip:
return shareable
try:
dxo = from_shareable(shareable)
        except Exception:
self.log_exception(fl_ctx, "shareable data is not a valid DXO")
return shareable
assert isinstance(dxo, DXO)
if dxo.data_kind not in (DataKind.WEIGHT_DIFF, DataKind.WEIGHTS):
self.log_debug(fl_ctx, "I cannot handle {}".format(dxo.data_kind))
return shareable
if dxo.data is None:
self.log_debug(fl_ctx, "no data to filter")
return shareable
weights = dxo.data
# parse regex encrypt layers
if isinstance(self.exclude_vars, re.Pattern):
re_pattern = self.exclude_vars
self.exclude_vars = []
for var_name in weights.keys():
if re_pattern.search(var_name):
self.exclude_vars.append(var_name)
self.log_debug(fl_ctx, f"Regex found {self.exclude_vars} matching layers.")
if len(self.exclude_vars) == 0:
self.log_warning(fl_ctx, f"No matching layers found with regex {re_pattern}")
# remove variables
n_excluded = 0
var_names = list(weights.keys()) # needs to recast to list to be used in for loop
n_vars = len(var_names)
for var_name in var_names:
# self.logger.info(f"Checking {var_name}")
if var_name in self.exclude_vars:
self.log_debug(fl_ctx, f"Excluding {var_name}")
weights[var_name] = np.zeros(weights[var_name].shape)
n_excluded += 1
self.log_debug(
fl_ctx,
f"Excluded {n_excluded} of {n_vars} variables. {len(weights.keys())} remaining.",
)
dxo.data = weights
return dxo.update_shareable(shareable)
| NVFlare-main | examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tf2_net import Net
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
class SimpleTrainer(Executor):
def __init__(self, epochs_per_round):
super().__init__()
self.epochs_per_round = epochs_per_round
self.train_images, self.train_labels = None, None
self.test_images, self.test_labels = None, None
self.model = None
self.var_list = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.setup(fl_ctx)
def setup(self, fl_ctx: FLContext):
(self.train_images, self.train_labels), (
self.test_images,
self.test_labels,
) = tf.keras.datasets.mnist.load_data()
self.train_images, self.test_images = (
self.train_images / 255.0,
self.test_images / 255.0,
)
# simulate separate datasets for each client by dividing MNIST dataset in half
client_name = fl_ctx.get_identity_name()
if client_name == "site-1":
self.train_images = self.train_images[: len(self.train_images) // 2]
self.train_labels = self.train_labels[: len(self.train_labels) // 2]
self.test_images = self.test_images[: len(self.test_images) // 2]
self.test_labels = self.test_labels[: len(self.test_labels) // 2]
elif client_name == "site-2":
self.train_images = self.train_images[len(self.train_images) // 2 :]
self.train_labels = self.train_labels[len(self.train_labels) // 2 :]
self.test_images = self.test_images[len(self.test_images) // 2 :]
self.test_labels = self.test_labels[len(self.test_labels) // 2 :]
model = Net()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
_ = model(tf.keras.Input(shape=(28, 28)))
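        # record layer names in index order; execute() checks these to restore any layers the server zeroed out (e.g. by the ExcludeVars filter above)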
self.var_list = [model.get_layer(index=index).name for index in range(len(model.get_weights()))]
self.model = model
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""
This function is an extended function from the super class.
As a supervised learning based trainer, the train function will run
evaluate and train engines based on model weights from `shareable`.
After finishing training, a new `Shareable` object will be submitted
to server for aggregation.
Args:
task_name: dispatched task
shareable: the `Shareable` object received from server.
fl_ctx: the `FLContext` object received from server.
abort_signal: if triggered, the training will be aborted.
Returns:
a new `Shareable` object to be submitted to server for aggregation.
"""
# retrieve model weights download from server's shareable
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
if task_name != "train":
return make_reply(ReturnCode.TASK_UNKNOWN)
dxo = from_shareable(shareable)
model_weights = dxo.data
# use previous round's client weights to replace excluded layers from server
prev_weights = {
self.model.get_layer(index=key).name: value for key, value in enumerate(self.model.get_weights())
}
ordered_model_weights = {key: model_weights.get(key) for key in prev_weights}
for key in self.var_list:
value = ordered_model_weights.get(key)
if np.all(value == 0):
ordered_model_weights[key] = prev_weights[key]
# update local model weights with received weights
self.model.set_weights(list(ordered_model_weights.values()))
# adjust LR or other training time info as needed
# such as callback in the fit function
self.model.fit(
self.train_images,
self.train_labels,
epochs=self.epochs_per_round,
validation_data=(self.test_images, self.test_labels),
)
# report updated weights in shareable
weights = {self.model.get_layer(index=key).name: value for key, value in enumerate(self.model.get_weights())}
dxo = DXO(data_kind=DataKind.WEIGHTS, data=weights)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
new_shareable = dxo.to_shareable()
return new_shareable
| NVFlare-main | examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/trainer.py |