python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
from grpc_tools import protoc

# Arguments forwarded verbatim to protoc (first entry is the dummy argv[0]).
# NOTE(review): the include dir is 'protos' but the input file lives under
# 'proto/' -- confirm the directory name against the repository layout.
_PROTOC_ARGS = (
    '',
    '-Iprotos',
    '--python_out=.',
    '--grpc_python_out=.',
    'proto/riva_nlp.proto',
)

protoc.main(_PROTOC_ARGS)
| speechsquad-master | reference/qa/run_codegen.py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: fileencoding=utf-8
#
# Copyright © 2009 Adrian Perez <[email protected]>
#
# Distributed under terms of the GPLv2 license or newer.
#
# Frank Marien ([email protected]) 6 Sep 2012
# - quick fixes for 5.1 binary protocol
# - updated to python 3
# - fixed for larger packet sizes (possible on lo interface)
# - fixed comment typo (decode_network_string decodes a string)
"""
Collectd network protocol implementation.
"""
import socket
import struct
import platform  # retained: kept for backward compatibility of the module namespace
import sys

# BUG FIX: the original compared platform.python_version() < '2.8.0' as
# *strings*.  That is a lexicographic comparison and wrong in general
# (e.g. '2.10.0' sorts before '2.8.0').  Compare the numeric
# sys.version_info tuple instead.
if sys.version_info[0] < 3:
    # Python 2: io.StringIO does not accept byte strings, use the
    # legacy StringIO module there.
    from StringIO import StringIO
else:
    from io import StringIO
from datetime import datetime
from copy import deepcopy
DEFAULT_PORT = 25826
"""Default port"""

DEFAULT_IPv4_GROUP = "239.192.74.66"
"""Default IPv4 multicast group"""

DEFAULT_IPv6_GROUP = "ff18::efc0:4a42"
"""Default IPv6 multicast group"""

# Divisor converting high-resolution (2^-30 second) time/interval values
# to float seconds; used for the *_HR part types below.
HR_TIME_DIV = (2.0**30)

# Message kinds (part type codes of the collectd binary protocol)
TYPE_HOST = 0x0000
TYPE_TIME = 0x0001
TYPE_TIME_HR = 0x0008
TYPE_PLUGIN = 0x0002
TYPE_PLUGIN_INSTANCE = 0x0003
TYPE_TYPE = 0x0004
TYPE_TYPE_INSTANCE = 0x0005
TYPE_VALUES = 0x0006
TYPE_INTERVAL = 0x0007
TYPE_INTERVAL_HR = 0x0009

# For notifications
TYPE_MESSAGE = 0x0100
TYPE_SEVERITY = 0x0101
# DS kinds
DS_TYPE_COUNTER = 0
DS_TYPE_GAUGE = 1
DS_TYPE_DERIVE = 2
DS_TYPE_ABSOLUTE = 3

# Pre-compiled struct formats used by the protocol decoders.
header = struct.Struct("!2H")   # part type + part length, network order
number = struct.Struct("!Q")    # 64-bit unsigned integer, network order
short = struct.Struct("!H")     # 16-bit unsigned integer, network order
double = struct.Struct("<d")    # collectd encodes doubles little-endian

def decode_network_values(ptype, plen, buf):
    """Decodes a list of DS values in collectd network format.

    Returns a list of (ds_type, value) tuples.
    Raises ValueError for an unsupported DS type.
    """
    nvalues = short.unpack_from(buf, header.size)[0]
    off = header.size + short.size + nvalues
    valskip = double.size
    # Check whether our expected packet size is the reported one
    assert ((valskip + 1) * nvalues + short.size + header.size) == plen
    assert double.size == number.size
    result = []
    # BUG FIX: iterating a bytes object on Python 3 yields ints, so the
    # original ord(x) raised TypeError.  bytearray yields ints on both
    # Python 2 and Python 3.
    for dstype in bytearray(buf[header.size + short.size:off]):
        if dstype == DS_TYPE_GAUGE:
            # Gauges are doubles; the other three kinds are 64-bit ints.
            result.append((dstype, double.unpack_from(buf, off)[0]))
        elif dstype in (DS_TYPE_COUNTER, DS_TYPE_DERIVE, DS_TYPE_ABSOLUTE):
            result.append((dstype, number.unpack_from(buf, off)[0]))
        else:
            raise ValueError("DS type %i unsupported" % dstype)
        off += valskip
    return result
def decode_network_number(ptype, plen, buf):
    """Decodes a number (64-bit unsigned) from collectd network format."""
    (value,) = number.unpack_from(buf, header.size)
    return value
def decode_network_string(msgtype, plen, buf):
    """Decodes a string from collectd network format.

    The payload is the part body minus its trailing NUL terminator.
    NOTE(review): on Python 3 ``buf`` is ``bytes``, so this returns a
    ``bytes`` slice rather than ``str`` -- confirm callers expect that.
    """
    return buf[header.size:plen-1]
# Mapping of message types to decoding functions.
# Every decoder has the signature (part_type, part_length, buffer) where
# `buffer` starts at the beginning of the part being decoded.
_decoders = {
    TYPE_VALUES : decode_network_values,
    TYPE_TIME : decode_network_number,
    TYPE_TIME_HR : decode_network_number,
    TYPE_INTERVAL : decode_network_number,
    TYPE_INTERVAL_HR : decode_network_number,
    TYPE_HOST : decode_network_string,
    TYPE_PLUGIN : decode_network_string,
    TYPE_PLUGIN_INSTANCE: decode_network_string,
    TYPE_TYPE : decode_network_string,
    TYPE_TYPE_INSTANCE : decode_network_string,
    TYPE_MESSAGE : decode_network_string,
    TYPE_SEVERITY : decode_network_number,
}
def decode_network_packet(buf):
    """Decodes a network packet in collectd format.

    Yields (part_type, decoded_value) tuples, one per part.
    Raises ValueError on truncated, undersized or unrecognized parts.
    """
    off = 0
    blen = len(buf)
    while off < blen:
        ptype, plen = header.unpack_from(buf, off)
        if plen > blen - off:
            raise ValueError("Packet longer than amount of data in buffer")
        # BUG FIX: a malformed part reporting plen < header.size (e.g. 0)
        # would never advance `off`, looping forever on bad input.
        if plen < header.size:
            raise ValueError("Packet part length %i too small" % plen)
        if ptype not in _decoders:
            raise ValueError("Message type %i not recognized" % ptype)
        yield ptype, _decoders[ptype](ptype, plen, buf[off:])
        off += plen
class Data(object):
    """Identification fields shared by value lists and notifications."""
    # Class-level defaults; instances override them via __init__ kwargs.
    time = 0
    host = None
    plugin = None
    plugininstance = None
    type = None
    typeinstance = None

    def __init__(self, **kw):
        # FIX (idiom): plain loop instead of a list comprehension that was
        # executed only for its setattr side effects.
        for name, value in kw.items():
            setattr(self, name, value)

    @property
    def datetime(self):
        """Message timestamp as a naive local datetime."""
        return datetime.fromtimestamp(self.time)

    @property
    def source(self):
        """Slash-separated host/plugin/plugininstance/type/typeinstance id.

        Empty (falsy) components are skipped entirely.
        """
        buf = StringIO()
        if self.host:
            buf.write(str(self.host))
        if self.plugin:
            buf.write("/")
            buf.write(str(self.plugin))
        if self.plugininstance:
            buf.write("/")
            buf.write(str(self.plugininstance))
        if self.type:
            buf.write("/")
            buf.write(str(self.type))
        if self.typeinstance:
            buf.write("/")
            buf.write(str(self.typeinstance))
        return buf.getvalue()

    def __str__(self):
        return "[%i] %s" % (self.time, self.source)
class Notification(Data):
    """A collectd notification message with a severity level."""
    FAILURE = 1
    WARNING = 2
    OKAY = 4

    SEVERITY = {
        FAILURE: "FAILURE",
        WARNING: "WARNING",
        OKAY : "OKAY",
    }

    __severity = 0
    message = ""

    @property
    def severity(self):
        return self.__severity

    @severity.setter
    def severity(self, value):
        # Anything that is not a known severity level is silently ignored.
        if value in (self.FAILURE, self.WARNING, self.OKAY):
            self.__severity = value

    @property
    def severitystring(self):
        """Human-readable severity name, or 'UNKNOWN'."""
        return self.SEVERITY.get(self.severity, "UNKNOWN")

    def __str__(self):
        return "%s [%s] %s" % (
            super(Notification, self).__str__(),
            self.severitystring,
            self.message)
class Values(Data, list):
    """A list of decoded DS values tagged with Data identification fields."""
    def __str__(self):
        ident = Data.__str__(self)
        contents = list.__str__(self)
        return "%s %s" % (ident, contents)
def interpret_opcodes(iterable):
    """Turn a stream of decoded (part_type, value) pairs into messages.

    Parts accumulate state into one shared Values and one shared
    Notification instance; a TYPE_VALUES part completes (and yields) a
    value list, a TYPE_MESSAGE part completes (and yields) a notification.
    Deep copies are yielded so later parts cannot mutate earlier results.
    """
    vl = Values()
    nt = Notification()
    for kind, data in iterable:
        if kind == TYPE_TIME:
            vl.time = nt.time = data
        elif kind == TYPE_TIME_HR:
            # High-resolution values are scaled to float seconds.
            vl.time = nt.time = data / HR_TIME_DIV
        elif kind == TYPE_INTERVAL:
            vl.interval = data
        elif kind == TYPE_INTERVAL_HR:
            vl.interval = data / HR_TIME_DIV
        elif kind == TYPE_HOST:
            vl.host = nt.host = data
        elif kind == TYPE_PLUGIN:
            vl.plugin = nt.plugin = data
        elif kind == TYPE_PLUGIN_INSTANCE:
            vl.plugininstance = nt.plugininstance = data
        elif kind == TYPE_TYPE:
            vl.type = nt.type = data
        elif kind == TYPE_TYPE_INSTANCE:
            vl.typeinstance = nt.typeinstance = data
        elif kind == TYPE_SEVERITY:
            nt.severity = data
        elif kind == TYPE_MESSAGE:
            nt.message = data
            # A message part terminates a notification.
            yield deepcopy(nt)
        elif kind == TYPE_VALUES:
            vl[:] = data
            # A values part terminates a value list.
            yield deepcopy(vl)
class Reader(object):
    """Network reader for collectd data.

    Listens on the network in a given address, which can be a multicast
    group address, and handles reading data when it arrives.
    """
    addr = None
    host = None
    port = DEFAULT_PORT
    # Maximum datagram size accepted per recv() call; larger-than-default
    # packets are possible on the loopback interface.
    BUFFER_SIZE = 16384

    def __init__(self, host=None, port=DEFAULT_PORT, multicast=False):
        """Bind a UDP socket, joining the multicast group if requested.

        With no host, defaults to the collectd IPv4 multicast group.
        Raises NotImplementedError for IPv6 multicast.
        """
        if host is None:
            multicast = True
            host = DEFAULT_IPv4_GROUP
        self.host, self.port = host, port
        # Crude address-family detection: any colon implies IPv6.
        self.ipv6 = ":" in self.host
        family, socktype, proto, canonname, sockaddr = socket.getaddrinfo(
            None if multicast else self.host, self.port,
            socket.AF_INET6 if self.ipv6 else socket.AF_UNSPEC,
            socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)[0]
        self._sock = socket.socket(family, socktype, proto)
        self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._sock.bind(sockaddr)
        if multicast:
            # SO_REUSEPORT is not available on all platforms.
            if hasattr(socket, "SO_REUSEPORT"):
                self._sock.setsockopt(
                    socket.SOL_SOCKET,
                    socket.SO_REUSEPORT, 1)
            val = None
            if family == socket.AF_INET:
                # NOTE(review): assert-based validation disappears under
                # python -O; consider raising ValueError instead.
                assert "." in self.host
                val = struct.pack("4sl",
                    socket.inet_aton(self.host), socket.INADDR_ANY)
            elif family == socket.AF_INET6:
                raise NotImplementedError("IPv6 support not ready yet")
            else:
                raise ValueError("Unsupported network address family")
            self._sock.setsockopt(
                socket.IPPROTO_IPV6 if self.ipv6 else socket.IPPROTO_IP,
                socket.IP_ADD_MEMBERSHIP, val)
            self._sock.setsockopt(
                socket.IPPROTO_IPV6 if self.ipv6 else socket.IPPROTO_IP,
                socket.IP_MULTICAST_LOOP, 0)

    def receive(self):
        """Receives a single raw collect network packet."""
        return self._sock.recv(self.BUFFER_SIZE)

    def decode(self, buf=None):
        """Decodes a given buffer or the next received packet."""
        if buf is None:
            buf = self.receive()
        return decode_network_packet(buf)

    def interpret(self, iterable=None):
        """Interprets a sequence of parts (or a raw buffer) into messages."""
        if iterable is None:
            iterable = self.decode()
        # NOTE(review): on Python 3 received packets are bytes, so this
        # str check presumably never matches raw packets -- confirm.
        if isinstance(iterable, str):
            iterable = self.decode(iterable)
        return interpret_opcodes(iterable)
| swiftstack-collectd-master | contrib/collectd_network.py |
#-*- coding: ISO-8859-1 -*-
# collect.py: the python collectd-unixsock module.
#
# Requires collectd to be configured with the unixsock plugin, like so:
#
# LoadPlugin unixsock
# <Plugin unixsock>
# SocketFile "/var/run/collectd-unixsock"
# SocketPerms "0775"
# </Plugin>
#
# Copyright (C) 2008 Clay Loveless <[email protected]>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the author be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import socket
import sys
class Collectd():
    """Client for collectd's unixsock plugin plain-text protocol.

    NOTE: this module is Python 2 only -- it uses `print` statements and
    the legacy `except socket.error, (errno, errstr)` unpacking syntax.
    """

    def __init__(self, path='/var/run/collectd-unixsock', noisy=False):
        # noisy=True echoes every line sent/received to stdout.
        self.noisy = noisy
        self.path = path
        self._sock = self._connect()

    # NOTE(review): mutable default arguments below are only read, never
    # mutated, so they are safe here -- but fragile if the code changes.
    def flush(self, timeout=None, plugins=[], identifiers=[]):
        """Send a FLUSH command.

        Full documentation:
        http://collectd.org/wiki/index.php/Plain_text_protocol#FLUSH
        """
        # have to pass at least one plugin or identifier
        if not plugins and not identifiers:
            return None
        args = []
        if timeout:
            args.append("timeout=%s" % timeout)
        if plugins:
            plugin_args = map(lambda x: "plugin=%s" % x, plugins)
            args.extend(plugin_args)
        if identifiers:
            identifier_args = map(lambda x: "identifier=%s" % x, identifiers)
            args.extend(identifier_args)
        return self._cmd('FLUSH %s' % ' '.join(args))

    def getthreshold(self, identifier):
        """Send a GETTHRESHOLD command.

        Full documentation:
        http://collectd.org/wiki/index.php/Plain_text_protocol#GETTHRESHOLD
        """
        numvalues = self._cmd('GETTHRESHOLD "%s"' % identifier)
        lines = []
        # _cmd returns False/None on failure and an int status otherwise.
        if not numvalues or numvalues < 0:
            raise KeyError("Identifier '%s' not found" % identifier)
        lines = self._readlines(numvalues)
        return lines

    def getval(self, identifier, flush_after=True):
        """Send a GETVAL command.

        Also flushes the identifier if flush_after is True.
        Full documentation:
        http://collectd.org/wiki/index.php/Plain_text_protocol#GETVAL
        """
        numvalues = self._cmd('GETVAL "%s"' % identifier)
        lines = []
        if not numvalues or numvalues < 0:
            raise KeyError("Identifier '%s' not found" % identifier)
        lines = self._readlines(numvalues)
        if flush_after:
            self.flush(identifiers=[identifier])
        return lines

    def listval(self):
        """Send a LISTVAL command.

        Full documentation:
        http://collectd.org/wiki/index.php/Plain_text_protocol#LISTVAL
        """
        numvalues = self._cmd('LISTVAL')
        lines = []
        if numvalues:
            lines = self._readlines(numvalues)
        return lines

    def putnotif(self, message, options={}):
        """Send a PUTNOTIF command.

        Options must be passed as a Python dictionary. Example:
        options={'severity': 'failure', 'host': 'example.com'}
        Full documentation:
        http://collectd.org/wiki/index.php/Plain_text_protocol#PUTNOTIF
        """
        args = []
        if options:
            options_args = map(lambda x: "%s=%s" % (x, options[x]), options)
            args.extend(options_args)
        args.append('message="%s"' % message)
        return self._cmd('PUTNOTIF %s' % ' '.join(args))

    def putval(self, identifier, values, options={}):
        """Send a PUTVAL command.

        Options must be passed as a Python dictionary. Example:
        options={'interval': 10}
        Full documentation:
        http://collectd.org/wiki/index.php/Plain_text_protocol#PUTVAL
        """
        args = []
        args.append('"%s"' % identifier)
        if options:
            options_args = map(lambda x: "%s=%s" % (x, options[x]), options)
            args.extend(options_args)
        values = map(str, values)
        # Values are joined colon-separated, e.g. "N:42:17".
        args.append(':'.join(values))
        return self._cmd('PUTVAL %s' % ' '.join(args))

    def _cmd(self, c):
        """Send one command line, reconnecting and retrying once on error."""
        try:
            return self._cmdattempt(c)
        except socket.error, (errno, errstr):
            sys.stderr.write("[error] Sending to socket failed: [%d] %s\n"
                             % (errno, errstr))
            self._sock = self._connect()
            return self._cmdattempt(c)

    def _cmdattempt(self, c):
        """Send `c` and parse the status line.

        Returns the (nonzero) int status code, None when the connection
        closed, or False when the socket is unavailable / status is 0.
        """
        if self.noisy:
            print "[send] %s" % c
        if not self._sock:
            sys.stderr.write("[error] Socket unavailable. Can not send.")
            return False
        self._sock.send(c + "\n")
        status_message = self._readline()
        if self.noisy:
            print "[receive] %s" % status_message
        if not status_message:
            return None
        # Status line format: "<code> <message>".
        code, message = status_message.split(' ', 1)
        if int(code):
            return int(code)
        return False

    def _connect(self):
        """Open the unix domain socket; returns None on failure."""
        try:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.path)
            if self.noisy:
                print "[socket] connected to %s" % self.path
            return sock
        except socket.error, (errno, errstr):
            sys.stderr.write("[error] Connecting to socket failed: [%d] %s"
                             % (errno, errstr))
            return None

    def _readline(self):
        """Read single line from socket"""
        if not self._sock:
            sys.stderr.write("[error] Socket unavailable. Can not read.")
            return None
        try:
            data = ''
            buf = []
            # Byte-at-a-time read until newline (newline is not returned).
            recv = self._sock.recv
            while data != "\n":
                data = recv(1)
                if not data:
                    break
                if data != "\n":
                    buf.append(data)
            return ''.join(buf)
        except socket.error, (errno, errstr):
            sys.stderr.write("[error] Reading from socket failed: [%d] %s"
                             % (errno, errstr))
            self._sock = self._connect()
            return None

    def _readlines(self, sizehint=0):
        """Read multiple lines from socket"""
        total = 0
        # NOTE(review): `list` shadows the builtin of the same name.
        list = []
        while True:
            line = self._readline()
            if not line:
                break
            list.append(line)
            total = len(list)
            # Stop once the advertised number of lines has been read.
            if sizehint and total >= sizehint:
                break
        return list

    def __del__(self):
        # Best-effort close; never raise from a destructor.
        if not self._sock:
            return
        try:
            self._sock.close()
        except socket.error, (errno, errstr):
            sys.stderr.write("[error] Closing socket failed: [%d] %s"
                             % (errno, errstr))
if __name__ == '__main__':
    """Collect values from socket and dump to STDOUT"""
    c = Collectd('/var/run/collectd-unixsock', noisy=True)
    # NOTE(review): `list` shadows the builtin of the same name.
    list = c.listval()
    for val in list:
        # LISTVAL lines look like "<epoch> <identifier>".
        stamp, identifier = val.split()
        print "\n%s" % identifier
        print "\tUpdate time: %s" % stamp
        values = c.getval(identifier)
        print "\tValue list: %s" % ', '.join(values)
        # don't fetch thresholds by default because collectd will crash
        # if there is no treshold for the given identifier
        #thresholds = c.getthreshold(identifier)
        #print "\tThresholds: %s" % ', '.join(thresholds)
| swiftstack-collectd-master | contrib/collectd_unixsock.py |
#!/usr/bin/env python
# vim: sts=4 sw=4 et
# Simple unicast proxy to send collectd traffic to another host/port.
# Copyright (C) 2007 Pavel Shramov <shramov at mexmat.net>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; only version 2 of the License is applicable.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
"""
Simple unicast proxy for collectd (>= 4.0).
Binds to 'local' address and forwards all traffic to 'remote'.
"""
import socket
import struct

""" Local multicast group/port"""
local = ("239.192.74.66", 25826)
""" Address to send packets """
remote = ("grid.pp.ru", 35826)

# Listening socket, joined to the local collectd multicast group.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
mreq = struct.pack("4sl", socket.inet_aton(local[0]), socket.INADDR_ANY)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.bind(local)

# Dedicated unicast socket used for forwarding.
out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

if __name__ == "__main__":
    while True:
        (buf, addr) = sock.recvfrom(2048)
        # BUG FIX: forward on the dedicated unicast socket `out`; the
        # original sent on the multicast `sock` and left `out` unused.
        out.sendto(buf, remote)
| swiftstack-collectd-master | contrib/network-proxy.py |
#!/usr/bin/python
###############################################################################
# WARNING! Importing this script will break the exec plugin! #
###############################################################################
# Use this if you want to create new processes from your python scripts. #
# Normally you will get a OSError exception when the new process terminates #
# because collectd will ignore the SIGCHLD python is waiting for. #
# This script will restore the default SIGCHLD behavior so python scripts can #
# create new processes without errors. #
###############################################################################
# WARNING! Importing this script will break the exec plugin! #
###############################################################################
import signal
import collectd
def init():
    """Restore default SIGCHLD handling once collectd initializes.

    collectd ignores SIGCHLD, which makes Python's child-process reaping
    raise OSError; resetting to SIG_DFL lets python plugins spawn new
    processes without errors (see the warning banner above).
    """
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)

collectd.register_init(init)
| swiftstack-collectd-master | contrib/python/getsigchld.py |
import numpy as np
from matplotlib import pyplot as plt
import os

DALI_EXTRA = os.environ['DALI_EXTRA_PATH']
cat = plt.imread(os.path.join(DALI_EXTRA, 'db/single/tiff/0/kitty-2948404_640.tiff'))

# (EXIF orientation name, reference image) pairs -- presumably the decoder's
# expected output for each orientation tag; confirm against consumer tests.
mirrored = np.flip(cat, axis=1)
images = [
    ('horizontal', cat),
    ('mirror_horizontal', mirrored),
    ('rotate_180', np.rot90(cat, 2)),
    ('mirror_vertical', np.flip(cat, axis=0)),
    ('mirror_horizontal_rotate_270', np.rot90(mirrored)),
    ('rotate_90', np.rot90(cat, 3)),
    ('mirror_horizontal_rotate_90', np.rot90(mirrored, 3)),
    ('rotate_270', np.rot90(cat)),
]

for name, img in images:
    path = os.path.join(DALI_EXTRA, f'db/imgcodec/orientation/kitty-2948404_640_{name}.npy')
    np.save(path, img.astype('uint8'))
| DALI_extra-main | db/imgcodec/orientation/make.py |
#!/bin/python3
from os import system
from PIL import Image
import numpy as np
import cv2
# Bitdepth conversion is done via ImageMagick
def convert_bitdepth(name, bitdepth):
    """Convert ../../single/jpeg2k/<name>.jp2 to the given bit depth.

    Runs ImageMagick's `convert` and returns the output file stem
    (without the .jp2 extension).
    """
    # FIX (idiom): don't shadow the `input` builtin.
    src = '../../single/jpeg2k/' + name
    dst = name + f'-{bitdepth}bit'
    system(f'convert {src}.jp2 -depth {bitdepth} {dst}.jp2')
    return dst
def gen_reference(name):
    """Save a decoded RGB reference of <name>.jp2 as <name>.npy."""
    # BUG FIX: cv2.imread() has no `mode` keyword (that is imageio's API);
    # it takes only an optional integer flags argument and decodes to BGR,
    # so read normally and convert explicitly to RGB.
    bgr = cv2.imread(name + '.jp2')
    np.save(name, cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
# Generate 5- and 12-bit variants of the same source image plus their
# decoded .npy references.
gen_reference(convert_bitdepth('0/cat-1245673_640', 5))
gen_reference(convert_bitdepth('0/cat-1245673_640', 12))
| DALI_extra-main | db/imgcodec/jpeg2k/generating.py |
#!/bin/python3
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
def save_reference(filename, angle, flip_x, flip_y):
    """Write <filename>.npy from <filename>.jpg, flipped then rotated.

    Flips are applied before the rotation; `angle` is PIL's rotate()
    angle (degrees, with expand=True so the canvas grows as needed).
    """
    image = Image.open(filename + '.jpg')
    if flip_x:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)
    if flip_y:
        image = image.transpose(Image.FLIP_TOP_BOTTOM)
    oriented = image.rotate(angle, expand=True)
    np.save(filename, np.asarray(oriented))
# One reference per EXIF-orientation variant of the padlock image.
# NOTE(review): the (angle, flip_x, flip_y) triples presumably *undo* the
# orientation encoded in each file name -- confirm against consumer tests.
save_reference('orientation/padlock-406986_640_horizontal', 0, False, False)
save_reference('orientation/padlock-406986_640_mirror_horizontal_rotate_270', 90, True, False)
save_reference('orientation/padlock-406986_640_mirror_vertical', 0, False, True)
save_reference('orientation/padlock-406986_640_no_orientation', 0, False, False)
save_reference('orientation/padlock-406986_640_rotate_270', 90, False, False)
save_reference('orientation/padlock-406986_640_mirror_horizontal', 0, True, False)
save_reference('orientation/padlock-406986_640_mirror_horizontal_rotate_90', 270, True, False)
save_reference('orientation/padlock-406986_640_no_exif', 0, False, False)
save_reference('orientation/padlock-406986_640_rotate_180', 180, False, False)
save_reference('orientation/padlock-406986_640_rotate_90', 270, False, False)
| DALI_extra-main | db/imgcodec/jpeg/generate.py |
import os
import numpy as np
from matplotlib import pyplot as plt
def to_uint8(float_image):
    """Quantize a [0, 1] float image to uint8 (rounding, not truncating)."""
    quantized = np.round(float_image * 255)
    return quantized.astype('uint8')
def save(name, float_image):
    """Save <name> as both float32 and uint8 .npy files under output_path."""
    as_float = float_image.astype('float32')
    np.save(f'{output_path}/cat-111793_640_{name}_float.npy', as_float)
    np.save(f'{output_path}/cat-111793_640_{name}_uint8.npy', to_uint8(as_float))
def ycbcr_to_rgb(ycbcr):
    """Convert BT.601 'studio swing' YCbCr (floats in [0, 1]) to RGB.

    Returns a new array; the input is left untouched.
    """
    # BUG FIX: the original used in-place -= and *= and therefore mutated
    # the caller's array as a side effect.
    shifted = (ycbcr - np.asarray([0.0625, 0.5, 0.5])) * np.asarray([1.164, 1, 1])
    return np.matmul(shifted, np.asarray([[1, 0, 1.596],
                                          [1, -0.392, -0.813],
                                          [1, 2.017, 0]]).T)
def rgb_to_ycbcr(rgb):
    """Convert [0, 1] RGB to BT.601 'studio swing' YCbCr."""
    matrix = np.asarray([[0.257, 0.504, 0.098],
                         [-0.148, -0.291, 0.439],
                         [0.439, -0.368, -0.071]])
    offset = np.asarray([0.0625, 0.5, 0.5])
    return np.matmul(rgb, matrix.T) + offset
def rgb_to_gray(rgb):
    """Rec.601 luma of an RGB image, returned with a trailing unit channel."""
    luma = np.dot(rgb, np.asarray([0.299, 0.587, 0.114]))
    return luma[..., np.newaxis]
def gray_to_ycbcr(gray):
    """Embed a grayscale image into YCbCr with neutral (0.5) chroma."""
    luma = gray * (0.257 + 0.504 + 0.098) + 0.0625
    neutral = np.full_like(gray, 0.5)
    return np.dstack([luma, neutral, neutral])
def rgb_to_bgr(rgb):
    """Reverse the channel axis of an HWC image (RGB <-> BGR)."""
    return rgb[:, :, ::-1]
def gray_to_rgb(gray):
    """Replicate a single channel three times along the channel axis."""
    return np.dstack((gray,) * 3)
dali_extra_path = os.environ['DALI_EXTRA_PATH']
output_path = os.path.join(dali_extra_path, 'db/imgcodec/colorspaces/')
tiff = plt.imread(os.path.join(dali_extra_path, 'db/single/tiff/0/cat-111793_640.tiff'))
# Normalize to [0, 1] floats before converting between color spaces.
rgb = tiff.astype('float32')/255
ycbcr = rgb_to_ycbcr(rgb)
gray = rgb_to_gray(rgb)
bgr = rgb_to_bgr(rgb)
# References converted directly from the RGB source ...
save('rgb', rgb)
save('ycbcr', ycbcr)
save('gray', gray)
save('bgr', bgr)
# ... and references round-tripped through grayscale.
save('rgb_from_gray', gray_to_rgb(gray))
save('ycbcr_from_gray', gray_to_ycbcr(gray))
save('bgr_from_gray', rgb_to_bgr(gray_to_rgb(gray)))
| DALI_extra-main | db/imgcodec/colorspaces/make.py |
import os
import tifffile
import matplotlib.pyplot as plt
import numpy as np
dali_extra_path = os.environ['DALI_EXTRA_PATH']
# Source: the float RGB reference produced by the colorspaces make.py script.
source_path = os.path.join(dali_extra_path, 'db/imgcodec/colorspaces/cat-111793_640_rgb_float.npy')
def output_path(color_format, layout):
    """Destination .npy path for the given color format and layout tag."""
    out_dir = os.path.join(dali_extra_path, 'db/imgcodec/colorspaces/layouts')
    return os.path.join(out_dir, f'cat-111793_640_{color_format}_float_{layout}.npy')
def rgb_to_ycbcr(rgb):
    """BT.601 'studio swing' RGB -> YCbCr, returned as float32."""
    matrix = np.asarray([[0.257, 0.504, 0.098],
                         [-0.148, -0.291, 0.439],
                         [0.439, -0.368, -0.071]])
    shifted = np.matmul(rgb, matrix.T) + np.asarray([0.0625, 0.5, 0.5])
    return shifted.astype('float32')
# NOTE(review): dir_path appears unused below -- candidate for removal.
dir_path = os.path.join(dali_extra_path, 'db/imgcodec/colorspaces/')
hwc_rgb = np.load(source_path).astype('float32')
hwc_ycbcr = rgb_to_ycbcr(hwc_rgb)
# Save each color space in HWC plus the two permuted layouts (HCW, CHW).
np.save(output_path('rgb', 'hwc'), hwc_rgb)
np.save(output_path('rgb', 'hcw'), hwc_rgb.transpose(0,2,1))
np.save(output_path('rgb', 'chw'), hwc_rgb.transpose(2,0,1))
np.save(output_path('ycbcr', 'hwc'), hwc_ycbcr)
np.save(output_path('ycbcr', 'hcw'), hwc_ycbcr.transpose(0,2,1))
np.save(output_path('ycbcr', 'chw'), hwc_ycbcr.transpose(2,0,1))
| DALI_extra-main | db/imgcodec/colorspaces/layouts/make.py |
import os
import tifffile
import numpy as np
import tempfile
dali_extra_path = os.environ['DALI_EXTRA_PATH']
tiff_path = os.path.join(dali_extra_path, 'db/imgcodec/tiff/bitdepths')
ref_path = os.path.join(dali_extra_path, 'db/imgcodec/tiff/bitdepths/reference')
h, w = 123, 321
# Horizontal and vertical [0, 1] gradients combined into an RGB test image.
hgrad = np.tile(np.linspace(0., 1., w), (h, 1))
vgrad = np.tile(np.linspace(0., 1., h), (w, 1)).T
image = np.dstack([hgrad, vgrad, 1. - hgrad]) # RGB gradient
def filename(bits):
    """Path of the generated TIFF at the given bit depth."""
    return os.path.join(tiff_path, 'rgb_%dbit.tiff' % bits)
def ref_filename(bits):
    """Path of the exact-match uint reference for the given bit depth."""
    return os.path.join(ref_path, 'rgb_%dbit.tiff.npy' % bits)
def ref_float_filename(bits):
    """Path of the float reference for the given bit depth."""
    return os.path.join(ref_path, 'rgb_%dbit_float.tiff.npy' % bits)
standard_bitdepths = [8, 16, 32]
for bits in range(1, 32+1):
    # Float reference: the gradient quantized to 2^bits - 1 levels.
    ref = np.round(image * (2**bits - 1)) / (2**bits - 1)
    np.save(ref_float_filename(bits), ref)
    if bits in standard_bitdepths:
        # For standard bit-depths it's possible to test for exact match
        np.save(ref_filename(bits), (image * (2**bits - 1)).astype(f'uint{bits}'))
    if bits in standard_bitdepths:
        # For images in standard bit-depths save the image as is
        tifffile.imwrite(filename(bits), (image * (2**bits - 1)).astype(f'uint{bits}'))
    else:
        # For non-standard bit-depths make a 32-bit image and convert it.
        # (ImageMagick `convert` performs the depth reduction.)
        tmp = tempfile.NamedTemporaryFile()
        tifffile.imwrite(tmp, (ref * (2**32 - 1)).astype('uint32'))
        os.system(f'convert -type TrueColor -define tiff:bits-per-sample={bits} -depth {bits} {tmp.name} {filename(bits)}')
| DALI_extra-main | db/imgcodec/tiff/bitdepths/make.py |
import numpy as np
np.random.seed(123456)  # fixed seed so the generated test data is reproducible
shapes = [(10, 22, 3), (30, 20, 3), (100, 5, 3)]
dtype = np.uint8
# Random uint8 HWC inputs, one per shape, saved as input{i}.npy.
arrays = [np.array(np.random.rand(*sh) * 255, dtype=dtype) for sh in shapes]
for i, arr in enumerate(arrays):
    np.save(f"./input{i}.npy", arr)
def gen_crop():
    """Write cropped versions of each input as output{i}_c.npy."""
    start = [(2, 8, 0), (22, 2, 0), (55, 4, 0)]
    end = [(8, 19, 3), (28, 4, 3), (100, 5, 3)]
    for i, (arr, lo, hi) in enumerate(zip(arrays, start, end)):
        y0, x0, c0 = lo
        y1, x1, c1 = hi
        cropped = arr[y0:y1, x0:x1, c0:c1]
        np.save(f"./output{i}_c.npy", cropped)
def gen_crop_mirror():
    """Write cropped and (optionally) mirrored versions as output{i}_cm.npy."""
    start = [(2, 8, 0), (22, 2, 0), (55, 4, 0)]
    end = [(8, 19, 3), (28, 4, 3), (100, 5, 3)]
    flip = [(False, True), (True, True), (False, False)]
    for i, (arr, lo, hi, (flip_y, flip_x)) in enumerate(zip(arrays, start, end, flip)):
        y0, x0, c0 = lo
        y1, x1, c1 = hi
        out = arr[y0:y1, x0:x1, c0:c1]
        if flip_y:
            out = np.flip(out, axis=0)
        if flip_x:
            out = np.flip(out, axis=1)
        np.save(f"./output{i}_cm.npy", out)
def gen_crop_mirror_normalize():
    """Write normalized, cropped, mirrored versions as output{i}_cmn.npy."""
    start = [(2, 8, 0), (22, 2, 0), (55, 4, 0)]
    end = [(8, 19, 3), (28, 4, 3), (100, 5, 3)]
    flip = [(False, True), (True, True), (False, False)]
    mean = [255 * np.array([0.485, 0.456, 0.406], dtype=np.float32),
            255 * np.array([0.455, 0.436, 0.416], dtype=np.float32),
            255 * np.array([0.495, 0.466, 0.396], dtype=np.float32)]
    stddev = [255 * np.array([0.229, 0.224, 0.225], dtype=np.float32),
              255 * np.array([0.225, 0.224, 0.221], dtype=np.float32),
              255 * np.array([0.226, 0.229, 0.222], dtype=np.float32)]
    for i, (arr, lo, hi, (flip_y, flip_x)) in enumerate(zip(arrays, start, end, flip)):
        y0, x0, c0 = lo
        y1, x1, c1 = hi
        # Per-channel normalize first, then crop -- matches the original order.
        normalized = (arr.astype(np.float32) - mean[i]) / stddev[i]
        out = normalized[y0:y1, x0:x1, c0:c1]
        if flip_y:
            out = np.flip(out, axis=0)
        if flip_x:
            out = np.flip(out, axis=1)
        np.save(f"./output{i}_cmn.npy", out)
def gen_crop_mirror_normalize_transpose():
    """Write normalized, cropped, mirrored, CHW versions as output{i}_cmnt.npy."""
    start = [(2, 8, 0), (22, 2, 0), (55, 4, 0)]
    end = [(8, 19, 3), (28, 4, 3), (100, 5, 3)]
    flip = [(False, True), (True, True), (False, False)]
    mean = [255 * np.array([0.485, 0.456, 0.406], dtype=np.float32),
            255 * np.array([0.455, 0.436, 0.416], dtype=np.float32),
            255 * np.array([0.495, 0.466, 0.396], dtype=np.float32)]
    stddev = [255 * np.array([0.229, 0.224, 0.225], dtype=np.float32),
              255 * np.array([0.225, 0.224, 0.221], dtype=np.float32),
              255 * np.array([0.226, 0.229, 0.222], dtype=np.float32)]
    for i, (arr, lo, hi, (flip_y, flip_x)) in enumerate(zip(arrays, start, end, flip)):
        y0, x0, c0 = lo
        y1, x1, c1 = hi
        normalized = (arr.astype(np.float32) - mean[i]) / stddev[i]
        out = normalized[y0:y1, x0:x1, c0:c1]
        if flip_y:
            out = np.flip(out, axis=0)
        if flip_x:
            out = np.flip(out, axis=1)
        # Final layout change: HWC -> CHW.
        out = np.transpose(out, (2, 0, 1))
        np.save(f"./output{i}_cmnt.npy", out)
def gen_pad_normalize():
    """Write padded + normalized outputs as output{i}_pn.npy.

    Negative `start` / oversized `end` coordinates denote padding; padded
    regions (including the extra 4th channel) are filled with per-sample
    `fill_values`.
    """
    start = np.array([[-2, 0, 0], [0, -4, 0], [0, 0, 0]])
    end = np.array([[8, 19, 4], [28, 4, 4], [120, 5, 4]])
    mean = [255 * np.array([0.485, 0.456, 0.406], dtype=np.float32),
            255 * np.array([0.455, 0.436, 0.416], dtype=np.float32),
            255 * np.array([0.495, 0.466, 0.396], dtype=np.float32)]
    stddev = [255 * np.array([0.229, 0.224, 0.225], dtype=np.float32),
              255 * np.array([0.225, 0.224, 0.221], dtype=np.float32),
              255 * np.array([0.226, 0.229, 0.222], dtype=np.float32)]
    # Padding channels too
    fill_values = [np.array([255.0, 128.0, 64.0, 32.0], dtype=np.float32) + np.float32(i)
                   for i in range(3)]
    out_sh = end - start
    # FIX (idiom): the save paths below were f-strings with no
    # placeholders (lint F541); plain string literals are equivalent.
    out0 = np.zeros(out_sh[0], dtype=np.float32)
    out0 = out0 + fill_values[0]
    out0[2:10, :, :3] = (arrays[0][0:8, :19, :3].astype(np.float32) - mean[0]) / stddev[0]
    np.save("./output0_pn.npy", out0)
    out1 = np.zeros(out_sh[1], dtype=np.float32)
    out1 = out1 + fill_values[1]
    out1[:, 4:8, :3] = (arrays[1][:28, :4, :3].astype(np.float32) - mean[1]) / stddev[1]
    np.save("./output1_pn.npy", out1)
    out2 = np.zeros(out_sh[2], dtype=np.float32)
    out2 = out2 + fill_values[2]
    out2[:100, :, :3] = (arrays[2][:100, :5, :3].astype(np.float32) - mean[2]) / stddev[2]
    np.save("./output2_pn.npy", out2)
def generate():
    # Regenerate every reference output in one pass.
    gen_crop()
    gen_crop_mirror()
    gen_crop_mirror_normalize()
    gen_crop_mirror_normalize_transpose()
    gen_pad_normalize()

generate()
| DALI_extra-main | db/test_data/crop_mirror_normalize/make.py |
#!/bin/python3
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
import cv2
# https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
def rgb_to_ycbcr(rgb):
    """Convert [0, 1] RGB to BT.601 'studio swing' YCbCr."""
    matrix = np.asarray([[ 0.257,  0.504,  0.098],
                         [-0.148, -0.291,  0.439],
                         [ 0.439, -0.368, -0.071]])
    return np.matmul(rgb, matrix.T) + np.asarray([0.0625, 0.5, 0.5])
def to_uint8(float_image):
    # NOTE(review): the `+ 1` offset maps an input of 1.0 to 256, which
    # wraps to 0 under uint8 -- confirm this bias is intentional (the
    # colorspaces make.py variant of this helper has no such offset).
    return np.round((float_image * 255) + 1).astype('uint8')
def hwc_to_chw(image):
    """Reorder an interleaved (H, W, C) array into planar (C, H, W)."""
    return image.transpose(2, 0, 1)
def save(path, arr):
    """Save `arr` twice: as-is (HWC) and with a '_chw' planar variant."""
    for suffix, data in (("", arr), ("_chw", hwc_to_chw(arr))):
        np.save(path + suffix, data)
def gen_reference(name):
    """Decode ../../jpeg2k/<name>.jp2 with PIL and save .npy references."""
    decoded = Image.open('../../jpeg2k/' + name + '.jp2')
    save(name, np.asarray(decoded))
def gen_reference_roi(name, roi):
    """Like gen_reference, but cropped to `roi` (left, upper, right, lower)."""
    decoded = Image.open('../../jpeg2k/' + name + '.jp2')
    save(name + '_roi', np.asarray(decoded.crop(roi)))
def gen_reference_ycbcr(name):
    """Save the uint8 YCbCr reference for <name>.jp2 (via to_uint8)."""
    decoded = Image.open('../../jpeg2k/' + name + '.jp2')
    normalized = np.asarray(decoded).astype('float32') / 255
    save(name + '_ycbcr', to_uint8(rgb_to_ycbcr(normalized)))
def gen_reference_gray(name):
    """Save the single-channel grayscale reference for <name>.jp2 (OpenCV)."""
    decoded = cv2.imread('../../jpeg2k/' + name + '.jp2', cv2.IMREAD_GRAYSCALE)
    save(name + '_gray', np.expand_dims(decoded, -1))
# Full-image, ROI, YCbCr and grayscale references for each source image.
gen_reference('0/cat-1245673_640')
gen_reference_roi('0/cat-1245673_640', (33, 17, 489, 276))
gen_reference_ycbcr('0/cat-1245673_640')
gen_reference_gray('0/cat-1245673_640')
gen_reference('0/cat-2184682_640')
gen_reference_ycbcr('0/cat-2184682_640')
gen_reference_gray('0/cat-2184682_640')
gen_reference('0/cat-300572_640')
gen_reference_ycbcr('0/cat-300572_640')
gen_reference_gray('0/cat-300572_640')
gen_reference('0/cat-3113513_640')
gen_reference_ycbcr('0/cat-3113513_640')
gen_reference_gray('0/cat-3113513_640')
# ROI-only references for the tiled JPEG2000 variants.
gen_reference_roi('2/tiled-cat-1046544_640', (220, 178, 290, 456))
gen_reference_roi('2/tiled-cat-111793_640', (317, 9, 325, 58))
gen_reference_roi('2/tiled-cat-3113513_640', (1, 2, 600, 200))
| DALI_extra-main | db/single/reference/jpeg2k/generating.py |
#!/bin/python3
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import cv2
import tempfile
def tool(tool_name):
    """Return the shell command string used to invoke *tool_name*.

    The default prefix points at a developer build of libjpeg-turbo with
    lossless support (dev branch until lossless is released, 2.2?).  The
    prefix can now be overridden through the JPEG_TOOL_PREFIX environment
    variable; the default keeps the original hard-coded path, so existing
    setups behave exactly as before.
    """
    default_prefix = ('LD_LIBRARY_PATH=~/git/lossless_libjpeg-turbo/install/lib '
                      '~/git/lossless_libjpeg-turbo/install/bin/')
    prefix = os.environ.get('JPEG_TOOL_PREFIX', default_prefix)
    return f'{prefix}{tool_name}'
def convert_to_jpeg_lossless(output_path, image_path, precision=16, psv=1):
    """Encode *image_path* as a grayscale lossless JPEG written to *output_path*.

    *psv* selects the lossless predictor (process selection value).
    """
    command = (f'{tool("cjpeg")} -grayscale -precision {precision} '
               f'-lossless {psv} {image_path} > {output_path}')
    os.system(command)
def convert_jpeg_lossless_to_pnm(output_path, image_path):
    """Decode a lossless JPEG back to PNM (used to extract reference pixels)."""
    command = f'{tool("djpeg")} -pnm {image_path} > {output_path}'
    os.system(command)
def generate_reference_grayscale(ref_path, img_path, precision=16, dtype=np.uint16):
    """Decode *img_path* (lossless JPEG) and save its pixels as <ref_path>.npy.

    The image is round-tripped through PNM and stored with a trailing channel
    axis, i.e. shape (H, W, 1).
    NOTE(review): *precision* and *dtype* are currently unused here --
    IMREAD_ANYDEPTH derives the sample depth from the PNM file itself; confirm
    before removing them from the signature.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        convert_jpeg_lossless_to_pnm(tmp.name, img_path)
        # IMREAD_ANYDEPTH keeps 16-bit samples instead of truncating to 8 bits.
        gray = np.asarray(cv2.imread(tmp.name, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_ANYDEPTH))
        gray = np.expand_dims(gray, axis=-1)
        print(f"Saving {ref_path} : {gray.shape}")
        np.save(ref_path, gray)
def generate_sample(img_source, jpeg_target, npy_target, precision=16, dtype=np.uint16):
    """Create one test sample: a lossless JPEG plus its .npy pixel reference.

    NOTE(review): *dtype* is forwarded to generate_reference_grayscale but is
    unused there; the stored dtype follows the PNM bit depth.
    """
    convert_to_jpeg_lossless(jpeg_target, img_source, precision=precision)
    generate_reference_grayscale(npy_target, jpeg_target, precision=precision, dtype=dtype)
def generate():
    """Regenerate every lossless-JPEG sample and its .npy reference."""
    # (source basename, bit precision, numpy dtype) per generated sample.
    samples = [
        ('cat-1245673_640', 16, np.uint16),
        ('cat-3449999_640', 16, np.uint16),
        ('cat-3449999_640', 12, np.uint16),
        ('cat-3449999_640', 8, np.uint8),
    ]
    for base, precision, dtype in samples:
        generate_sample(
            img_source=f'../../pnm/0/{base}.pgm',
            jpeg_target=f'../../jpeg_lossless/0/{base}_grayscale_{precision}bit.jpg',
            npy_target=f'{base}_grayscale_{precision}bit',
            precision=precision, dtype=dtype)


generate()
| DALI_extra-main | db/single/reference/jpeg_lossless/generating.py |
#!/bin/python3
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
import cv2
# https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
def rgb_to_ycbcr(rgb):
    """Convert RGB (floats in [0, 1], channels last) to ITU-R BT.601 YCbCr.

    See https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
    """
    # Columns hold the Y, Cb and Cr weight vectors (studio-swing BT.601).
    weights = np.asarray([[0.257, -0.148, 0.439],
                          [0.504, -0.291, -0.368],
                          [0.098, 0.439, -0.071]])
    offsets = np.asarray([0.0625, 0.5, 0.5])
    return rgb @ weights + offsets
def to_uint8(float_image):
    """Quantize a float image in [0, 1] to uint8 using the project's scaling.

    NOTE(review): the +1 offset is unusual (1.0 maps past the uint8 range);
    it is preserved so regenerated references stay bit-exact.
    """
    scaled = float_image * 255 + 1
    return np.round(scaled).astype(np.uint8)
def get_ycbcr(image):
    """Return the uint8 BT.601 YCbCr rendition of an RGB image (PIL or array)."""
    normalized = np.asarray(image).astype('float32') / 255
    return to_uint8(rgb_to_ycbcr(normalized))
# cat-3449999 (lossless WebP): full image, ROI crop, grayscale (H, W, 1) and
# BT.601 YCbCr reference arrays, saved as .npy next to this script.
img0_path = '../../webp/lossless/cat-3449999_640.webp'
img0 = Image.open(img0_path)
img0_gray = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
# PIL crop box is (left, upper, right, lower)
cropped = img0.crop((20, 5, 1000, 800))
np.save('cat-3449999_640', np.asarray(img0))
np.save('cat-3449999_640_roi', np.asarray(cropped))
np.save('cat-3449999_640_gray', np.expand_dims(img0_gray, -1))
np.save('cat-3449999_640_ycbcr', get_ycbcr(img0))
# Few more images
# cat-1046544 (lossy WebP): full, grayscale and YCbCr references only (no ROI).
img1_path = '../../webp/lossy/cat-1046544_640.webp'
img1 = Image.open(img1_path)
img1_gray = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
np.save('cat-1046544_640', np.asarray(img1))
np.save('cat-1046544_640_gray', np.expand_dims(img1_gray, -1))
np.save('cat-1046544_640_ycbcr', get_ycbcr(img1))
# cat-1245673 (lossless WebP): full, grayscale and YCbCr references only.
img2_path = '../../webp/lossless/cat-1245673_640.webp'
img2 = Image.open(img2_path)
img2_gray = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
np.save('cat-1245673_640', np.asarray(img2))
np.save('cat-1245673_640_gray', np.expand_dims(img2_gray, -1))
np.save('cat-1245673_640_ycbcr', get_ycbcr(img2))
| DALI_extra-main | db/single/reference/webp/generating.py |
#!/bin/python3
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
import cv2
# https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
def rgb_to_ycbcr(rgb):
    """Convert RGB (floats in [0, 1], channels last) to ITU-R BT.601 YCbCr.

    See https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
    """
    # Columns hold the Y, Cb and Cr weight vectors (studio-swing BT.601).
    weights = np.asarray([[0.257, -0.148, 0.439],
                          [0.504, -0.291, -0.368],
                          [0.098, 0.439, -0.071]])
    offsets = np.asarray([0.0625, 0.5, 0.5])
    return rgb @ weights + offsets
def to_uint8(float_image):
    """Quantize a float image in [0, 1] to uint8 using the project's scaling.

    NOTE(review): the +1 offset is unusual (1.0 maps past the uint8 range);
    it is preserved so regenerated references stay bit-exact.
    """
    scaled = float_image * 255 + 1
    return np.round(scaled).astype(np.uint8)
def get_ycbcr(image):
    """Return the uint8 BT.601 YCbCr rendition of an RGB image (PIL or array)."""
    normalized = np.asarray(image).astype('float32') / 255
    return to_uint8(rgb_to_ycbcr(normalized))
# cat-3449999: full image, grayscale (H, W, 1), ROI crop and BT.601 YCbCr
# reference arrays, saved as .npy next to this script.
img0_path = '../../png/0/cat-3449999_640.png'
img0 = Image.open(img0_path)
img0_gray = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
# PIL crop box is (left, upper, right, lower)
cropped = img0.crop((20, 5, 1000, 800))
np.save('cat-3449999_640', np.asarray(img0))
np.save('cat-3449999_640_gray', np.expand_dims(img0_gray, -1))
np.save('cat-3449999_640_roi', np.asarray(cropped))
np.save('cat-3449999_640_ycbcr', get_ycbcr(img0))
# Few more images
# cat-1046544 and cat-1245673: full, grayscale and YCbCr references (no ROI).
img1_path = '../../png/0/cat-1046544_640.png'
img1 = Image.open(img1_path)
img1_gray = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
np.save('cat-1046544_640', np.asarray(img1))
np.save('cat-1046544_640_gray', np.expand_dims(img1_gray, -1))
np.save('cat-1046544_640_ycbcr', get_ycbcr(img1))
img2_path = '../../png/0/cat-1245673_640.png'
img2 = Image.open(img2_path)
img2_gray = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
np.save('cat-1245673_640', np.asarray(img2))
np.save('cat-1245673_640_gray', np.expand_dims(img2_gray, -1))
np.save('cat-1245673_640_ycbcr', get_ycbcr(img2))
| DALI_extra-main | db/single/reference/png/generating.py |
#!/bin/python3
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
import cv2
# https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
def rgb_to_ycbcr(rgb):
    """Convert RGB (floats in [0, 1], channels last) to ITU-R BT.601 YCbCr.

    See https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
    """
    # Columns hold the Y, Cb and Cr weight vectors (studio-swing BT.601).
    weights = np.asarray([[0.257, -0.148, 0.439],
                          [0.504, -0.291, -0.368],
                          [0.098, 0.439, -0.071]])
    offsets = np.asarray([0.0625, 0.5, 0.5])
    return rgb @ weights + offsets
def to_uint8(float_image):
    """Quantize a float image in [0, 1] to uint8 using the project's scaling.

    NOTE(review): the +1 offset is unusual (1.0 maps past the uint8 range);
    it is preserved so regenerated references stay bit-exact.
    """
    scaled = float_image * 255 + 1
    return np.round(scaled).astype(np.uint8)
def get_ycbcr(image):
    """Return the uint8 BT.601 YCbCr rendition of an RGB image (PIL or array)."""
    normalized = np.asarray(image).astype('float32') / 255
    return to_uint8(rgb_to_ycbcr(normalized))
def hwc_to_chw(image):
    """Reorder an interleaved (H, W, C) image to planar (C, H, W) layout."""
    chw_axes = (2, 0, 1)
    return np.transpose(image, chw_axes)
def save(path, arr):
    """Save *arr* as <path>.npy plus a planar (C, H, W) copy as <path>_chw.npy."""
    np.save(path, arr)
    np.save(f'{path}_chw', hwc_to_chw(arr))
# site-1534685: full image, grayscale (H, W, 1), ROI crop and BT.601 YCbCr
# references; save() also writes a planar _chw.npy copy of each array.
img0_path = '../../jpeg/134/site-1534685_1280.jpg'
img0 = Image.open(img0_path)
img0_gray = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
# PIL crop box is (left, upper, right, lower)
cropped = img0.crop((20, 5, 1000, 800))
save('site-1534685_1280', np.asarray(img0))
save('site-1534685_1280_gray', np.expand_dims(img0_gray, -1))
save('site-1534685_1280_roi', np.asarray(cropped))
save('site-1534685_1280_ycbcr', get_ycbcr(img0))
# Few more images
# swan-3584559 and snail-4291306: full, grayscale and YCbCr only (no ROI).
img1_path = '../../jpeg/100/swan-3584559_640.jpg'
img1 = Image.open(img1_path)
img1_gray = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
save('swan-3584559_640', np.asarray(img1))
save('swan-3584559_640_gray', np.expand_dims(img1_gray, -1))
save('swan-3584559_640_ycbcr', get_ycbcr(img1))
img2_path = '../../jpeg/113/snail-4291306_1280.jpg'
img2 = Image.open(img2_path)
img2_gray = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
save('snail-4291306_1280', np.asarray(img2))
save('snail-4291306_1280_gray', np.expand_dims(img2_gray, -1))
save('snail-4291306_1280_ycbcr', get_ycbcr(img2))
| DALI_extra-main | db/single/reference/jpeg/generating.py |
#!/bin/python3
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
import cv2
# https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
def rgb_to_ycbcr(rgb):
    """Convert RGB (floats in [0, 1], channels last) to ITU-R BT.601 YCbCr.

    See https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
    """
    # Columns hold the Y, Cb and Cr weight vectors (studio-swing BT.601).
    weights = np.asarray([[0.257, -0.148, 0.439],
                          [0.504, -0.291, -0.368],
                          [0.098, 0.439, -0.071]])
    offsets = np.asarray([0.0625, 0.5, 0.5])
    return rgb @ weights + offsets
def to_uint8(float_image):
    """Quantize a float image in [0, 1] to uint8 using the project's scaling.

    NOTE(review): the +1 offset is unusual (1.0 maps past the uint8 range);
    it is preserved so regenerated references stay bit-exact.
    """
    scaled = float_image * 255 + 1
    return np.round(scaled).astype(np.uint8)
def get_ycbcr(image):
    """Return the uint8 BT.601 YCbCr rendition of an RGB image (PIL or array)."""
    normalized = np.asarray(image).astype('float32') / 255
    return to_uint8(rgb_to_ycbcr(normalized))
# cat-3449999: full image, grayscale (H, W, 1), ROI crop and BT.601 YCbCr
# reference arrays, saved as .npy next to this script.
img0_path = '../../bmp/0/cat-3449999_640.bmp'
img0 = Image.open(img0_path)
img0_gray = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
# PIL crop box is (left, upper, right, lower)
cropped = img0.crop((20, 5, 1000, 800))
np.save('cat-3449999_640', np.asarray(img0))
np.save('cat-3449999_640_gray', np.expand_dims(img0_gray, -1))
np.save('cat-3449999_640_roi', np.asarray(cropped))
np.save('cat-3449999_640_ycbcr', get_ycbcr(img0))
# Few more images
# cat-1046544 and cat-1245673: full, grayscale and YCbCr references (no ROI).
img1_path = '../../bmp/0/cat-1046544_640.bmp'
img1 = Image.open(img1_path)
img1_gray = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
np.save('cat-1046544_640', np.asarray(img1))
np.save('cat-1046544_640_gray', np.expand_dims(img1_gray, -1))
np.save('cat-1046544_640_ycbcr', get_ycbcr(img1))
img2_path = '../../bmp/0/cat-1245673_640.bmp'
img2 = Image.open(img2_path)
img2_gray = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
np.save('cat-1245673_640', np.asarray(img2))
np.save('cat-1245673_640_gray', np.expand_dims(img2_gray, -1))
np.save('cat-1245673_640_ycbcr', get_ycbcr(img2))
| DALI_extra-main | db/single/reference/bmp/generating.py |
#!/bin/python3
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image
import numpy as np
import cv2
# https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
def rgb_to_ycbcr(rgb):
    """Convert RGB (floats in [0, 1], channels last) to ITU-R BT.601 YCbCr.

    See https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
    """
    # Columns hold the Y, Cb and Cr weight vectors (studio-swing BT.601).
    weights = np.asarray([[0.257, -0.148, 0.439],
                          [0.504, -0.291, -0.368],
                          [0.098, 0.439, -0.071]])
    offsets = np.asarray([0.0625, 0.5, 0.5])
    return rgb @ weights + offsets
def to_uint8(float_image):
    """Quantize a float image in [0, 1] to uint8 using the project's scaling.

    NOTE(review): the +1 offset is unusual (1.0 maps past the uint8 range);
    it is preserved so regenerated references stay bit-exact.
    """
    scaled = float_image * 255 + 1
    return np.round(scaled).astype(np.uint8)
def get_ycbcr(image):
    """Return the uint8 BT.601 YCbCr rendition of an RGB image (PIL or array)."""
    normalized = np.asarray(image).astype('float32') / 255
    return to_uint8(rgb_to_ycbcr(normalized))
# cat-1046544: full image, grayscale (H, W, 1), ROI crop and BT.601 YCbCr
# reference arrays, saved as .npy next to this script.
img0_path = '../../pnm/0/cat-1046544_640.pnm'
img0 = Image.open(img0_path)
img0_gray = cv2.imread(img0_path, cv2.IMREAD_GRAYSCALE)
# PIL crop box is (left, upper, right, lower)
cropped = img0.crop((20, 5, 1000, 800))
np.save('cat-1046544_640', np.asarray(img0))
np.save('cat-1046544_640_gray', np.expand_dims(img0_gray, -1))
np.save('cat-1046544_640_roi', np.asarray(cropped))
np.save('cat-1046544_640_ycbcr', get_ycbcr(img0))
# Few more images
# cat-111793 (.ppm) and domestic-cat-726989: full, grayscale and YCbCr only.
img1_path = '../../pnm/0/cat-111793_640.ppm'
img1 = Image.open(img1_path)
img1_gray = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
np.save('cat-111793_640', np.asarray(img1))
np.save('cat-111793_640_gray', np.expand_dims(img1_gray, -1))
np.save('cat-111793_640_ycbcr', get_ycbcr(img1))
img2_path = '../../pnm/0/domestic-cat-726989_640.pnm'
img2 = Image.open(img2_path)
img2_gray = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
np.save('domestic-cat-726989_640', np.asarray(img2))
np.save('domestic-cat-726989_640_gray', np.expand_dims(img2_gray, -1))
np.save('domestic-cat-726989_640_ycbcr', get_ycbcr(img2))
| DALI_extra-main | db/single/reference/pnm/generating.py |
import os
import tifffile
import matplotlib.pyplot as plt
import numpy as np
import cv2
# https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
def rgb_to_ycbcr(rgb):
    """Convert RGB (floats in [0, 1], channels last) to ITU-R BT.601 YCbCr.

    See https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
    """
    # Columns hold the Y, Cb and Cr weight vectors (studio-swing BT.601).
    weights = np.asarray([[0.257, -0.148, 0.439],
                          [0.504, -0.291, -0.368],
                          [0.098, 0.439, -0.071]])
    offsets = np.asarray([0.0625, 0.5, 0.5])
    return rgb @ weights + offsets
def rgb_to_gray(rgb):
    """Reduce an (..., 3) RGB array to (..., 1) luma using BT.601 weights."""
    bt601_luma = np.asarray([0.299, 0.587, 0.114])
    return np.expand_dims(np.dot(rgb, bt601_luma), axis=-1)
def to_uint8(float_image):
    """Quantize a float image in [0, 1] to uint8 using the project's scaling.

    NOTE(review): the +1 offset is unusual (1.0 maps past the uint8 range);
    it is preserved so regenerated references stay bit-exact.
    """
    scaled = float_image * 255 + 1
    return np.round(scaled).astype(np.uint8)
def get_ycbcr(image):
    """Return the uint8 BT.601 YCbCr rendition of an RGB image (PIL or array)."""
    normalized = np.asarray(image).astype('float32') / 255
    return to_uint8(rgb_to_ycbcr(normalized))
def read_rgb(path):
    """Load *path* with matplotlib and return its pixels as a uint8 array.

    NOTE(review): for uint8 TIFF inputs plt.imread already yields uint8, so
    the astype is presumably a defensive no-op copy -- confirm before removing.
    """
    return plt.imread(path).astype('uint8')
def read_gray(path):
    """Load *path* as 8-bit grayscale with a trailing channel axis, (H, W, 1)."""
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    return np.asarray(gray)[..., np.newaxis]
# Root paths inside the DALI_extra checkout (DALI_EXTRA_PATH must be set).
dali_extra_path = os.environ['DALI_EXTRA_PATH']
tiff_path = os.path.join(dali_extra_path, 'db/single/tiff')
tiff_ref_path = os.path.join(dali_extra_path, 'db/single/reference/tiff')
# For every RGB test image: save the decoded pixels as .npy, derive a
# single-channel grayscale variant (stored both as a .tiff input and its .npy
# reference) and a BT.601 YCbCr .npy reference.
# NOTE: cat-3504008 previously called plt.imread directly; read_rgb does
# exactly that, so routing all three images through read_rgb (and dropping the
# redundant second astype on the others) changes no output bytes.
for stem in ('cat-111793_640', 'cat-3449999_640', 'cat-3504008_640'):
    img_path = os.path.join(tiff_path, f'0/{stem}.tiff')
    img = read_rgb(img_path)
    np.save(os.path.join(tiff_ref_path, f'0/{stem}.tiff.npy'), img)
    gray = read_gray(img_path)
    tifffile.imwrite(os.path.join(tiff_path, f'0/{stem}_gray.tiff'), gray)
    np.save(os.path.join(tiff_ref_path, f'0/{stem}_gray.tiff.npy'), gray)
    np.save(os.path.join(tiff_ref_path, f'0/{stem}_ycbcr.tiff.npy'), get_ycbcr(img))
# Palette TIFF reference: keep only the first three channels (RGB).
palette = plt.imread(os.path.join(tiff_path, '0/cat-300572_640_palette.tiff')).astype('uint8')[:, :, :3]
np.save(os.path.join(tiff_ref_path, '0/cat-300572_640_palette.tiff.npy'), palette)
| DALI_extra-main | db/single/reference/tiff/make.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
from setuptools import find_packages, setup
import versioneer
# Determine the package version: prefer versioneer's VCS-derived version,
# falling back to a date-stamped dev version when VCS info is unavailable.
versions = versioneer.get_versions()
if versions["error"]:
    # No usable VCS metadata (e.g. building outside a git checkout): fall back
    # to a date-stamped development version.
    today = datetime.date.today().timetuple()
    year = today[0] % 1000
    month = today[1]
    day = today[2]
    version = f"2.3.0.dev{year:02d}{month:02d}{day:02d}"
else:
    version = versions["version"]
# NVFL_RELEASE=1 publishes under the official name; anything else goes to the
# nightly package.
release = os.environ.get("NVFL_RELEASE")
if release == "1":
    package_name = "nvflare"
else:
    package_name = "nvflare-nightly"
setup(
    name=package_name,
    version=version,
    cmdclass=versioneer.get_cmdclass(),
    package_dir={"nvflare": "nvflare"},
    packages=find_packages(
        where=".",
        include=[
            "*",
        ],
        exclude=["tests", "tests.*"],
    ), package_data={"": ["*.yml", "*.html", "poc.zip", "*.config", "*.conf"]},
)
| NVFlare-main | setup.py |
# Version: 0.21
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools
plugin
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
# pylint:disable=attribute-defined-outside-init,too-many-arguments
import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, versionfile_source, versionfile_build, tag_prefix,
    parentdir_prefix, verbose) are assigned dynamically by
    get_config_from_root().
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    Returns the absolute, symlink-resolved root path.  Raises
    VersioneerBadRootError when neither the current working directory nor the
    directory of sys.argv[0] contains setup.py/versioneer.py.
    """
    # First candidate: the current working directory.
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(my_path), versioneer_py))
    except NameError:
        # __file__ is undefined (e.g. running via exec()); skip the warning.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # This might raise OSError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    parser = configparser.ConfigParser()
    with open(os.path.join(root, "setup.cfg"), "r") as cfg_file:
        parser.read_file(cfg_file)
    vcs = parser.get("versioneer", "VCS")  # mandatory; raises if absent
    # Dict-like section view for the optional entries.
    section = parser["versioneer"]

    cfg = VersioneerConfig()
    cfg.VCS = vcs
    cfg.style = section.get("style", "")
    cfg.versionfile_source = section.get("versionfile_source")
    cfg.versionfile_build = section.get("versionfile_build")
    cfg.tag_prefix = section.get("tag_prefix")
    # A quoted empty prefix in setup.cfg means "no tag prefix at all".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = section.get("parentdir_prefix")
    cfg.verbose = section.get("verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as internal control flow: each version-discovery strategy
    (keywords, VCS query, parent-dir name, version file) raises this so
    the caller can fall through to the next strategy.
    """
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY: Dict[str, str] = {}  # VCS name -> full _version.py template text
HANDLERS: Dict[str, Dict[str, Callable]] = {}  # VCS name -> {method name -> handler fn}
def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(handler):
        """Record *handler* under HANDLERS[vcs][method] and return it unchanged."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = handler
        return handler
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s), trying each name in *commands* in turn.

    Parameters
    ----------
    commands : list of str
        Candidate executable names (e.g. ["git", "git.cmd"]); the first
        one that can be spawned is used.
    args : list of str
        Arguments passed to the chosen executable.
    cwd, env
        Forwarded to subprocess.Popen.
    verbose : bool
        Print diagnostics on failure.
    hide_stderr : bool
        Capture (and discard) stderr instead of inheriting it.

    Returns
    -------
    tuple
        (stdout, returncode) once a command could be spawned, or
        (None, None) when no candidate executable exists/starts.
    """
    assert isinstance(commands, list)
    process = None
    for command in commands:
        # dispcmd is only for diagnostics; build it outside the try block.
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except OSError as e:
            # Modern idiom: bind the exception directly instead of the
            # Python-2-era sys.exc_info()[1] dance.
            if e.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.21 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match",
"%%s%%s" %% (tag_prefix, TAG_PREFIX_REGEX)],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    # Map "variable assignment prefix" -> keyword name in the result dict.
    wanted = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    keywords = {}
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                for prefix, key in wanted.items():
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except OSError:
        # Missing/unreadable file: return whatever (possibly nothing) we got.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]
        # git-2.2.0 added "%cI" (ISO-8601-compliant), but "%ci" has been
        # around since git-1.5.3 and is too hard to version-detect around,
        # so we massage the "ISO-8601-like" %ci output into compliance:
        # replace the first space with "T" and drop the second one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    tag_marker = "tag: "
    tags = {ref[len(tag_marker):] for ref in refs if ref.startswith(tag_marker)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags.
        # Heuristic: version tags contain a digit, which filters out common
        # branch names ("release", "HEAD", "master", ...) that the old %d
        # expansion mixes in without a distinguishing refs/tags/ prefix.
        tags = {ref for ref in refs if re.search(r'\d', ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        # Filter out refs that exactly match the prefix or that don't start
        # with a number once the prefix is stripped (mostly a concern when
        # the prefix is '').
        if not re.match(r'\d', version):
            continue
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, branch, dirty,
    closest-tag, distance, date.  When 'git describe' output cannot be
    parsed, "error" is set and the partial dict is returned.  Raises
    NotThisMethod when *root* is not under git control or a required git
    command fails outright.  *runner* is injectable for testing.
    """
    GITS = ["git"]
    TAG_PREFIX_REGEX = "*"
    if sys.platform == "win32":
        # shell=False, so the Windows wrapper executables must be named
        # explicitly; the match glob also needs different escaping there.
        GITS = ["git.cmd", "git.exe"]
        TAG_PREFIX_REGEX = r"\*"
    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
                   hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
                                     "--always", "--long",
                                     "--match",
                                     "%s%s" % (tag_prefix, TAG_PREFIX_REGEX)],
                              cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                             cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")
        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)
        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces["branch"] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.
    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.
    """
    # shell=False, so on Windows the wrapper executables must be named.
    git_candidates = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        # Point at the .py source even when running from a compiled file.
        my_path = __file__
        if my_path.endswith((".pyc", ".pyo")):
            my_path = os.path.splitext(my_path)[0] + ".py"
        versioneer_file = os.path.relpath(my_path)
    except NameError:
        # __file__ unavailable (frozen/embedded); fall back to default name.
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    # Only append the export-subst line if an equivalent one isn't there yet.
    present = False
    try:
        with open(".gitattributes", "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                if (stripped.startswith(versionfile_source)
                        and "export-subst" in stripped.split()[1:]):
                    present = True
                    break
    except OSError:
        pass
    if not present:
        with open(".gitattributes", "a+") as fobj:
            fobj.write(f"{versionfile_source} export-subst\n")
        files.append(".gitattributes")
    run_command(git_candidates, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.
    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    current = root
    # Check *root* itself plus up to two parent levels.
    for _ in range(3):
        leaf = os.path.basename(current)
        if leaf.startswith(parentdir_prefix):
            return {"version": leaf[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(current)
        current = os.path.dirname(current)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.21) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except OSError:
        raise NotThisMethod("unable to read _version.py")
    # The JSON payload sits between the opening triple-quote and the END
    # marker; accept either Unix or DOS line endings after the quotes.
    for newline in ("\n", "\r\n"):
        mo = re.search(
            r"version_json = '''" + newline + r"(.*)''' # END VERSION_JSON",
            contents, re.M | re.S)
        if mo:
            return json.loads(mo.group(1))
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    # Remove the old file first so we never leave a half-written one behind.
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True, indent=1,
                         separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return the local-version separator: '.' once a '+' is present, else '+'."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 local version identifier).

    Note that a dirty build of an exactly tagged commit still renders as
    TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing tagged anywhere in the history
        out = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
        return out
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        out += plus_or_dot(pieces)
        out += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
    return out
def render_pep440_branch(pieces):
    """Render TAG[[.dev0]+DISTANCE.gHEX[.dirty]].

    The ".dev0" marks a non-master branch; .dev0 sorts backwards, so a
    feature branch appears "older" than the master build.

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        out = "0"
        if pieces["branch"] != "master":
            out += ".dev0"
        out += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
        return out
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        if pieces["branch"] != "master":
            out += ".dev0"
        out += plus_or_dot(pieces)
        out += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
    return out
def pep440_split_post(ver):
    """Split a PEP 440 version string at its ".postN" segment.

    Returns (release, post_number); post_number is None when no post-release
    segment is present, and 0 for a bare ".post" with no number.
    """
    parts = ver.split(".post")
    if len(parts) == 2:
        return parts[0], int(parts[1] or 0)
    return parts[0], None
def render_pep440_pre(pieces):
    """Render TAG[.postN.devDISTANCE] -- no -dirty marker.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post0.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # sitting exactly on the tag: use it verbatim
        return tag
    # bump (or start) the post-release segment and append a .dev counter
    release, post = pep440_split_post(tag)
    if post is None:
        return release + ".post0.dev%d" % pieces["distance"]
    return release + ".post%d.dev%d" % (post + 1, pieces["distance"])
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" flags a dirty tree (it sorts before the corresponding clean
    build) -- but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        out = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            out += ".dev0"
        return out + "+g%s" % pieces["short"]
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        out += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            out += ".dev0"
        out += plus_or_dot(pieces) + "g%s" % pieces["short"]
    return out
def render_pep440_post_branch(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX[.dirty]].

    The ".dev0" marks a non-master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        out = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            out += ".dev0"
        out += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            out += ".dirty"
        return out
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        out += ".post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            out += ".dev0"
        out += plus_or_dot(pieces) + "g%s" % pieces["short"]
        if pieces["dirty"]:
            out += ".dirty"
    return out
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]] (legacy style).

    The ".dev0" flags a dirty tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    needs_suffix = bool(pieces["distance"] or pieces["dirty"])
    base = tag if tag else "0"
    if not tag:
        # exception #1: always carry the .post suffix
        needs_suffix = True
    if not needs_suffix:
        return base
    base += ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        base += ".dev0"
    return base
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty].

    Mimics 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        out = pieces["short"]
    return out + "-dirty" if pieces["dirty"] else out
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty]; distance/hash are unconditional.

    Mimics 'git describe --tags --dirty --always -long'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render *pieces* into the requested *style*; returns a version dict."""
    if pieces["error"]:
        # VCS interrogation failed: propagate the error, version unknown
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    key = style if style and style != "default" else "pep440"  # the default
    if key not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[key](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
    # NOTE(review): no raise site is visible in this part of the file;
    # presumably raised during project-root discovery -- confirm upstream.
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Tries, in order: expanded VCS keywords, a previously generated
    _version.py, live VCS interrogation, and the parent directory name.
    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    # 1) expanded VCS keywords (e.g. 'git archive' export-subst)
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # 2) a previously generated _version.py
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    # 3) live interrogation of the VCS (e.g. 'git describe')
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # 4) parent directory name (unpacked tarball)
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    # every method failed: return a well-formed "unknown" dict
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
def get_version():
    """Get the short version string for this project (the 'version' key only)."""
    return get_versions()["version"]
def get_cmdclass(cmdclass=None):
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/python-versioneer/python-versioneer/issues/52
    cmds = {} if cmdclass is None else cmdclass.copy()

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        """'setup.py version': print the computed version information."""
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?

    # we override different "build_py" commands for both environments
    if 'build_py' in cmds:
        _build_py = cmds['build_py']
    elif "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        """build_py that rewrites _version.py in the build tree afterwards."""
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if 'build_ext' in cmds:
        _build_ext = cmds['build_ext']
    elif "setuptools" in sys.modules:
        from setuptools.command.build_ext import build_ext as _build_ext
    else:
        from distutils.command.build_ext import build_ext as _build_ext

    class cmd_build_ext(_build_ext):
        """build_ext that rewrites _version.py in the build tree afterwards."""
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_ext.run(self)
            if self.inplace:
                # build_ext --inplace will only build extensions in
                # build/lib<..> dir with no _version.py to write to.
                # As in place builds will already have a _version.py
                # in the module dir, we do not need to write one.
                return
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            target_versionfile = os.path.join(self.build_lib,
                                              cfg.versionfile_build)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile, versions)
    cmds["build_ext"] = cmd_build_ext

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0],  # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...

        class cmd_build_exe(_build_exe):
            """build_exe that freezes a static _version.py, then restores it."""
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                # regenerate the full template-based _version.py afterwards
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]

    if 'py2exe' in sys.modules:  # py2exe enabled?
        from py2exe.distutils_buildexe import py2exe as _py2exe

        class cmd_py2exe(_py2exe):
            """py2exe that freezes a static _version.py, then restores it."""
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                # regenerate the full template-based _version.py afterwards
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe

    # we override different "sdist" commands for both environments
    if 'sdist' in cmds:
        _sdist = cmds['sdist']
    elif "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        """sdist that pins the computed version into the release tree."""
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds
# Printed to stderr when setup.cfg lacks a usable [versioneer] section.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
# Appended to setup.cfg by do_setup() when no [versioneer] section exists yet.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
# Legacy __init__.py boilerplate that do_setup() replaces when found.
OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
# Current __init__.py boilerplate; '{0}' is the version module's name.
INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""
def do_setup():
    """Do main VCS-independent setup function for installing Versioneer.

    Creates the versionfile, installs/updates the package __init__.py
    snippet, ensures MANIFEST.in coverage, and performs VCS-specific setup.
    Returns 0 on success, 1 when setup.cfg still needs configuration.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        # missing/incomplete config: seed a sample section and bail out
        if isinstance(e, (OSError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # write the template-based _version.py into the package
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    # install (or refresh) the __version__ snippet in the package __init__.py
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except OSError:
            old = ""
        module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
        snippet = INIT_PY_SNIPPET.format(module)
        if OLD_SNIPPET in old:
            print(" replacing boilerplate in %s" % ipy)
            with open(ipy, "w") as f:
                f.write(old.replace(OLD_SNIPPET, snippet))
        elif snippet not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(snippet)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except OSError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 when setup.py looks correct).
    """
    found = set()
    setters = False
    errors = 0
    # map of required substrings to the token recorded when seen
    markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    with open("setup.py", "r") as handle:
        for line in handle.readlines():
            for needle, token in markers.items():
                if needle in line:
                    found.add(token)
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                # pre-setup.cfg configuration style: flag for removal
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
# CLI entry point: 'python versioneer.py setup' installs the versioneer
# boilerplate into the current project and validates setup.py.
# NOTE(review): sys.argv[1] raises IndexError when no subcommand is given.
if __name__ == "__main__":
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
| NVFlare-main | versioneer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
def plot_acc(path):
log_path = os.path.join(path, "simulate_job", "app_site-1", "log.txt")
acc = []
with open(log_path, encoding="utf-8") as f:
for line in f.readlines():
str_split = line.split(" ")
if len(str_split) > 5:
if str_split[-2] == "train_accuracy:":
acc.append(float(str_split[-1]))
print(acc)
ep = [i * 10 for i in range(len(acc))]
plt.plot(ep, acc)
plt.xlabel("Local training epoch")
plt.ylabel("Training accuracy")
plt.title("One-shot VFL")
plt.savefig("figs/oneshotVFL_results1.png")
| NVFlare-main | research/one-shot-vfl/plt_valid.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
def main():
    """Point a federated-client JSON config at an intersection file.

    Rewrites the config file in place, setting its INTERSECTION_FILE key.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config_file",
        type=str,
        default="./config_fed_client.json",
        help="config file in JSON format",
    )
    parser.add_argument(
        "--intersection_file",
        type=str,
        help="Intersection file with overlapping data indices",
    )
    args = parser.parse_args()

    # read-modify-write the JSON config in place
    with open(args.config_file, "r") as handle:
        config = json.load(handle)
    config["INTERSECTION_FILE"] = args.intersection_file
    with open(args.config_file, "w") as handle:
        json.dump(config, handle, indent=4)
        handle.write("\n")

    print(f"Modified {args.config_file} to use INTERSECTION_FILE={config['INTERSECTION_FILE']}")


if __name__ == "__main__":
    main()
| NVFlare-main | research/one-shot-vfl/set_intersection_file.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pt.networks.cifar10_nets import ModerateCNN
class SplitNN(ModerateCNN):
    """ModerateCNN partitioned for split learning.

    split_id 0 owns the convolutional feature extractor (data side);
    split_id 1 owns the fully-connected classifier (label side).
    """

    def __init__(self, split_id):
        """Build the split model.

        Args:
            split_id: 0 for the conv half, 1 for the fc half.

        Raises:
            ValueError: if split_id is not 0 or 1.
        """
        super().__init__()
        if split_id not in (0, 1):
            # Bug fix: the original referenced self.split_id before it was
            # assigned, raising AttributeError instead of this ValueError.
            raise ValueError(f"Only supports split_id '0' or '1' but was {split_id}")
        self.split_id = split_id
        # select which half of the parent network this split executes
        if self.split_id == 0:
            self.split_forward = self.conv_layer
        else:
            self.split_forward = self.fc_layer

    def forward(self, x):
        """Run only this split's portion of the network."""
        x = self.split_forward(x)
        return x

    def forward_complt(self, x):
        """Run the full (unsplit) network, flattening between conv and fc."""
        x = self.conv_layer(x)
        x = x.view(x.size(0), -1)
        x = self.fc_layer(x)
        return x

    def get_split_id(self):
        """Return which half of the model this instance holds (0 or 1)."""
        return self.split_id
| NVFlare-main | research/one-shot-vfl/src/oneshotVFL/split_nn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oneshotVFL.cifar10_learner_oneshotVFL import CIFAR10LearnerOneshotVFL
from oneshotVFL.split_nn import SplitNN
from splitnn.cifar10_splitnn_dataset import CIFAR10SplitNN
from splitnn.cifar10_vertical_data_splitter import Cifar10VerticalDataSplitter
| NVFlare-main | research/one-shot-vfl/src/oneshotVFL/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from timeit import default_timer as timer
import numpy as np
import torch
import torch.optim as optim
from oneshotVFL.vfl_oneshot_workflow import OSVFLDataKind, OSVFLNNConstants
from sklearn.cluster import KMeans
# from oneshotVFL.cifar10_splitnn_dataset import CIFAR10SplitNN
from splitnn.cifar10_splitnn_dataset import CIFAR10SplitNN
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from nvflare.apis.dxo import DXO, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.decomposers import TensorDecomposer
from nvflare.fuel.f3.stats_pool import StatsPoolManager
from nvflare.fuel.utils import fobs
class CIFAR10LearnerOneshotVFL(Learner):
    def __init__(
        self,
        dataset_root: str = "./dataset",
        intersection_file: str = None,
        lr: float = 1e-2,
        model: dict = None,
        analytic_sender_id: str = "analytic_sender",
        fp16: bool = True,
        val_freq: int = 1000,
    ):
        """Simple CIFAR-10 Trainer for split learning.

        Args:
            dataset_root: directory with CIFAR-10 data.
            intersection_file: Optional. intersection file specifying overlapping indices between both clients.
                Defaults to `None`, i.e. the whole training dataset is used.
            lr: learning rate.
            model: Split learning model.
            analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
                If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
            fp16: If `True`, convert activations and gradients send between clients to `torch.float16`.
                Reduces bandwidth needed for communication but might impact model accuracy.
            val_freq: how often to perform validation in rounds. Defaults to 1000. No validation if <= 0.
        """
        super().__init__()
        self.dataset_root = dataset_root
        self.intersection_file = intersection_file
        self.lr = lr
        self.model = model
        self.analytic_sender_id = analytic_sender_id
        self.fp16 = fp16
        self.val_freq = val_freq

        # Everything below is populated later in initialize()/training.
        self.target_names = None
        self.app_root = None
        self.current_round = None
        self.num_rounds = None
        self.batch_size = None
        self.writer = None
        self.client_name = None
        self.other_client = None
        self.device = None
        self.optimizer = None
        self.criterion = None
        self.transform_train = None
        self.transform_valid = None
        self.train_dataset = None
        self.valid_dataset = None
        self.split_id = None
        self.train_activations = None
        self.train_batch_indices = None
        self.train_size = 0
        # running validation state accumulated on the label side
        self.val_loss = []
        self.val_labels = []
        self.val_pred_labels = []
        self.compute_stats_pool = None
        # epoch budgets; overwritten with fixed values in initialize()
        self.encoder_epoch = 0
        self.clf_epoch = 0

        # use FOBS serializing/deserializing PyTorch tensors
        fobs.register(TensorDecomposer)
    def initialize(self, parts: dict, fl_ctx: FLContext):
        """One-time setup: device, optimizer, transforms, datasets, writer,
        and (on the label side, split_id 1) aux message handlers.

        Args:
            parts: client components; may contain an `AnalyticsSender`
                under `self.analytic_sender_id`.
            fl_ctx: FL context of this client.
        """
        t_start = timer()
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = self.model.to(self.device)
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9)
        self.criterion = torch.nn.CrossEntropyLoss()
        # fixed epoch budgets, overriding the zeros set in __init__
        self.encoder_epoch = 200
        self.clf_epoch = 50
        # standard CIFAR-10 augmentation + per-channel normalization
        self.transform_train = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.ToPILImage(),
                transforms.Pad(4, padding_mode="reflect"),
                transforms.RandomCrop(32),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                    std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
                ),
            ]
        )
        self.transform_valid = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                    std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
                ),
            ]
        )
        self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
        self.client_name = fl_ctx.get_identity_name()
        self.split_id = self.model.get_split_id()
        self.log_info(fl_ctx, f"Running `split_id` {self.split_id} on site `{self.client_name}`")
        # split 0 holds the images, split 1 holds the labels
        if self.split_id == 0:  # data side
            data_returns = "image"
        elif self.split_id == 1:  # label side
            data_returns = "label"
        else:
            raise ValueError(f"Expected split_id to be '0' or '1' but was {self.split_id}")
        if self.intersection_file is not None:
            _intersect_indices = np.loadtxt(self.intersection_file)
        else:
            _intersect_indices = None
        self.train_dataset = CIFAR10SplitNN(
            root=self.dataset_root,
            train=True,
            download=True,
            transform=self.transform_train,
            returns=data_returns,
            intersect_idx=_intersect_indices,
        )
        self.valid_dataset = CIFAR10SplitNN(
            root=self.dataset_root,
            train=False,
            download=False,
            transform=self.transform_valid,
            returns=data_returns,
            intersect_idx=None,  # TODO: support validation intersect indices
        )
        self.train_size = len(self.train_dataset)
        if self.train_size <= 0:
            raise ValueError(f"Expected train dataset size to be larger zero but got {self.train_size}")
        self.log_info(fl_ctx, f"Training with {self.train_size} overlapping indices of {self.train_dataset.orig_size}.")
        # Select local TensorBoard writer or event-based writer for streaming
        if self.split_id == 1:  # metrics can only be computed for client with labels
            self.writer = parts.get(self.analytic_sender_id)  # user configured config_fed_client.json for streaming
            if not self.writer:  # use local TensorBoard writer only
                self.writer = SummaryWriter(self.app_root)
        # register aux message handlers (label side only)
        # NOTE(review): _classifier_train_label_side and _valid_label_side are
        # defined further down in this class (not visible here) -- confirm.
        engine = fl_ctx.get_engine()
        if self.split_id == 1:
            engine.register_aux_message_handler(
                topic=OSVFLNNConstants.TASK_CALCULATE_GRADIENTS, message_handle_func=self._osvfl_calculate_gradients
            )
            engine.register_aux_message_handler(
                topic=OSVFLNNConstants.TASK_TRAIN_LABEL_SIDE, message_handle_func=self._classifier_train_label_side
            )
            engine.register_aux_message_handler(
                topic=OSVFLNNConstants.TASK_VALID, message_handle_func=self._valid_label_side
            )
            self.log_debug(fl_ctx, f"Registered aux message handlers for split_id {self.split_id}")
        self.compute_stats_pool = StatsPoolManager.add_time_hist_pool(
            "Compute_Time", "Compute time in secs", scope=self.client_name
        )
        self.compute_stats_pool.record_value(category="initialize", value=timer() - t_start)
""" training steps """
def _extract_features(self):
self.model.train()
features = []
for _, (inputs, _) in enumerate(self.train_dataloader_no_shuffle):
inputs = inputs.to(self.device)
features.append(self.model(inputs))
features = torch.cat(features, dim=0)
features = features.view(features.size(0), -1)
return features.detach().requires_grad_()
def _osvfl_calculate_gradients(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
    """Aux message handler (label side): run the received activations through
    the label-side model and return per-sample gradients w.r.t. them.

    Args:
        topic: aux message topic (unused here; routing already happened).
        request: Shareable carrying OSVFLDataKind.FEATURES data.
        fl_ctx: FL context of this request.

    Returns:
        Shareable carrying OSVFLDataKind.GRADIENTS with a numpy gradient array
        aligned with the order of the received features.
    """
    t_start = timer()
    if self.split_id != 1:
        # Fixed copy-pasted message that referenced `_aux_train_label_side`.
        raise ValueError(
            f"Expected `split_id` 1. It doesn't make sense to run `_osvfl_calculate_gradients` with `split_id` {self.split_id}"
        )

    self.current_round = request.get_header(AppConstants.CURRENT_ROUND)
    self.num_rounds = request.get_header(AppConstants.NUM_ROUNDS)
    self.log_debug(fl_ctx, f"Calculate gradients in round {self.current_round} of {self.num_rounds} rounds.")

    dxo = from_shareable(request)
    if dxo.data_kind != OSVFLDataKind.FEATURES:
        raise ValueError(f"Expected data kind {OSVFLDataKind.FEATURES} but received {dxo.data_kind}")
    features = dxo.data.get(OSVFLNNConstants.DATA)
    if features is None:
        raise ValueError("No features in DXO!")
    features = fobs.loads(features)
    # Bug fix: replaced a stray `print(features.shape)` debug statement with a
    # framework debug log so stdout is not polluted in deployment.
    self.log_debug(fl_ctx, f"Received features with shape {features.shape}.")

    # Wrap the received activations in a copy of the training dataset so the
    # DataLoader pairs them with the local labels (transform disabled: the
    # inputs are activations, not images).
    feature_dataset = copy.deepcopy(self.train_dataset)
    if self.fp16:
        features = features.type(torch.float32)  # return to default pytorch precision
    feature_dataset.data = features
    feature_dataset.transform = None
    feature_dataloader = torch.utils.data.DataLoader(feature_dataset, batch_size=self.batch_size, shuffle=False)

    gradient = []
    # NOTE(review): eval() keeps dropout/batch-norm deterministic while the
    # backward pass only serves to obtain activation gradients — confirm this
    # matches the intended one-shot VFL protocol.
    self.model.eval()
    for _, (activations, labels) in enumerate(feature_dataloader):
        activations, labels = activations.to(self.device), labels.to(self.device)
        activations.requires_grad_(True)

        self.optimizer.zero_grad()
        pred = self.model.forward(activations)
        loss = self.criterion(pred, labels)
        loss.backward()

        if not isinstance(activations.grad, torch.Tensor):
            raise ValueError("No valid gradients available!")
        # gradient to be returned to other client
        if self.fp16:
            gradient.append(activations.grad.type(torch.float16))
        else:
            gradient.append(activations.grad)
    gradient = torch.cat(gradient).cpu().numpy()

    self.log_debug(fl_ctx, "_osvfl_calculate_gradients finished.")
    return_shareable = DXO(
        data={OSVFLNNConstants.DATA: fobs.dumps(gradient)}, data_kind=OSVFLDataKind.GRADIENTS
    ).to_shareable()
    self.compute_stats_pool.record_value(category="_osvfl_calculate_gradients", value=timer() - t_start)
    self.log_debug(fl_ctx, f"Sending partial gradients return_shareable: {type(return_shareable)}")
    return return_shareable
def _cluster_gradients(self, gradients, fl_ctx):
    """Group the per-sample gradient vectors into 10 clusters with k-means and
    return the per-sample cluster assignments (used as pseudo-labels)."""
    n_pseudo_classes = 10
    fitted = KMeans(n_clusters=n_pseudo_classes, random_state=0, n_init=1).fit(gradients)
    self.log_info(fl_ctx, "_cluster_gradients finished.")
    return fitted.labels_
def _local_train(self, cluster_labels, fl_ctx):
l_labels = torch.LongTensor(cluster_labels)
local_train_dataset = copy.deepcopy(self.train_dataset)
local_train_dataset.target = l_labels
local_train_datloader = torch.utils.data.DataLoader(
local_train_dataset, batch_size=self.batch_size, shuffle=True
)
self.model.train()
for e in range(self.encoder_epoch):
loss_ep = []
for _, (inputs, labels) in enumerate(local_train_datloader):
inputs, labels = inputs.to(self.device), labels.to(self.device)
self.optimizer.zero_grad()
pred = self.model.forward_complt(inputs)
loss = self.criterion(pred, labels)
loss.backward()
self.optimizer.step()
loss_ep.append(loss.item())
loss_epoch = sum(loss_ep) / len(loss_ep)
if e % 10 == 0:
self.model.eval()
correct = 0
for _, (inputs, labels) in enumerate(local_train_datloader):
inputs, labels = inputs.to(self.device), labels.to(self.device)
pred = self.model.forward_complt(inputs)
_, pred_labels = torch.max(pred, 1)
correct += (pred_labels == labels).sum()
acc = correct / len(cluster_labels)
self.log_info(
fl_ctx,
f"Epoch {e}/{self.encoder_epoch} train_loss: {loss_epoch:.4f}, train_accuracy: {acc:.4f}",
)
def _classifier_train_label_side(self, topic: str, request: Shareable, fl_ctx: FLContext):
    """Aux message handler (label side): train the label-side classifier on
    feature activations received from the feature-side client.

    Args:
        topic: aux message topic (unused here; routing already happened).
        request: Shareable carrying OSVFLDataKind.FEATURES data.
        fl_ctx: FL context of this request.

    Returns:
        Shareable reply with ReturnCode.OK on success.
    """
    t_start = timer()
    if self.split_id != 1:
        # Fixed copy-pasted message that referenced `_aux_train_label_side`.
        raise ValueError(
            f"Expected `split_id` 1. It doesn't make sense to run `_classifier_train_label_side` with `split_id` {self.split_id}"
        )

    self.current_round = request.get_header(AppConstants.CURRENT_ROUND)
    self.num_rounds = request.get_header(AppConstants.NUM_ROUNDS)
    self.log_debug(fl_ctx, f"Calculate gradients in round {self.current_round} of {self.num_rounds} rounds.")

    dxo = from_shareable(request)
    if dxo.data_kind != OSVFLDataKind.FEATURES:
        raise ValueError(f"Expected data kind {OSVFLDataKind.FEATURES} but received {dxo.data_kind}")
    features = dxo.data.get(OSVFLNNConstants.DATA)
    if features is None:
        raise ValueError("No features in DXO!")
    features = fobs.loads(features)

    # Wrap the received activations in a copy of the training dataset so the
    # DataLoader pairs them with the local labels (transform disabled: the
    # inputs are activations, not images).
    feature_dataset = copy.deepcopy(self.train_dataset)
    if self.fp16:
        features = features.type(torch.float32)  # return to default pytorch precision
    feature_dataset.data = features
    feature_dataset.transform = None
    feature_dataloader = torch.utils.data.DataLoader(feature_dataset, batch_size=self.batch_size, shuffle=False)

    self.model.train()
    for e in range(self.encoder_epoch):
        loss_ep = []
        for _, (inputs, labels) in enumerate(feature_dataloader):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            self.optimizer.zero_grad()
            pred = self.model.forward(inputs)
            loss = self.criterion(pred, labels)
            loss.backward()
            self.optimizer.step()
            loss_ep.append(loss.item())
        loss_epoch = sum(loss_ep) / len(loss_ep)
        if e % 10 == 0:
            # Periodic accuracy check on the training features.
            self.model.eval()
            correct = 0
            with torch.no_grad():  # inference only; no gradients needed
                for _, (inputs, labels) in enumerate(feature_dataloader):
                    inputs, labels = inputs.to(self.device), labels.to(self.device)
                    pred = self.model.forward(inputs)
                    _, pred_labels = torch.max(pred, 1)
                    correct += (pred_labels == labels).sum()
            acc = correct / features.shape[0]
            self.log_info(
                fl_ctx,
                f"Label Side Epoch {e}/{self.encoder_epoch} train_loss: {loss_epoch:.4f}, train_accuracy: {acc:.4f}",
            )
            # Bug fix: the original left the model in eval() mode here, so all
            # subsequent epochs trained with dropout/batch-norm frozen.
            self.model.train()

    # Consistency fix: record timing like the sibling handlers do; t_start was
    # previously captured but never used.
    self.compute_stats_pool.record_value(category="_classifier_train_label_side", value=timer() - t_start)
    return make_reply(ReturnCode.OK)
def _valid_label_side(self, topic: str, request: Shareable, fl_ctx: FLContext):
    """Aux message handler (label side): evaluate the label-side classifier on
    validation-set activations received from the feature-side client.

    Args:
        topic: aux message topic (unused here; routing already happened).
        request: Shareable carrying OSVFLDataKind.FEATURES data.
        fl_ctx: FL context of this request.

    Returns:
        Shareable reply with ReturnCode.OK on success.
    """
    t_start = timer()
    if self.split_id != 1:
        # Fixed copy-pasted message that referenced `_aux_train_label_side`.
        raise ValueError(
            f"Expected `split_id` 1. It doesn't make sense to run `_valid_label_side` with `split_id` {self.split_id}"
        )

    dxo = from_shareable(request)
    if dxo.data_kind != OSVFLDataKind.FEATURES:
        raise ValueError(f"Expected data kind {OSVFLDataKind.FEATURES} but received {dxo.data_kind}")
    features = dxo.data.get(OSVFLNNConstants.DATA)
    if features is None:
        raise ValueError("No features in DXO!")
    features = fobs.loads(features)

    feature_dataset = copy.deepcopy(self.valid_dataset)
    if self.fp16:
        features = features.type(torch.float32)  # return to default pytorch precision
    feature_dataset.data = features
    feature_dataset.transform = None
    feature_dataloader = torch.utils.data.DataLoader(feature_dataset, batch_size=self.batch_size, shuffle=False)

    self.model.eval()
    correct = 0
    with torch.no_grad():  # inference only; avoids building autograd graphs
        for _, (inputs, labels) in enumerate(feature_dataloader):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            pred = self.model.forward(inputs)
            _, pred_labels = torch.max(pred, 1)
            correct += (pred_labels == labels).sum()
    acc = correct / features.shape[0]
    self.log_info(
        fl_ctx,
        f"Label Side test_accuracy: {acc:.4f}",
    )
    # Consistency fix: record timing like the sibling handlers do; t_start was
    # previously captured but never used.
    self.compute_stats_pool.record_value(category="_valid_label_side", value=timer() - t_start)
    return make_reply(ReturnCode.OK)
# Model initialization task (one time only in beginning)
def init_model(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
    """One-time initialization: load the broadcast global weights into the
    local model, reshaping each tensor to the local layout.

    Args:
        shareable: Shareable whose DXO data maps variable names to weights.
        fl_ctx: FL context of this task.
        abort_signal: signal checked before each variable copy.

    Returns:
        Shareable reply (OK, or TASK_ABORTED if the signal fired).
    """
    t_start = timer()
    if abort_signal.triggered:
        return make_reply(ReturnCode.TASK_ABORTED)

    global_weights = from_shareable(shareable).data
    local_var_dict = self.model.state_dict()
    n_loaded = 0
    for var_name in local_var_dict:
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        if var_name not in global_weights:
            continue
        try:
            # Reshape so later weight-difference computations line up, then
            # copy the tensor into the local state dict.
            reshaped = np.reshape(global_weights[var_name], local_var_dict[var_name].shape)
            global_weights[var_name] = reshaped
            local_var_dict[var_name] = torch.as_tensor(reshaped)
            n_loaded += 1
        except Exception as e:
            raise ValueError(f"Convert weight from {var_name} failed.") from e
    self.model.load_state_dict(local_var_dict)
    if n_loaded == 0:
        raise ValueError("No global weights loaded!")

    self.compute_stats_pool.record_value(category="init_model", value=timer() - t_start)
    self.log_info(fl_ctx, "init_model finished.")
    return make_reply(ReturnCode.OK)
def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
    """main training logic

    One-shot VFL driver: only the feature-side client (split_id 0) runs the
    round loop; the label side returns OK immediately and participates via the
    aux message handlers instead.

    Round 0: extract features, send them to the label side, receive gradients.
    Round 1: cluster the gradients into pseudo-labels, train locally, send
    fresh features so the label side trains its classifier, then validate.
    """
    t_start = timer()
    engine = fl_ctx.get_engine()

    self.num_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
    if not self.num_rounds:
        raise ValueError("No number of rounds available.")
    self.batch_size = shareable.get_header(OSVFLNNConstants.BATCH_SIZE)
    self.target_names = np.asarray(
        shareable.get_header(OSVFLNNConstants.TARGET_NAMES)
    )  # convert to array for string matching below
    # The peer is whichever configured target name is not this client.
    self.other_client = self.target_names[self.target_names != self.client_name][0]
    self.log_info(fl_ctx, f"Starting training of {self.num_rounds} rounds with batch size {self.batch_size}")

    gradients = None  # initial gradients
    if self.split_id != 0:
        self.compute_stats_pool.record_value(category="train", value=timer() - t_start)
        return make_reply(ReturnCode.OK)  # only run this logic on first site

    self.train_dataloader_shuffle = torch.utils.data.DataLoader(
        self.train_dataset, batch_size=self.batch_size, shuffle=True
    )
    self.train_dataloader_no_shuffle = torch.utils.data.DataLoader(
        self.train_dataset, batch_size=self.batch_size, shuffle=False
    )
    self.valid_dataloader = torch.utils.data.DataLoader(
        self.valid_dataset, batch_size=self.batch_size, shuffle=False
    )

    # NOTE(review): only rounds 0 and 1 do any work; additional rounds fall
    # through the if/elif and are no-ops — confirm num_rounds is meant to be 2.
    for _curr_round in range(self.num_rounds):
        self.current_round = _curr_round
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        self.log_debug(fl_ctx, f"Starting current round={self.current_round} of {self.num_rounds}.")
        # self.train_batch_indices = np.random.randint(0, self.train_size - 1, self.batch_size)

        # first round: site-1 extracts features and send to site-2; site-2 return gradients back to site-1
        if _curr_round == 0:
            fl_ctx.set_prop(AppConstants.CURRENT_ROUND, self.current_round, private=True, sticky=False)
            # site-1 extract features
            features = self._extract_features()
            # package features
            dxo = DXO(data={OSVFLNNConstants.DATA: fobs.dumps(features)}, data_kind=OSVFLDataKind.FEATURES)
            data_shareable = dxo.to_shareable()
            # add meta data for transmission
            data_shareable.set_header(AppConstants.CURRENT_ROUND, self.current_round)
            data_shareable.set_header(AppConstants.NUM_ROUNDS, self.num_rounds)
            data_shareable.add_cookie(AppConstants.CONTRIBUTION_ROUND, self.current_round)
            # send clustering request to site-2
            result = engine.send_aux_request(
                targets=self.other_client,
                topic=OSVFLNNConstants.TASK_CALCULATE_GRADIENTS,
                request=data_shareable,
                timeout=OSVFLNNConstants.TIMEOUT,
                fl_ctx=fl_ctx,
            )
            # check returned results (gradients)
            shareable = result.get(self.other_client)
            if shareable is not None:
                dxo = from_shareable(shareable)
                if dxo.data_kind != OSVFLDataKind.GRADIENTS:
                    raise ValueError(f"Expected data kind {OSVFLDataKind.GRADIENTS} but received {dxo.data_kind}")
                gradients = dxo.data.get(OSVFLNNConstants.DATA)
                gradients = fobs.loads(gradients)
            else:
                raise ValueError(f"No message returned from {self.other_client}!")
        # second round: site-1 conducts clustering, local training, and sending features to site-2;
        # site-2 trains the classifier
        elif _curr_round == 1:
            # site-1 conducts clustering and local training
            cluster_labels = self._cluster_gradients(gradients, fl_ctx)
            self._local_train(cluster_labels, fl_ctx)
            features = self._extract_features()
            # site-1 packages features
            dxo = DXO(data={OSVFLNNConstants.DATA: fobs.dumps(features)}, data_kind=OSVFLDataKind.FEATURES)
            data_shareable = dxo.to_shareable()
            data_shareable.set_header(AppConstants.CURRENT_ROUND, self.current_round)
            data_shareable.set_header(AppConstants.NUM_ROUNDS, self.num_rounds)
            data_shareable.add_cookie(AppConstants.CONTRIBUTION_ROUND, self.current_round)
            # site-1 sends features to site-2; site-2 trains the classifier
            engine.send_aux_request(
                targets=self.other_client,
                topic=OSVFLNNConstants.TASK_TRAIN_LABEL_SIDE,
                request=data_shareable,
                timeout=OSVFLNNConstants.TIMEOUT,
                fl_ctx=fl_ctx,
            )
            try:
                self._validate(fl_ctx)
            except Exception as e:
                # NOTE(review): "Valiate" typo in the log message below — fix
                # in a dedicated change (runtime string, not touched here).
                self.log_info(fl_ctx, "Valiate exit with exception {}".format(e))
                return make_reply(ReturnCode.EXECUTION_EXCEPTION)

        self.log_debug(fl_ctx, f"Ending current round={self.current_round}.")
        # if self.val_freq > 0:
        #     if _curr_round % self.val_freq == 0:
        #         self._validate(fl_ctx)

    self.compute_stats_pool.record_value(category="train", value=timer() - t_start)
    return make_reply(ReturnCode.OK)
def _validate(self, fl_ctx: FLContext):
    """Extract validation-set features on the feature side and send them to
    the label-side client, which computes and logs the test accuracy.

    Args:
        fl_ctx: FL context used to reach the engine and send the aux request.
    """
    t_start = timer()
    engine = fl_ctx.get_engine()

    self.model.eval()
    features = []
    with torch.no_grad():  # inference only; no gradients flow back from validation
        for _, (inputs, _) in enumerate(self.valid_dataloader):
            inputs = inputs.to(self.device)
            features.append(self.model(inputs))
    features = torch.cat(features, dim=0)
    features = features.view(features.size(0), -1)

    # NOTE(review): unlike the training path, features are not cast to fp16
    # here even when self.fp16 is set — confirm whether that is intended.
    dxo = DXO(data={OSVFLNNConstants.DATA: fobs.dumps(features)}, data_kind=OSVFLDataKind.FEATURES)
    data_shareable = dxo.to_shareable()
    # send to other side to validate
    engine.send_aux_request(
        targets=self.other_client,
        topic=OSVFLNNConstants.TASK_VALID,
        request=data_shareable,
        timeout=OSVFLNNConstants.TIMEOUT,
        fl_ctx=fl_ctx,
    )
    self.compute_stats_pool.record_value(category="_validate", value=timer() - t_start)
    self.log_debug(fl_ctx, "finished validation.")
| NVFlare-main | research/one-shot-vfl/src/oneshotVFL/cifar10_learner_oneshotVFL.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.client import Client
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Controller, Task
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learnable_persistor import LearnablePersistor
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.security.logging import secure_format_exception
from nvflare.widgets.info_collector import GroupInfoCollector, InfoCollector
class OSVFLDataKind(object):
    """DXO data-kind identifiers for payloads exchanged between the one-shot
    VFL clients (feature side <-> label side)."""

    FEATURES = "_osvfl_features_"    # forward activations, feature side -> label side
    GRADIENTS = "_osvfl_gradients_"  # activation gradients, label side -> feature side
    CLUSTERS = "_osvfl_clusters_"    # not referenced in the visible workflow — possibly reserved
class OSVFLNNConstants(object):
    """Shareable header keys, task names, and protocol constants shared by the
    one-shot VFL controller, executor, and learner."""

    # Shareable header / data keys
    BATCH_INDICES = "_osvfl_batch_indices_"
    DATA = "_osvfl_data_"
    BATCH_SIZE = "_osvfl_batch_size_"
    TARGET_NAMES = "_osvfl_target_names_"

    # Task / aux-message topic names
    TASK_INIT_MODEL = "_osvfl_task_init_model_"
    TASK_VALID = "_osvfl_task_valid_"
    TASK_TRAIN_LABEL_SIDE = "_osvfl_task_train_label_side_"
    TASK_CLUSTER = "_osvfl_cluster_features_"
    TASK_CALCULATE_GRADIENTS = "_osvfl_calculate_gradients_"
    TASK_TRAIN = "_osvfl_task_train_"
    # NOTE(review): value keeps a legacy "_splitnn_" prefix, presumably
    # inherited from the SplitNN workflow this code derives from; the string
    # is left unchanged since it is used as a stored property key.
    TASK_RESULT = "_splitnn_task_result_"

    TIMEOUT = 60.0  # timeout for waiting for reply from aux message request
class OSVFLController(Controller):
    """Server-side workflow controller for one-shot vertical federated
    learning: broadcasts the initial model to both clients, then issues a
    single train task; the clients drive the protocol via aux messages."""

    def __init__(
        self,
        num_rounds: int = 5000,
        start_round: int = 0,
        persistor_id=AppConstants.DEFAULT_PERSISTOR_ID,  # used to init the models on both clients
        shareable_generator_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
        init_model_task_name=OSVFLNNConstants.TASK_INIT_MODEL,
        train_task_name=OSVFLNNConstants.TASK_TRAIN,
        task_timeout: int = 10,
        ignore_result_error: bool = True,
        batch_size: int = 256,
    ):
        """The controller for the one-shot VFL (split learning) workflow.

        The model persistor (persistor_id) is used to load the initial global model which is sent to all clients.
        The shareable generator is used to convert the model weights to a shareable for broadcast.

        Args:
            num_rounds (int, optional): The total number of training rounds. Defaults to 5000.
            start_round (int, optional): Start round for training. Defaults to 0.
            persistor_id (str, optional): ID of the persistor component. Defaults to "persistor".
            shareable_generator_id (str, optional): ID of the shareable generator. Defaults to "shareable_generator".
            init_model_task_name: Task name used to initialize the local models.
            train_task_name: Task name used for split learning.
            task_timeout (int, optional): timeout (in sec) to determine if one client fails
                to request the task which it is assigned to. Defaults to 10.
            ignore_result_error (bool, optional): whether this controller can proceed if result has errors. Defaults to True.
            batch_size (int, optional): training batch size forwarded to the clients. Defaults to 256.

        Raises:
            TypeError: when any of input arguments does not have correct type
            ValueError: when any of input arguments is out of range
        """
        Controller.__init__(self)

        # Check arguments
        if not isinstance(num_rounds, int):
            raise TypeError("`num_rounds` must be int but got {}".format(type(num_rounds)))
        if not isinstance(start_round, int):
            raise TypeError("`start_round` must be int but got {}".format(type(start_round)))
        if not isinstance(task_timeout, int):
            raise TypeError("`train_timeout` must be int but got {}".format(type(task_timeout)))
        if not isinstance(persistor_id, str):
            raise TypeError("`persistor_id` must be a string but got {}".format(type(persistor_id)))
        if not isinstance(shareable_generator_id, str):
            raise TypeError("`shareable_generator_id` must be a string but got {}".format(type(shareable_generator_id)))
        if not isinstance(init_model_task_name, str):
            raise TypeError("`init_model_task_name` must be a string but got {}".format(type(init_model_task_name)))
        if not isinstance(train_task_name, str):
            raise TypeError("`train_task_name` must be a string but got {}".format(type(train_task_name)))
        if num_rounds < 0:
            raise ValueError("num_rounds must be greater than or equal to 0.")
        if start_round < 0:
            raise ValueError("start_round must be greater than or equal to 0.")

        self.persistor_id = persistor_id
        self.shareable_generator_id = shareable_generator_id
        self.persistor = None            # resolved from the engine in start_controller
        self.shareable_generator = None  # resolved from the engine in start_controller

        # config data
        self._num_rounds = num_rounds
        self._start_round = start_round
        self._task_timeout = task_timeout
        self.ignore_result_error = ignore_result_error

        # workflow phases: init, train, validate
        self._phase = AppConstants.PHASE_INIT
        self._global_weights = None
        # NOTE(review): _current_round is never updated by this controller, so
        # the stats reported in handle_event will always show None — confirm.
        self._current_round = None

        # task names
        self.init_model_task_name = init_model_task_name
        self.train_task_name = train_task_name

        # Only a two-client (feature side + label side) setup is supported.
        self.targets_names = ["site-1", "site-2"]
        self.nr_supported_clients = 2
        self.batch_size = batch_size

    def start_controller(self, fl_ctx: FLContext):
        """Resolve configured components and load/broadcast the initial model."""
        self.log_debug(fl_ctx, "starting controller")
        self.persistor = fl_ctx.get_engine().get_component(self.persistor_id)
        self.shareable_generator = fl_ctx.get_engine().get_component(self.shareable_generator_id)
        if not isinstance(self.persistor, LearnablePersistor):
            self.system_panic(
                f"Persistor {self.persistor_id} must be a Persistor instance, but got {type(self.persistor)}", fl_ctx
            )
        if not isinstance(self.shareable_generator, ShareableGenerator):
            self.system_panic(
                f"Shareable generator {self.shareable_generator_id} must be a Shareable Generator instance, "
                f"but got {type(self.shareable_generator)}",
                fl_ctx,
            )

        # initialize global model
        fl_ctx.set_prop(AppConstants.START_ROUND, self._start_round, private=True, sticky=True)
        fl_ctx.set_prop(AppConstants.NUM_ROUNDS, self._num_rounds, private=True, sticky=False)
        self._global_weights = self.persistor.load(fl_ctx)
        fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, self._global_weights, private=True, sticky=True)
        self.fire_event(AppEventType.INITIAL_MODEL_LOADED, fl_ctx)

    def _process_result(self, client_task: ClientTask, fl_ctx: FLContext) -> bool:
        """Result callback: validate the return code and stash the result on
        the task; panics (or ignores, per ignore_result_error) on errors."""
        # submitted shareable is stored in client_task.result
        # we need to update task.data with that shareable so the next target
        # will get the updated shareable
        task = client_task.task
        result = client_task.result
        rc = result.get_return_code()

        if rc and rc != ReturnCode.OK:
            if self.ignore_result_error:
                self.log_error(fl_ctx, f"Ignore the task {task} result. Train result error code: {rc}")
                return False
            else:
                if rc in [ReturnCode.MISSING_PEER_CONTEXT, ReturnCode.BAD_PEER_CONTEXT]:
                    self.system_panic(
                        f"Peer context for task {task} is bad or missing. SplitNNController exiting.", fl_ctx=fl_ctx
                    )
                    return False
                elif rc in [ReturnCode.EXECUTION_EXCEPTION, ReturnCode.TASK_UNKNOWN]:
                    self.system_panic(
                        f"Execution Exception in client task {task}. SplitNNController exiting.", fl_ctx=fl_ctx
                    )
                    return False
                elif rc in [
                    ReturnCode.EXECUTION_RESULT_ERROR,
                    ReturnCode.TASK_DATA_FILTER_ERROR,
                    ReturnCode.TASK_RESULT_FILTER_ERROR,
                ]:
                    self.system_panic(
                        f"Execution result for task {task} is not a shareable. SplitNNController exiting.",
                        fl_ctx=fl_ctx,
                    )
                    return False

        # assign result to current task
        if result:
            task.set_prop(OSVFLNNConstants.TASK_RESULT, result)

        return True

    def _check_targets(self, fl_ctx: FLContext):
        """Panic if any connected client is not one of the two expected sites."""
        engine = fl_ctx.get_engine()
        targets = engine.get_clients()
        for t in targets:
            if t.name not in self.targets_names:
                self.system_panic(f"Client {t.name} not in expected target names: {self.targets_names}", fl_ctx)

    def _init_models(self, abort_signal: Signal, fl_ctx: FLContext):
        """Broadcast the initial global weights to both clients and wait."""
        self._check_targets(fl_ctx)
        self.log_debug(fl_ctx, f"OneshotVFL initializing model {self.targets_names}.")

        # Create init_model_task_name
        data_shareable: Shareable = self.shareable_generator.learnable_to_shareable(self._global_weights, fl_ctx)
        task = Task(
            name=self.init_model_task_name,
            data=data_shareable,
            result_received_cb=self._process_result,
        )

        self.broadcast_and_wait(
            task=task,
            min_responses=self.nr_supported_clients,
            wait_time_after_min_received=0,
            fl_ctx=fl_ctx,
            abort_signal=abort_signal,
        )

    def _train(self, abort_signal: Signal, fl_ctx: FLContext):
        """Broadcast the train task (round count, batch size, target names)
        to both clients and wait for completion."""
        self._check_targets(fl_ctx)
        self.log_debug(fl_ctx, f"SplitNN training starting with {self.targets_names}.")

        # Create train_task
        data_shareable: Shareable = Shareable()
        data_shareable.set_header(AppConstants.NUM_ROUNDS, self._num_rounds)
        data_shareable.set_header(OSVFLNNConstants.BATCH_SIZE, self.batch_size)
        data_shareable.set_header(OSVFLNNConstants.TARGET_NAMES, self.targets_names)

        task = Task(
            name=self.train_task_name,
            data=data_shareable,
            result_received_cb=self._process_result,
        )

        # By default the request is broadcast to all the clients...
        self.broadcast_and_wait(
            task=task,
            min_responses=self.nr_supported_clients,
            wait_time_after_min_received=0,
            fl_ctx=fl_ctx,
            abort_signal=abort_signal,
        )

    def control_flow(self, abort_signal: Signal, fl_ctx: FLContext):
        """Top-level workflow: init models on both clients, then train."""
        try:
            self._check_targets(fl_ctx)
            self.log_debug(fl_ctx, f"Train with on {self.targets_names}")

            # 1. initialize models on clients
            self._init_models(abort_signal=abort_signal, fl_ctx=fl_ctx)

            # 2. Start split learning
            self._phase = AppConstants.PHASE_TRAIN
            self._train(abort_signal=abort_signal, fl_ctx=fl_ctx)

            self._phase = AppConstants.PHASE_FINISHED
            self.log_debug(fl_ctx, "SplitNN training ended.")
        except BaseException as e:
            error_msg = f"SplitNN control_flow exception {secure_format_exception(e)}"
            self.log_error(fl_ctx, error_msg)
            self.system_panic(error_msg, fl_ctx)

    def stop_controller(self, fl_ctx: FLContext):
        self._phase = AppConstants.PHASE_FINISHED
        self.log_debug(fl_ctx, "controller stopped")

    def process_result_of_unknown_task(
        self,
        client: Client,
        task_name: str,
        client_task_id: str,
        result: Shareable,
        fl_ctx: FLContext,
    ):
        """Late/stray results are logged and dropped; no recovery is attempted."""
        self.log_warning(fl_ctx, f"Dropped result of unknown task: {task_name} from client {client.name}.")

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        """Report workflow phase/round stats when an InfoCollector asks."""
        super().handle_event(event_type, fl_ctx)
        if event_type == InfoCollector.EVENT_TYPE_GET_STATS:
            collector = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR, None)
            if collector:
                if not isinstance(collector, GroupInfoCollector):
                    raise TypeError("collector must be GroupInfoCollector but got {}".format(type(collector)))
                collector.add_info(
                    group_name=self._name,
                    info={"phase": self._phase, "current_round": self._current_round, "num_rounds": self._num_rounds},
                )
| NVFlare-main | research/one-shot-vfl/src/oneshotVFL/vfl_oneshot_workflow.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oneshotVFL.vfl_oneshot_workflow import OSVFLNNConstants
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.security.logging import secure_format_exception
class OSVFLLearnerExecutor(Executor):
    """Client-side executor that routes one-shot VFL tasks (model init and
    training) to a configured `Learner` component."""

    def __init__(
        self,
        learner_id,
        init_model_task_name=OSVFLNNConstants.TASK_INIT_MODEL,
        train_task_name=OSVFLNNConstants.TASK_TRAIN,
    ):
        """Key component to run learner on clients.

        Args:
            learner_id (str): id pointing to the learner object
            init_model_task_name (str, optional): label to dispatch the one-time
                model initialization task.
            train_task_name (str, optional): label to dispatch the train task.
        """
        super().__init__()
        self.learner_id = learner_id
        self.learner = None
        self.init_model_task_name = init_model_task_name
        self.train_task_name = train_task_name

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        # Guard-clause dispatch: the three events are mutually exclusive.
        if event_type == EventType.START_RUN:
            self.initialize(fl_ctx)
            return
        if event_type == EventType.END_RUN:
            self.finalize(fl_ctx)
            return
        if event_type == EventType.ABORT_TASK:
            try:
                if self.learner:
                    self.learner.abort(fl_ctx)
            except Exception as e:
                self.log_exception(fl_ctx, f"learner abort exception: {secure_format_exception(e)}")

    def initialize(self, fl_ctx: FLContext):
        """Resolve the learner component from the engine and initialize it."""
        try:
            self.log_info(fl_ctx, f"Task:{self.init_model_task_name, self.train_task_name}")
            engine = fl_ctx.get_engine()
            self.learner = engine.get_component(self.learner_id)
            if not isinstance(self.learner, Learner):
                raise TypeError(f"learner must be Learner type. Got: {type(self.learner)}")
            self.learner.initialize(engine.get_all_components(), fl_ctx)
        except Exception as e:
            self.log_exception(fl_ctx, f"learner initialize exception: {secure_format_exception(e)}")

    def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Dispatch a task by name to the learner; unknown names are rejected."""
        self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
        self.log_info(fl_ctx, f"Executing task {task_name}...")
        try:
            if task_name == self.init_model_task_name:
                self.log_info(fl_ctx, "Initializing model...")
                return self.learner.init_model(shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
            if task_name == self.train_task_name:
                self.log_info(fl_ctx, "Running training...")
                return self.learner.train(shareable=shareable, fl_ctx=fl_ctx, abort_signal=abort_signal)
            self.log_error(fl_ctx, f"Could not handle task: {task_name}")
            return make_reply(ReturnCode.TASK_UNKNOWN)
        except Exception as e:
            # Task execution error, return EXECUTION_EXCEPTION Shareable
            self.log_exception(fl_ctx, f"learner execute exception: {secure_format_exception(e)}")
            return make_reply(ReturnCode.EXECUTION_EXCEPTION)

    def finalize(self, fl_ctx: FLContext):
        """Best-effort learner cleanup at END_RUN; failures are only logged."""
        try:
            if self.learner:
                self.learner.finalize(fl_ctx)
        except Exception as e:
            self.log_exception(fl_ctx, f"learner finalize exception: {secure_format_exception(e)}")
| NVFlare-main | research/one-shot-vfl/src/oneshotVFL/vfl_oneshot_learner_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import torch
from monai.data import CacheDataset, DataLoader, load_decathlon_datalist
from monai.inferers import SimpleInferer
from monai.metrics import DiceMetric
from monai.networks.nets import UNet
from monai.transforms import (
Activations,
AsDiscrete,
AsDiscreted,
Compose,
EnsureChannelFirstd,
EnsureType,
EnsureTyped,
LoadImaged,
Resized,
ScaleIntensityRanged,
)
from vgg import vgg11
# Filename suffix of the checkpoints produced by the FL training run.
model_postfix = "_best_FL_global_model.pt"
# Client identifiers; order presumably matches the selection model's output
# class indices — NOTE(review): confirm against the training datalist ordering.
client_id_labels = ["client_I2CVB", "client_MSD", "client_NCI_ISBI_3T"]
def _load_checkpoint(model, model_path):
    """Load a checkpoint into ``model`` and switch it to eval mode.

    Each stored value is converted with ``torch.as_tensor`` so numpy-saved
    weights load cleanly into the torch state dict.
    """
    model_stat_dict = torch.load(model_path)
    for var_name in model_stat_dict:
        model_stat_dict[var_name] = torch.as_tensor(model_stat_dict[var_name])
    model.load_state_dict(model_stat_dict)
    model.eval()


def main():
    """Evaluate the FedSM model set on the combined 2D prostate test set.

    For every test image, the selector (VGG-11 classifier) predicts the source
    site. If its softmax confidence exceeds ``--select_threshold``, the matching
    personalized U-Net segments the image; otherwise the global U-Net is used.
    Prints the mean Dice score over the whole test set.
    """
    parser = argparse.ArgumentParser(description="Model Testing")
    parser.add_argument("--models_dir", type=str)
    parser.add_argument("--cache_rate", default=0.0, type=float)
    parser.add_argument("--select_threshold", default=0.9, type=float)
    parser.add_argument("--dataset_base_dir", default="DATASET_ROOT/dataset_2D", type=str)
    parser.add_argument(
        "--datalist_json_path",
        default="DATASET_ROOT/datalist_2D/client_All.json",
        type=str,
    )
    args = parser.parse_args()

    # Set basic settings and paths
    dataset_base_dir = args.dataset_base_dir
    datalist_json_path = args.datalist_json_path
    models_dir = args.models_dir
    cache_rate = args.cache_rate
    select_threshold = args.select_threshold
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Set datalists
    test_list = load_decathlon_datalist(
        data_list_file_path=datalist_json_path,
        is_segmentation=True,
        data_list_key="testing",
        base_dir=dataset_base_dir,
    )
    print(f"Testing Size: {len(test_list)}")

    # Networks: a selector classifier, a global U-Net, and one personalized
    # U-Net per site (same architecture as the global model).
    num_site = len(client_id_labels)
    model_select = vgg11(num_classes=num_site).to(device)
    model_global = UNet(
        spatial_dims=2,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    model_person = []
    for site in range(num_site):
        model_person.append(
            UNet(
                spatial_dims=2,
                in_channels=1,
                out_channels=1,
                channels=(16, 32, 64, 128, 256),
                strides=(2, 2, 2, 2),
                num_res_units=2,
            ).to(device)
        )

    # Load all checkpoints (global, selector, and per-site personalized models).
    _load_checkpoint(model_global, models_dir + "global_weights" + model_postfix)
    _load_checkpoint(model_select, models_dir + "select_weights" + model_postfix)
    for site in range(num_site):
        _load_checkpoint(model_person[site], models_dir + client_id_labels[site] + model_postfix)

    # Inferer, evaluation metric
    inferer_select = SimpleInferer()
    inferer_segment = SimpleInferer()
    valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
    transform = Compose(
        [
            LoadImaged(keys=["image", "label"]),
            EnsureChannelFirstd(keys=["image", "label"]),
            ScaleIntensityRanged(keys=["image", "label"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),
            Resized(
                keys=["image", "label"],
                spatial_size=(256, 256),
                mode=("bilinear"),
                align_corners=True,
            ),
            AsDiscreted(keys=["label"], threshold=0.5),
            EnsureTyped(keys=["image", "label"]),
        ]
    )
    transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])

    # Set dataset
    test_dataset = CacheDataset(
        data=test_list,
        transform=transform,
        cache_rate=cache_rate,
        num_workers=4,
    )
    # batch_size must stay 1: the selector score is squeezed to a 1-D vector below.
    test_loader = DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1,
    )

    model_select.eval()  # already in eval mode after loading; kept as a safety net
    with torch.no_grad():
        metric = 0
        for i, batch_data in enumerate(test_loader):
            images = batch_data["image"].to(device)
            labels = batch_data["label"].to(device)
            # Inference
            # get the selector result; dim=1 is the class dimension
            # (fix: the previous implicit-dim softmax call is deprecated)
            outputs_select = inferer_select(images, model_select)
            score = torch.nn.functional.softmax(outputs_select, dim=1).cpu().numpy()
            score = np.squeeze(score)
            max_index = np.argmax(score)
            max_score = score[max_index]
            # get max score and determine which model to use
            if max_score > select_threshold:
                model_segment = model_person[max_index]
            else:
                model_segment = model_global
            # segmentation inference
            outputs_segment = inferer_segment(images, model_segment)
            outputs_segment = transform_post(outputs_segment)
            # Compute metric
            metric_score = valid_metric(y_pred=outputs_segment, y=labels)
            metric += metric_score.item()
        # compute mean dice over whole test set (valid because batch_size == 1)
        metric /= len(test_loader)
        print(f"Test Dice: {metric:.4f}")
# Script entry point: run the full test-set evaluation.
if __name__ == "__main__":
    main()
| NVFlare-main | research/fed-sm/result_stat/prostate_2d_test_only.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Union, cast
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# Public API of this module: the VGG class and the vgg11 factory.
__all__ = [
    "VGG",
    "vgg11",
]
# URLs of the ImageNet-pretrained checkpoints published by torchvision.
model_urls = {
    "vgg11": "https://download.pytorch.org/models/vgg11-8a719046.pth",
}
class VGG(nn.Module):
    """VGG backbone with the torchvision-style 3-layer fully connected head."""

    def __init__(self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True) -> None:
        super(VGG, self).__init__()
        # Convolutional feature extractor, built externally (see make_layers).
        self.features = features
        # Pool to a fixed 7x7 spatial size so the head accepts any input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # Classification head: two hidden FC layers with ReLU + dropout.
        head = [
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)
        if init_weights:
            self._initialize_weights()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return raw (un-normalized) class scores for a batch of images."""
        pooled = self.avgpool(self.features(x))
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)

    def _initialize_weights(self) -> None:
        """Apply the standard torchvision VGG initialization scheme."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                # He initialization suits ReLU activations.
                nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    """Translate a VGG configuration list into a sequential feature extractor.

    Each integer entry adds a 3x3 conv (+ optional BatchNorm) followed by ReLU;
    the sentinel "M" adds a 2x2 max-pool. The network input is single-channel.
    """
    modules: List[nn.Module] = []
    channels = 1  # grayscale input
    for entry in cfg:
        if entry == "M":
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        out_channels = cast(int, entry)
        modules.append(nn.Conv2d(channels, out_channels, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(out_channels))
        modules.append(nn.ReLU(inplace=True))
        channels = out_channels
    return nn.Sequential(*modules)
cfgs: Dict[str, List[Union[str, int]]] = {"A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"]}
def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    """Build a VGG variant, optionally loading torchvision's pretrained weights."""
    if pretrained:
        # Pretrained checkpoints already contain trained weights; skip random init.
        kwargs["init_weights"] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict)
    return model
def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg(arch="vgg11", cfg="A", batch_norm=False, pretrained=pretrained, progress=progress, **kwargs)
| NVFlare-main | research/fed-sm/result_stat/vgg.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# simulator workspace
client_results_root = "../workspaces/fedsm_prostate/"
client_pre = "app_client_"
# 3 sites
sites_fl = ["I2CVB", "MSD", "NCI_ISBI_3T"]
metrics = {
"global_model": "val_metric_global_model",
"personal_model": "val_metric_person_model",
"selector_model": "val_metric_select_model",
}
weight = 0.8
def smooth(scalars, weight):  # Weight between 0 and 1
    """Exponentially smooth a sequence of scalars (TensorBoard-style EMA).

    Args:
        scalars: sequence of numeric values to smooth.
        weight: smoothing factor in [0, 1]; 0 returns the input unchanged,
            values near 1 produce a flatter curve.

    Returns:
        A list of smoothed values with the same length as ``scalars``.
    """
    if not scalars:
        # Fix: avoid IndexError on an empty sequence; nothing to smooth.
        return []
    last = scalars[0]  # First value in the plot (first timestep)
    smoothed = list()
    for point in scalars:
        smoothed_val = last * weight + (1 - weight) * point  # Calculate smoothed value
        smoothed.append(smoothed_val)  # Save it
        last = smoothed_val  # Anchor the last smoothed value
    return smoothed
def read_eventfile(filepath, tags=("val_metric_global_model",)):
    """Collect scalar events for the given tags from one TensorBoard event file.

    Args:
        filepath: path to a TensorBoard event file.
        tags: iterable of summary tags to extract. The default is a tuple
            (fix: previously a mutable list default, a shared-state pitfall).

    Returns:
        Dict mapping each tag found in the file to a list of [step, value] pairs.
    """
    data = {}
    for summary in tf.compat.v1.train.summary_iterator(filepath):
        for v in summary.summary.value:
            if v.tag in tags:
                if v.tag in data.keys():
                    data[v.tag].append([summary.step, v.simple_value])
                else:
                    data[v.tag] = [[summary.step, v.simple_value]]
    return data
def add_eventdata(data, site, filepath, tag="val_metric_global_model"):
    """Append one site's smoothed metric curve for ``tag`` into the plot dict ``data``."""
    points = read_eventfile(filepath, tags=[tag])[tag]
    assert len(points) > 0, f"No data for key {tag}"
    raw_values = []
    for step, value in points:
        data["Site"].append(site)
        data["Epoch"].append(step)
        raw_values.append(value)
    # smooth with the module-level weight before storing
    data["Metric"].extend(smooth(raw_values, weight))
    print(f"added {len(points)} entries for {tag}")
def main():
    """Plot the smoothed validation curves of all three FedSM model types per site."""
    plt.figure()
    num_metric = len(metrics)
    i = 1
    for metric in metrics.keys():
        # clear data for each metric
        data = {"Site": [], "Epoch": [], "Metric": []}
        for site in sites_fl:
            # locate the unique TensorBoard event file for this site
            record_path = os.path.join(client_results_root, "simulate_job", client_pre + site, "events.*")
            eventfile = glob.glob(record_path, recursive=True)
            assert len(eventfile) == 1, "No unique event file found!"
            eventfile = eventfile[0]
            print("adding", eventfile)
            add_eventdata(data, site, eventfile, tag=metrics[metric])
        # one subplot per model type, laid out side by side
        ax = plt.subplot(1, int(num_metric), i)
        ax.set_title(metric)
        sns.lineplot(x="Epoch", y="Metric", hue="Site", data=data)
        # the selector is a classifier (accuracy); the other two are segmenters (Dice)
        if metric == "selector_model":
            plt.ylabel("Accuracy (%)")
        else:
            plt.ylabel("Dice")
        i = i + 1
    plt.subplots_adjust(hspace=0.3)
    plt.show()
# Script entry point: render the plots.
if __name__ == "__main__":
    main()
| NVFlare-main | research/fed-sm/result_stat/plot_tensorboard_events.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.aggregator import Aggregator
from nvflare.app_common.app_constant import AppConstants
class _AccuItem(object):
    """One accepted client contribution, held until the next aggregation."""

    def __init__(self, client, data, steps):
        # name of the contributing client
        self.client = client
        # dict of DXOs (one entry per FedSM model) submitted by the client
        self.data = data
        # number of local training steps; used as the aggregation weight
        self.steps = steps
class AccumulateWeightedAggregatorFedSM(Aggregator):
    def __init__(
        self,
        soft_pull_lambda=0.7,
        exclude_vars_global=None,
        exclude_vars_person=None,
        exclude_vars_select=None,
        aggregation_weights=None,
    ):
        """FedSM aggregator.

        This aggregator performs two types of aggregation among received shareables from clients:
        weighted arithmetic average for model_global and model_select
        SoftPull aggregation for model_person

        Args:
            soft_pull_lambda: the control weight for generating person models via soft pull mechanism. Defaults to 0.7.
            exclude_vars (list, optional) for three models (_global/person/select): if not specified (None), all layers are included;
                if list of variable/layer names, only specified variables are excluded;
                if string containing regular expression (e.g. "conv"), only matched variables are being excluded.
                Defaults to None.
            aggregation_weights (dict, optional): a mapping from client names to weights. Defaults to None.

        Raises:
            ValueError: if data_kind is neither WEIGHT_DIFF nor WEIGHTS
        """
        super().__init__()
        self.soft_pull_lambda = soft_pull_lambda
        self.exclude_vars_global = re.compile(exclude_vars_global) if exclude_vars_global else None
        self.exclude_vars_person = re.compile(exclude_vars_person) if exclude_vars_person else None
        self.exclude_vars_select = re.compile(exclude_vars_select) if exclude_vars_select else None
        self.aggregation_weights = aggregation_weights or {}
        self.logger.debug(f"aggregation weights control: {aggregation_weights}")
        # FedSM aggregator expects "COLLECTION" DXO containing all three models.
        self.expected_data_kind = DataKind.COLLECTION
        self.accumulator = []
        # Throttle repeated "missing NUM_STEPS" warnings, counted per client.
        self.warning_count = dict()
        self.warning_limit = 10

    def accept(self, shareable: Shareable, fl_ctx: FLContext) -> bool:
        """Validate an incoming client shareable and queue it for aggregation.

        A contribution is accepted only when it is a valid COLLECTION DXO, was
        not already processed by another algorithm, carries an OK return code,
        belongs to the current round, and is the first one from that client
        in this round.

        Returns:
            bool: True if the contribution was added to the accumulator.
        """
        try:
            dxo = from_shareable(shareable)
        except Exception:
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
            self.log_exception(fl_ctx, "shareable data is not a valid DXO")
            return False
        if dxo.data_kind != self.expected_data_kind:
            self.log_error(
                fl_ctx,
                f"FedSM aggregator expect {self.expected_data_kind} but got {dxo.data_kind}",
            )
            return False
        processed_algorithm = dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)
        if processed_algorithm is not None:
            self.log_error(fl_ctx, f"unable to accept shareable processed by {processed_algorithm}")
            return False
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        self.log_debug(fl_ctx, f"current_round: {current_round}")
        client_name = shareable.get_peer_prop(key=ReservedKey.IDENTITY_NAME, default="?")
        contribution_round = shareable.get_cookie(AppConstants.CONTRIBUTION_ROUND)
        rc = shareable.get_return_code()
        if rc and rc != ReturnCode.OK:
            self.log_info(
                fl_ctx,
                f"Client {client_name} returned rc: {rc}. Disregarding contribution.",
            )
            return False
        data = dxo.data
        if data is None:
            self.log_error(fl_ctx, "no data to aggregate")
            return False
        n_iter = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND)
        if contribution_round == current_round:
            if not self._client_in_accumulator(client_name):
                self.accumulator.append(_AccuItem(client_name, data, n_iter))
                accepted = True
            else:
                self.log_info(
                    fl_ctx,
                    f"Discarded: Current round: {current_round} contributions already include client: {client_name}",
                )
                accepted = False
        else:
            # Fix: removed a stray trailing comma that wrapped this statement in a no-op tuple.
            self.log_info(
                fl_ctx,
                f"Discarded the contribution from {client_name} for round: {contribution_round}. Current round is: {current_round}",
            )
            accepted = False
        return accepted

    def _client_in_accumulator(self, client_name):
        """Return True if a contribution from ``client_name`` is already queued."""
        return any(client_name == item.client for item in self.accumulator)

    def aggregate(self, fl_ctx: FLContext) -> Shareable:
        """Aggregate model variables.

        This function is not thread-safe.

        Performs step-count-weighted arithmetic averaging for the global model,
        the selector model, and the selector's Adam moment buffers, then
        SoftPull aggregation for the personalized models.

        Args:
            fl_ctx (FLContext): System-wide FL Context

        Returns:
            Shareable: a COLLECTION DXO holding all aggregated models.
        """
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        self.log_info(
            fl_ctx,
            f"aggregating {len(self.accumulator)} updates at round {current_round}",
        )
        aggregated_model_dict = {}
        # regular weighted average aggregation for global and selector models and parameters
        model_ids = [
            "global_weights",
            "select_weights",
            "select_exp_avg",
            "select_exp_avg_sq",
        ]
        select_models = {}
        for model_id in model_ids:
            acc_vars = [set(acc.data[model_id].data.keys()) for acc in self.accumulator]
            acc_vars = set.union(*acc_vars) if acc_vars else acc_vars
            # update vars that are not in exclude pattern
            if model_id == "global_weights":
                exclude_vars = self.exclude_vars_global
            else:
                # Fix: made explicit — the selector weights and its Adam moment
                # buffers all use the selector's exclude pattern (previously the
                # value silently carried over from the prior loop iteration).
                exclude_vars = self.exclude_vars_select
            vars_to_aggregate = (
                [g_var for g_var in acc_vars if not exclude_vars.search(g_var)] if exclude_vars else acc_vars
            )
            aggregated_model = {}
            for v_name in vars_to_aggregate:
                n_local_iters, np_vars = [], []
                for item in self.accumulator:
                    client_name = item.client
                    data = item.data[model_id].data
                    n_iter = item.steps
                    if n_iter is None:
                        if self.warning_count.get(client_name, 0) <= self.warning_limit:
                            self.log_warning(
                                fl_ctx,
                                f"NUM_STEPS_CURRENT_ROUND missing in meta of shareable"
                                f" from {client_name} and set to default value, 1.0. "
                                f" This kind of message will show {self.warning_limit} times at most.",
                            )
                            if client_name in self.warning_count:
                                self.warning_count[client_name] = self.warning_count[client_name] + 1
                            else:
                                self.warning_count[client_name] = 0
                        n_iter = 1.0
                    if v_name not in data.keys():
                        continue  # this acc doesn't have the variable from client
                    float_n_iter = float(n_iter)
                    n_local_iters.append(float_n_iter)
                    weighted_value = data[v_name] * float_n_iter
                    np_vars.append(weighted_value)
                if not n_local_iters:
                    continue  # all acc didn't receive the variable from clients
                new_val = np.sum(np_vars, axis=0) / np.sum(n_local_iters)
                aggregated_model[v_name] = new_val
            # make aggregated weights a DXO and add to dict
            if model_id == "global_weights":
                dxo_weights = DXO(data_kind=DataKind.WEIGHT_DIFF, data=aggregated_model)
                aggregated_model_dict[model_id] = dxo_weights
            elif model_id == "select_weights":
                dxo_weights = DXO(data_kind=DataKind.WEIGHT_DIFF, data=aggregated_model)
                select_models[model_id] = dxo_weights
            else:
                # Adam moment buffers are full values, not diffs.
                dxo_weights = DXO(data_kind=DataKind.WEIGHTS, data=aggregated_model)
                select_models[model_id] = dxo_weights
        aggregated_model_dict["select_weights"] = select_models

        # SoftPull for personalized models
        # initialize the personalized model set dict: one output model per client
        aggregated_model = {}
        model_id = "person_weights"
        for item in self.accumulator:
            aggregated_model[item.client] = {}
        # exclude specified vars
        acc_vars = [set(acc.data[model_id].data.keys()) for acc in self.accumulator]
        acc_vars = set.union(*acc_vars) if acc_vars else acc_vars
        # Fix: use the personalized-model exclude pattern here; previously the
        # selector's pattern was applied, so `exclude_vars_person` was silently ignored.
        exclude_vars = self.exclude_vars_person
        vars_to_aggregate = (
            [g_var for g_var in acc_vars if not exclude_vars.search(g_var)] if exclude_vars else acc_vars
        )
        # SoftPull aggregation, weighted with soft pull lambda
        for v_name in vars_to_aggregate:
            # per-client accumulation buffers for this variable
            np_vars = {item.client: [] for item in self.accumulator}
            for item in self.accumulator:
                client_name = item.client
                data = item.data[model_id].data
                if v_name not in data.keys():
                    continue  # this acc doesn't have the variable from client
                for aggr_item in self.accumulator:
                    aggr_client_name = aggr_item.client
                    if aggr_client_name == client_name:
                        # each client keeps lambda of its own update...
                        weighted_value = data[v_name] * self.soft_pull_lambda
                    else:
                        # ...and pulls (1 - lambda), split evenly, from the other clients
                        weighted_value = data[v_name] * (1 - self.soft_pull_lambda) / (len(self.accumulator) - 1)
                    np_vars[aggr_client_name].append(weighted_value)
            for item in self.accumulator:
                client_name = item.client
                aggregated_model[client_name][v_name] = np.sum(np_vars[client_name], axis=0)
        # make aggregated weights a DXO and add to dict
        person_models = {}
        for client_name in aggregated_model.keys():
            person_models[client_name] = DXO(data_kind=DataKind.WEIGHTS, data=aggregated_model[client_name])
        aggregated_model_dict["person_weights"] = person_models

        self.accumulator.clear()
        self.log_debug(fl_ctx, f"Model after aggregation: {aggregated_model_dict}")
        dxo = DXO(data_kind=self.expected_data_kind, data=aggregated_model_dict)
        return dxo.to_shareable()
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/aggregators/accumulate_model_aggregator_fedsm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import traceback
from nvflare.apis.controller_spec import Task, TaskCompletionStatus
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.workflows.scatter_and_gather import ScatterAndGather
_TASK_KEY_DONE = "___done"
class ScatterAndGatherFedSM(ScatterAndGather):
    def __init__(
        self,
        client_id_label_mapping,
        parallel_task: int = 1,
        min_clients: int = 1,
        num_rounds: int = 5,
        start_round: int = 0,
        wait_time_after_min_received: int = 10,
        aggregator_id=AppConstants.DEFAULT_AGGREGATOR_ID,
        persistor_id="persistor_fedsm",
        shareable_generator_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
        train_task_name=AppConstants.TASK_TRAIN,
        train_timeout: int = 0,
        ignore_result_error: bool = True,
    ):
        """FedSM Workflow. The ScatterAndGatherFedSM workflow defines federated training on all clients.

        FedSM involves training, aggregating, and persisting three types of models:
        - selector model: FL the same as regular FedAvg
        - global model: FL the same as regular FedAvg
        - personalized models: one for each candidate site, FL aggregation with SoftPull
        client_id_label_mapping is needed for training selector model
        All models are combined to be persisted by a single persistor (as required by NVFlare framework)
        in order to load/save the initial models sent to clients according to the model and client IDs.
        - persistor_fedsm is customized for persisting FedSM model set (a dict of models) for all clients
        Each client sends it's updated three weights after local training, to be aggregated accordingly by aggregator,
        we use one customized aggregator to handle all three models,
        global and selector models following standard weighted average, while personalized models following SoftPull
        The shareable generator is used to convert the aggregated weights to shareable, and shareable back to weights.

        Args:
            client_id_label_mapping: needed for training selector model, no Default.
            parallel_task (int, optional): sequential or parallel task. Defaults to 1 (parallel).
            min_clients (int, optional): Min number of clients in training. Defaults to 1.
            num_rounds (int, optional): The total number of training rounds. Defaults to 5.
            start_round (int, optional): Start round for training. Defaults to 0.
            wait_time_after_min_received (int, optional): Time to wait before beginning aggregation after contributions received. Defaults to 10.
            train_timeout (int, optional): Time to wait for clients to do local training.
            aggregator_id (str, optional): ID of the aggregator component for FedSM models. Defaults to "aggregator".
            persistor_id (str, optional): ID of the persistor component for FedSM models. Defaults to "persistor_fedsm".
            shareable_generator_id (str, optional): ID of the shareable generator. Defaults to "shareable_generator".
            train_task_name (str, optional): Name of the train task. Defaults to "train".
            ignore_result_error (bool, optional): whether the workflow tolerates errors in client results. Defaults to True.
        """
        super().__init__(
            min_clients=min_clients,
            num_rounds=num_rounds,
            start_round=start_round,
            wait_time_after_min_received=wait_time_after_min_received,
            aggregator_id=aggregator_id,
            persistor_id=persistor_id,
            shareable_generator_id=shareable_generator_id,
            train_task_name=train_task_name,
            train_timeout=train_timeout,
            ignore_result_error=ignore_result_error,
        )
        # extras for FedSM
        # client_id to label mapping for selector
        self.parallel_task = parallel_task
        self.client_id_label_mapping = client_id_label_mapping
        # populated in start_controller() from the engine's registered clients
        self.participating_clients = None

    def start_controller(self, fl_ctx: FLContext) -> None:
        """Initialize base S&G components, then validate FedSM-specific setup.

        Collects the participating client names and panics if any client is
        missing from the client-to-selector-label mapping.
        """
        super().start_controller(fl_ctx=fl_ctx)
        self.log_info(fl_ctx, "Initializing FedSM-specific workflow components.")
        self.log_info(
            fl_ctx,
            f"Client_ID to selector label mapping: {self.client_id_label_mapping}",
        )
        # get engine
        engine = fl_ctx.get_engine()
        if not engine:
            self.system_panic("Engine not found. ScatterAndGather exiting.", fl_ctx)
            return
        # Get all clients
        clients = engine.get_clients()
        self.participating_clients = [c.name for c in clients]
        self.log_info(fl_ctx, f"Participating clients: {self.participating_clients}")
        # Validate client info
        for client_id in self.participating_clients:
            if client_id not in self.client_id_label_mapping.keys():
                self.system_panic(
                    f"Client {client_id} not found in the id_label mapping. Please double check. ScatterAndGatherFedSM exiting.",
                    fl_ctx,
                )
                return

    def _wait_for_task(self, task: Task, abort_signal: Signal):
        """Block until ``task`` completes, its done-callback fires, or an abort is signaled."""
        task.props[_TASK_KEY_DONE] = False
        task.task_done_cb = self._process_finished_task(task=task, func=task.task_done_cb)
        while True:
            if task.completion_status is not None:
                break
            if abort_signal and abort_signal.triggered:
                self.cancel_task(task, fl_ctx=None, completion_status=TaskCompletionStatus.ABORTED)
                break
            task_done = task.props[_TASK_KEY_DONE]
            if task_done:
                break
            time.sleep(self._task_check_period)

    def control_flow(self, abort_signal: Signal, fl_ctx: FLContext) -> None:
        """Run the FedSM training rounds.

        Each round: send every client its three models (selector, global,
        personalized) plus the selector label; wait for results; aggregate;
        convert back to learnables; persist the whole model set.
        """
        try:
            self.log_info(fl_ctx, "Beginning ScatterAndGatherFedSM training phase.")
            self._phase = AppConstants.PHASE_TRAIN
            fl_ctx.set_prop(AppConstants.PHASE, self._phase, private=True, sticky=False)
            fl_ctx.set_prop(AppConstants.NUM_ROUNDS, self._num_rounds, private=True, sticky=False)
            self.fire_event(AppEventType.TRAINING_STARTED, fl_ctx)
            for self._current_round in range(self._start_round, self._start_round + self._num_rounds):
                if self._check_abort_signal(fl_ctx, abort_signal):
                    return
                self.log_info(fl_ctx, f"Round {self._current_round} started.")
                self.log_info(fl_ctx, f"Models in fl_ctx: {self._global_weights['weights'].keys()}")
                fl_ctx.set_prop(
                    AppConstants.GLOBAL_MODEL,
                    self._global_weights,
                    private=True,
                    sticky=True,
                )
                fl_ctx.set_prop(
                    AppConstants.CURRENT_ROUND,
                    self._current_round,
                    private=True,
                    sticky=False,
                )
                self.fire_event(AppEventType.ROUND_STARTED, fl_ctx)
                # Create train_task for each participating clients, 3 models for each
                tasks_each_round = []
                for client_id in self.participating_clients:
                    # get the models for a client
                    select_weight = self._global_weights["weights"]["select_weights"]
                    global_weight = self._global_weights["weights"]["global_weights"]
                    client_weight = self._global_weights["weights"][client_id]
                    client_label = self.client_id_label_mapping[client_id]
                    # add all three models using a DXO collection
                    dxo_select_weights = DXO(data_kind=DataKind.WEIGHT_DIFF, data=select_weight)
                    dxo_global_weights = DXO(data_kind=DataKind.WEIGHT_DIFF, data=global_weight)
                    dxo_person_weights = DXO(data_kind=DataKind.WEIGHTS, data=client_weight)
                    # add Adam parameter sets (only exist after the first round's aggregation)
                    if self._current_round > 0:
                        select_exp_avg = self._global_weights["weights"]["select_exp_avg"]
                        select_exp_avg_sq = self._global_weights["weights"]["select_exp_avg_sq"]
                        dxo_select_exp_avg = DXO(data_kind=DataKind.WEIGHTS, data=select_exp_avg)
                        dxo_select_exp_avg_sq = DXO(data_kind=DataKind.WEIGHTS, data=select_exp_avg_sq)
                    else:
                        dxo_select_exp_avg = None
                        dxo_select_exp_avg_sq = None
                    # create dxo for client
                    dxo_dict = {
                        # add selector model weights and Adam parameters
                        "select_weights": dxo_select_weights,
                        "select_exp_avg": dxo_select_exp_avg,
                        "select_exp_avg_sq": dxo_select_exp_avg_sq,
                        # add global and personalized model weights
                        "global_weights": dxo_global_weights,
                        "person_weights": dxo_person_weights,
                        # add target id info for checking at client end
                        "target_id": client_id,
                        # add target label for client end selector training
                        "select_label": client_label,
                    }
                    dxo_collection = DXO(data_kind=DataKind.COLLECTION, data=dxo_dict)
                    data_shareable = dxo_collection.to_shareable()
                    # add meta information
                    data_shareable.set_header(AppConstants.CURRENT_ROUND, self._current_round)
                    data_shareable.set_header(AppConstants.NUM_ROUNDS, self._num_rounds)
                    data_shareable.add_cookie(AppConstants.CONTRIBUTION_ROUND, self._current_round)
                    # create task
                    train_task = Task(
                        name=self.train_task_name,
                        data=data_shareable,
                        props={},
                        timeout=self._train_timeout,
                        before_task_sent_cb=self._prepare_train_task_data,
                        result_received_cb=self._process_train_result,
                    )
                    # send only to the target client
                    if self.parallel_task:
                        # tasks send in parallel
                        self.send(
                            task=train_task,
                            targets=[client_id],
                            fl_ctx=fl_ctx,
                        )
                        tasks_each_round.append(train_task)
                    else:
                        # tasks will be executed sequentially
                        self.send_and_wait(
                            task=train_task,
                            targets=[client_id],
                            fl_ctx=fl_ctx,
                        )
                    if self._check_abort_signal(fl_ctx, abort_signal):
                        return
                # wait for all tasks in this round to finish
                if self.parallel_task:
                    for task in tasks_each_round:
                        self._wait_for_task(task, abort_signal)
                # aggregate the returned results in shareable
                self.fire_event(AppEventType.BEFORE_AGGREGATION, fl_ctx)
                aggr_result = self.aggregator.aggregate(fl_ctx)
                collection_dxo = from_shareable(aggr_result)
                fl_ctx.set_prop(
                    AppConstants.AGGREGATION_RESULT,
                    aggr_result,
                    private=True,
                    sticky=False,
                )
                self.fire_event(AppEventType.AFTER_AGGREGATION, fl_ctx)
                if self._check_abort_signal(fl_ctx, abort_signal):
                    return
                # update all models using shareable generator for FedSM
                self.fire_event(AppEventType.BEFORE_SHAREABLE_TO_LEARNABLE, fl_ctx)
                self._global_weights = self.shareable_gen.shareable_to_learnable(
                    shareable=collection_dxo.to_shareable(),
                    client_ids=self.participating_clients,
                    fl_ctx=fl_ctx,
                )
                # update models
                fl_ctx.set_prop(
                    AppConstants.GLOBAL_MODEL,
                    self._global_weights,
                    private=True,
                    sticky=True,
                )
                fl_ctx.sync_sticky()
                self.fire_event(AppEventType.AFTER_SHAREABLE_TO_LEARNABLE, fl_ctx)
                if self._check_abort_signal(fl_ctx, abort_signal):
                    return
                # persist the entire FedSM model set via the custom persistor
                self.fire_event(AppEventType.BEFORE_LEARNABLE_PERSIST, fl_ctx)
                self.persistor.save(self._global_weights["weights"], fl_ctx)
                self.fire_event(AppEventType.AFTER_LEARNABLE_PERSIST, fl_ctx)
                self.fire_event(AppEventType.ROUND_DONE, fl_ctx)
                self.log_info(fl_ctx, f"Round {self._current_round} finished.")
            self._phase = AppConstants.PHASE_FINISHED
            self.log_info(fl_ctx, "Finished ScatterAndGatherFedSM Training.")
        except Exception as e:
            traceback.print_exc()
            error_msg = f"Exception in ScatterAndGatherFedSM control_flow: {e}"
            self.log_exception(fl_ctx, error_msg)
            self.system_panic(str(e), fl_ctx)
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/workflows/scatter_and_gather_fedsm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def custom_client_datalist_json_path(datalist_json_path: str, client_id: str) -> str:
    """Build the per-client datalist JSON path.

    Args:
        datalist_json_path: root path containing all per-client json files.
        client_id: client name, e.g. "site-2".

    Returns:
        Path of the form ``<datalist_json_path>/<client_id>.json``.
    """
    return os.path.join(datalist_json_path, client_id + ".json")
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/utils/custom_client_datalist_json_path.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from nvflare.apis.dxo import MetaKey
from nvflare.app_common.abstract.model import ModelLearnableKey, make_model_learnable
from nvflare.app_common.app_constant import ModelFormat
class PTModelPersistenceFormatManagerFedSM(object):
    """Persistence-format manager for a FedSM checkpoint.

    A FedSM checkpoint bundles several models in one dict: the selector
    model, the global model, and one personalized model per client.  This
    class converts between that persisted layout and the ModelLearnable
    structures used by the FL workflow, while preserving any extra
    properties found in the original checkpoint so they survive a
    load/save round trip.
    """

    # top-level keys of the persisted checkpoint dict
    PERSISTENCE_KEY_FEDSM_MODELS = "model_set_fedsm"
    PERSISTENCE_KEY_TRAIN_CONF = "train_conf"
    PERSISTENCE_KEY_META_PROPS = "meta_props"

    def __init__(self, data: dict, default_train_conf: dict = None):
        """Manage the format for model persistence.

        Args:
            data (dict): a dict of dict; must contain the "model_set_fedsm" key
                mapping model ids to their state dicts.
            default_train_conf (dict, optional): configuration for train, used when
                the checkpoint itself carries none. Defaults to None.

        Raises:
            TypeError: when data is not a dictionary
        """
        if not isinstance(data, dict):
            raise TypeError(f"data must be a dict but got {type(data)}")
        self.meta = None
        self.train_conf = None
        self.other_props = {}  # other props from the original data that need to be kept
        # dict of dicts: model_id -> state dict of weights
        self.model_set = data[self.PERSISTENCE_KEY_FEDSM_MODELS]
        self.meta = data.get(self.PERSISTENCE_KEY_META_PROPS, None)
        self.train_conf = data.get(self.PERSISTENCE_KEY_TRAIN_CONF, None)
        # we need to keep other props, if any, so they can be kept when persisted
        for k, v in data.items():
            if k not in [
                self.PERSISTENCE_KEY_FEDSM_MODELS,
                self.PERSISTENCE_KEY_META_PROPS,
                self.PERSISTENCE_KEY_TRAIN_CONF,
            ]:
                self.other_props[k] = v
        # fall back to the caller-supplied default when the checkpoint has no train conf
        if not self.train_conf:
            self.train_conf = default_train_conf

    def _get_processed_vars(self) -> dict:
        """Return the var-name -> processed flag map from meta (e.g. HE-processed vars), or {} if no meta."""
        if self.meta:
            return self.meta.get(MetaKey.PROCESSED_KEYS, {})
        else:
            return {}

    def to_model_learnable(self, exclude_vars) -> dict:
        """Convert the whole model set into a nested ModelLearnable.

        Args:
            exclude_vars: compiled regex of variable names to skip, or None.

        Returns:
            A ModelLearnable whose WEIGHTS entry maps model_id to a per-model
            ModelLearnable (a "learnable of learnables").
        """
        processed_vars = self._get_processed_vars()
        # learnable of learnables
        model_set = {}
        for model_id in self.model_set.keys():
            weights = {}
            var_dict = self.model_set[model_id]
            for k, v in var_dict.items():
                if exclude_vars and exclude_vars.search(k):
                    continue
                is_processed = processed_vars.get(k, False)
                if is_processed:
                    # processed (e.g. encrypted) values are passed through untouched
                    weights[k] = v
                else:
                    weights[k] = v.cpu().numpy()
            model_set[model_id] = make_model_learnable(weights, self.meta)
        # wrap the dict of per-model learnables into one outer learnable
        model_set = make_model_learnable(model_set, self.meta)
        return model_set

    def get_single_model(self, model_id) -> dict:
        """Return the weights of one model from the set, keyed by variable name.

        Values are returned as stored (not converted to numpy), so the result
        can be handed directly to torch.save.
        """
        processed_vars = self._get_processed_vars()
        weights = OrderedDict()
        var_dict = self.model_set[model_id]
        for k, v in var_dict.items():
            is_processed = processed_vars.get(k, False)
            if is_processed:
                weights[k] = v
            else:
                # weights[k] = v.cpu().numpy()
                weights[k] = v
        return weights

    def to_persistence_dict(self) -> dict:
        """Assemble the dict to be written to disk, restoring meta / train conf / other props."""
        # processed_vars = self._get_processed_vars()
        model_set = {}
        for model_id in self.model_set.keys():
            weights = OrderedDict()
            var_dict = self.model_set[model_id]
            for k, v in var_dict.items():
                # is_processed = processed_vars.get(k, False)
                # if is_processed:
                weights[k] = v
                # else:
                #     weights[k] = v.cpu().numpy()
            model_set[model_id] = weights
        # always use complex format for saving
        persistence_dict = OrderedDict()
        persistence_dict[self.PERSISTENCE_KEY_FEDSM_MODELS] = model_set
        if self.meta:
            persistence_dict[self.PERSISTENCE_KEY_META_PROPS] = self.meta
        if self.train_conf:
            persistence_dict[self.PERSISTENCE_KEY_TRAIN_CONF] = self.train_conf
        if self.other_props:
            for k, v in self.other_props.items():
                persistence_dict[k] = v
        return persistence_dict

    def update(self, ml_dict: dict):
        """Update the persistence data with the learned values.

        Args:
            ml_dict (Dict of ModelLearnable): updated information to be merged into existing Dict of ModelLearnable
        """
        for model_id in ml_dict.keys():
            # "meta" is bookkeeping, not a model entry
            if model_id != "meta":
                ml = ml_dict[model_id]
                # update with value of the model learnable
                # note that the original weights that are not learned are still kept!
                learned_weights = ml[ModelLearnableKey.WEIGHTS]
                for k, v in learned_weights.items():
                    self.model_set[model_id][k] = v

    def get_persist_model_format(self):
        """Report the persistence format (PyTorch checkpoint)."""
        return ModelFormat.PT_CHECKPOINT
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/persistors/pt_fed_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from collections import OrderedDict
import torch
from persistors.pt_fed_utils import PTModelPersistenceFormatManagerFedSM
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable
from nvflare.app_common.app_constant import DefaultCheckpointFileName
from nvflare.app_opt.pt.file_model_persistor import PTFileModelPersistor
class PTFileFedSMModelPersistor(PTFileModelPersistor):
    """File-based persistor for the FedSM model set (selector + global + per-client personalized models)."""

    def __init__(
        self,
        client_ids,
        exclude_vars=None,
        model=None,
        model_selector=None,
        model_file_name=DefaultCheckpointFileName.GLOBAL_MODEL,
        best_model_file_name=DefaultCheckpointFileName.BEST_GLOBAL_MODEL,
        source_ckpt_file_full_name=None,
    ):
        """Persist a dict of personalized pytorch-based models to/from file system.

        Single model behavior is the same as PTFileModelPersistor.
        Instead of a single model, it creates a set of models with the same
        structure corresponding to each client.

        Args:
            client_ids (list): the list of client ids
            exclude_vars (str, optional): regex expression specifying weight vars to be excluded from training. Defaults to None.
            model (str, optional): torch model object or component id of the model object. Defaults to None.
            model_selector (str, optional): torch model object or component id of the model_selector object. Defaults to None.
            model_file_name (str, optional): file name for saving the global model. Defaults to DefaultCheckpointFileName.GLOBAL_MODEL.
            best_model_file_name (str, optional): file name for saving the best global model. Defaults to DefaultCheckpointFileName.BEST_GLOBAL_MODEL.
            source_ckpt_file_full_name (str, optional): full file name for source model checkpoint file. Defaults to None.

        Raises:
            ValueError: when source_ckpt_file_full_name does not exist
        """
        super().__init__(
            exclude_vars=exclude_vars,
            model=model,
            global_model_file_name=model_file_name,
            best_global_model_file_name=best_model_file_name,
            source_ckpt_file_full_name=source_ckpt_file_full_name,
        )
        self.client_ids = client_ids
        self.model_selector = model_selector

    def _initialize(self, fl_ctx: FLContext):
        """Resolve the selector component (if given by id) and build the in-memory FedSM model set."""
        self.log_info(fl_ctx, "FedSM model persistor initialized")
        super()._initialize(fl_ctx=fl_ctx)
        # First convert str model description to model
        if isinstance(self.model_selector, str):
            # treat it as model component ID
            model_component_id = self.model_selector
            engine = fl_ctx.get_engine()
            self.model_selector = engine.get_component(model_component_id)
            if not self.model_selector:
                self.system_panic(
                    reason=f"cannot find model component '{model_component_id}'",
                    fl_ctx=fl_ctx,
                )
                return
            if not isinstance(self.model_selector, torch.nn.Module):
                self.system_panic(
                    reason=f"expect model component '{model_component_id}' to be torch.nn.Module but got {type(self.model_selector)}",
                    fl_ctx=fl_ctx,
                )
                return
        elif self.model_selector and not isinstance(self.model_selector, torch.nn.Module):
            self.system_panic(
                reason=f"expect model to be torch.nn.Module but got {type(self.model)}",
                fl_ctx=fl_ctx,
            )
            return
        # self.model and self.model_selector is only for getting the model structure from config
        # operations will be performed on a set of models, self.model_set_fedsm
        # consisting:
        # selector: selector model
        # global: global model
        # {client_id}: personalized models with same structure as global model
        self.model_set_fedsm = {}
        # initialize all models
        self.model_set_fedsm["select_weights"] = copy.deepcopy(self.model_selector)
        self.model_set_fedsm["select_exp_avg"] = OrderedDict()
        self.model_set_fedsm["select_exp_avg_sq"] = OrderedDict()
        # NOTE(review): duplicate of the "select_weights" assignment above — harmless, but likely unintended
        self.model_set_fedsm["select_weights"] = copy.deepcopy(self.model_selector)
        self.model_set_fedsm["global_weights"] = copy.deepcopy(self.model)
        for id in self.client_ids:
            self.model_set_fedsm[id] = copy.deepcopy(self.model)
        fl_ctx.sync_sticky()

    def load_model(self, fl_ctx: FLContext) -> dict:
        """Convert initialised models into a dict of Learnable/Model format.

        Loads a source checkpoint when one is configured; otherwise uses the
        freshly initialized network weights from the app config.

        Args:
            fl_ctx (FLContext): FL Context delivered by workflow

        Returns:
            Dict of models: a Dict of Learnable/Model object (or None after a
            system panic when loading fails).
        """
        src_file_name = None
        if self.source_ckpt_file_full_name:
            src_file_name = self.source_ckpt_file_full_name
        elif self.ckpt_preload_path:
            src_file_name = self.ckpt_preload_path
        # data is a dict of FedSM model set with other training-related items,
        # FedSM model set under a dict "model_set_fedsm"
        # containing select_weights, global_weights, and personal models under each client_id
        data = {"model_set_fedsm": {}}
        if src_file_name:
            try:
                device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
                data = torch.load(src_file_name, map_location=device)
                # checkpoint may contain a dict "model_set_fedsm" of models indexed with model ids
                # 'optimizer', 'lr_scheduler', etc.
            except:
                self.log_exception(fl_ctx, f"error loading checkpoint from {src_file_name}")
                self.system_panic(reason="cannot load model checkpoint", fl_ctx=fl_ctx)
                return None
        else:
            # if no pretrained model provided, use the generated network weights from APP config
            # note that, if set "determinism" in the config, the init model weights will always be the same
            try:
                data["model_set_fedsm"]["select_weights"] = (
                    self.model_set_fedsm["select_weights"].state_dict()
                    if self.model_set_fedsm["select_weights"] is not None
                    else OrderedDict()
                )
                # Adam moment estimates start empty; populated as rounds progress
                data["model_set_fedsm"]["select_exp_avg"] = OrderedDict()
                data["model_set_fedsm"]["select_exp_avg_sq"] = OrderedDict()
                data["model_set_fedsm"]["global_weights"] = (
                    self.model_set_fedsm["global_weights"].state_dict()
                    if self.model_set_fedsm["global_weights"] is not None
                    else OrderedDict()
                )
                for id in self.client_ids:
                    data["model_set_fedsm"][id] = (
                        self.model_set_fedsm[id].state_dict() if self.model_set_fedsm[id] is not None else OrderedDict()
                    )
            except:
                self.log_exception(fl_ctx, "error getting state_dict from model object")
                self.system_panic(reason="cannot create state_dict from model object", fl_ctx=fl_ctx)
                return None
        # record network class names as the default train conf
        if self.model and self.model_selector:
            self.default_train_conf = {
                "train": {
                    "model": type(self.model).__name__,
                    "model_selector": type(self.model_selector).__name__,
                }
            }
        self.persistence_manager = PTModelPersistenceFormatManagerFedSM(
            data, default_train_conf=self.default_train_conf
        )
        learnable = self.persistence_manager.to_model_learnable(self.exclude_vars)
        return learnable

    def handle_event(self, event: str, fl_ctx: FLContext):
        """React to run start, and to per-model "fedsm_best_model_available_<id>" events by snapshotting that model."""
        if event == EventType.START_RUN:
            self._initialize(fl_ctx)
        model_list = ["global_weights", "select_weights"] + self.client_ids
        for model_id in model_list:
            if event == "fedsm_best_model_available_" + model_id:
                # save the current model as the best model
                best_ckpt_save_path = os.path.join(self.log_dir, model_id + "_" + self.best_global_model_file_name)
                self.save_best_model(model_id, best_ckpt_save_path)
                self.log_info(fl_ctx, f"new best model for {model_id} saved.")

    def save_best_model(self, model_id: str, save_path: str):
        """Save a single model from the set to save_path as a torch checkpoint."""
        save_dict = self.persistence_manager.get_single_model(model_id)
        torch.save(save_dict, save_path)

    def save_model(self, ml_dict: dict, fl_ctx: FLContext):
        """Merge the updated learnables into the persistence manager and write the full checkpoint."""
        self.persistence_manager.update(ml_dict)
        self.save_model_file(self._ckpt_save_path)

    def get_model(self, model_file, fl_ctx: FLContext) -> ModelLearnable:
        """Load a saved checkpoint (relative to log_dir) and return it as a nested ModelLearnable; {} on failure."""
        try:
            # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Use the "cpu" to load the global model weights, avoid GPU out of memory
            device = "cpu"
            location = os.path.join(self.log_dir, model_file)
            data = torch.load(location, map_location=device)
            persistence_manager = PTModelPersistenceFormatManagerFedSM(data, default_train_conf=self.default_train_conf)
            return persistence_manager.to_model_learnable(self.exclude_vars)
        except Exception:
            self.log_exception(fl_ctx, f"error loading checkpoint from {model_file}")
            return {}
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/persistors/pt_file_fedsm_model_persistor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.optim as optim
from helpers.supervised_pt_fedsm import SupervisedPTFedSMHelper
from learners.supervised_monai_prostate_learner import SupervisedMonaiProstateLearner
from monai.losses import DiceLoss
from monai.networks.nets.unet import UNet
from networks.vgg import vgg11
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants, ValidateType
class SupervisedMonaiProstateFedSMLearner(SupervisedMonaiProstateLearner):
    """FedSM learner for prostate segmentation.

    Per round, three models are trained and exchanged: the global model,
    a personalized model (same UNet architecture as the global model),
    and a VGG-11 selector model whose Adam optimizer moments are also
    synchronized across clients.
    """

    def __init__(
        self,
        train_config_filename,
        aggregation_epochs: int = 1,
        fedsm_select_epochs: int = 1,
        train_task_name: str = AppConstants.TASK_TRAIN,
    ):
        """Trainer for prostate segmentation task. It inherits from MONAI trainer.

        Args:
            train_config_filename: directory of the config file.
            fedsm_select_epochs: the number of training epochs of selector model. Defaults to 1.
            aggregation_epochs: the number of training epochs of both global and personalized models for a round. Defaults to 1.
            train_task_name: name of the task to train the model.

        Returns:
            a Shareable with the updated local model after running `execute()`
        """
        SupervisedMonaiProstateLearner.__init__(
            self,
            train_config_filename=train_config_filename,
            aggregation_epochs=aggregation_epochs,
            train_task_name=train_task_name,
        )
        # personalized model trains for the same number of epochs as the global model
        self.fedsm_person_model_epochs = aggregation_epochs
        self.fedsm_select_model_epochs = fedsm_select_epochs
        self.fedsm_helper = None  # constructed in train_config()

    def train_config(self, fl_ctx: FLContext):
        """Set up base training config, then the FedSM helper holding the personalized and selector models."""
        # Initialize superclass
        SupervisedMonaiProstateLearner.train_config(self, fl_ctx)
        engine = fl_ctx.get_engine()
        ws = engine.get_workspace()
        app_dir = ws.get_app_dir(fl_ctx.get_job_id())
        # Initialize PTFedSMHelper
        # personalized and selector model training epoch
        # personalized model same as global model
        # selector model can be different from the other two task models
        fedsm_person_model = UNet(
            spatial_dims=2,
            in_channels=1,
            out_channels=1,
            channels=(16, 32, 64, 128, 256),
            strides=(2, 2, 2, 2),
            num_res_units=2,
        ).to(self.device)
        fedsm_select_model = vgg11(
            num_classes=self.config_info["select_num_classes"],
        ).to(self.device)
        fedsm_person_criterion = DiceLoss(sigmoid=True)
        fedsm_select_criterion = torch.nn.CrossEntropyLoss()
        fedsm_person_optimizer = optim.Adam(fedsm_person_model.parameters(), lr=self.lr)
        fedsm_select_optimizer = optim.Adam(
            fedsm_select_model.parameters(), lr=self.config_info["learning_rate_select"]
        )
        self.fedsm_helper = SupervisedPTFedSMHelper(
            person_model=fedsm_person_model,
            select_model=fedsm_select_model,
            person_criterion=fedsm_person_criterion,
            select_criterion=fedsm_select_criterion,
            person_optimizer=fedsm_person_optimizer,
            select_optimizer=fedsm_select_optimizer,
            device=self.device,
            app_dir=app_dir,
            person_model_epochs=self.fedsm_person_model_epochs,
            select_model_epochs=self.fedsm_select_model_epochs,
        )

    def reshape_global_weights(self, local_var_dict: dict, global_weights: dict):
        """Load received weights into local_var_dict, reshaping as needed (HE may flatten tensors).

        Args:
            local_var_dict: local model state dict, updated in place.
            global_weights: received weights keyed by variable name; reshaped in place as a side effect.

        Returns:
            The updated local_var_dict with received values as torch tensors.

        Raises:
            ValueError: when a weight cannot be reshaped/converted, or when nothing was loaded.
        """
        model_keys = global_weights.keys()
        n_loaded = 0
        # tensors might need to be reshaped to support HE for secure aggregation.
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = global_weights[var_name]
                try:
                    # reshape global weights to compute difference later on
                    global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
                    # update the local dict
                    local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
                    n_loaded += 1
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed with error: {str(e)}")
        if n_loaded == 0:
            raise ValueError(f"No global weights loaded! Received weight dict is {global_weights}")
        return local_var_dict

    def compute_model_diff(self, initial_model: dict, end_model: dict, fl_ctx: FLContext):
        """Compute per-variable weight differences end_model - initial_model as float32 numpy arrays.

        Only variables present in both dicts are included.

        NOTE(review): on NaN detection this calls system_panic and returns a
        Shareable (make_reply) rather than a dict; callers wrap the return
        value in a DXO as if it were a weight dict — confirm the intended
        error path.
        """
        model_diff = {}
        for name in initial_model:
            if name not in end_model:
                continue
            model_diff[name] = np.subtract(end_model[name].cpu().numpy(), initial_model[name], dtype=np.float32)
            if np.any(np.isnan(model_diff[name])):
                self.system_panic(f"{name} weights became NaN...", fl_ctx)
                return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        return model_diff

    def train(
        self,
        shareable: Shareable,
        fl_ctx: FLContext,
        abort_signal: Signal,
    ) -> Shareable:
        """Training task pipeline for FedSM.

        Get global/client/selector model weights (potentially with HE),
        locally train all three models, and return updated weights of all
        three models (model_diff) together with the optimizer parameters
        of the selector (model).
        """
        # get round information
        current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
        self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
        self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
        # update local model parameters with received dxo
        dxo = from_shareable(shareable)
        # load global model weights
        global_weights = dxo.data["global_weights"].data["weights"]
        local_var_dict = self.model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, global_weights)
        self.model.load_state_dict(local_var_dict)
        # load personalized model weights
        person_weights = dxo.data["person_weights"].data["weights"]
        local_var_dict = self.fedsm_helper.person_model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, person_weights)
        self.fedsm_helper.person_model.load_state_dict(local_var_dict)
        # load selector model weights
        select_weights = dxo.data["select_weights"].data["weights"]
        local_var_dict = self.fedsm_helper.select_model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, select_weights)
        self.fedsm_helper.select_model.load_state_dict(local_var_dict)
        # get selector label
        select_label = dxo.data["select_label"]
        # update Adam parameters (round 0 has no aggregated optimizer state yet)
        if current_round > 0:
            # get weights from dxo collection
            global_exp_avg = dxo.data.get("select_exp_avg").data["weights"]
            global_exp_avg_sq = dxo.data.get("select_exp_avg_sq").data["weights"]
            # load parameters to optimizer
            local_optim_state_dict = self.fedsm_helper.select_optimizer.state_dict()
            for name in local_optim_state_dict["state"]:
                local_optim_state_dict["state"][name]["exp_avg"] = torch.as_tensor(global_exp_avg[str(name)])
                local_optim_state_dict["state"][name]["exp_avg_sq"] = torch.as_tensor(global_exp_avg_sq[str(name)])
            self.fedsm_helper.select_optimizer.load_state_dict(local_optim_state_dict)
        # local trainings for three models
        epoch_len = len(self.train_loader)
        self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
        # local train global model
        self.local_train(
            fl_ctx=fl_ctx,
            train_loader=self.train_loader,
            abort_signal=abort_signal,
            current_round=current_round,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # local train personalized model
        self.fedsm_helper.local_train_person(
            train_loader=self.train_loader,
            abort_signal=abort_signal,
            writer=self.writer,
            current_round=current_round,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # local train selector
        self.fedsm_helper.local_train_select(
            train_loader=self.train_loader,
            select_label=select_label,
            abort_signal=abort_signal,
            writer=self.writer,
            current_round=current_round,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # compute delta models, initial models has the primary key set
        local_weights = self.model.state_dict()
        model_diff_global = self.compute_model_diff(global_weights, local_weights, fl_ctx)
        # personalized model is sent as full weights, not a diff
        local_weights = self.fedsm_helper.person_model.state_dict()
        model_person = local_weights
        for name in model_person:
            model_person[name] = model_person[name].cpu().numpy()
        local_weights = self.fedsm_helper.select_model.state_dict()
        model_diff_select = self.compute_model_diff(select_weights, local_weights, fl_ctx)
        # directly return the optimizer parameters
        optim_weights = self.fedsm_helper.select_optimizer.state_dict().get("state")
        exp_avg = {}
        exp_avg_sq = {}
        for name in optim_weights:
            exp_avg[str(name)] = optim_weights[name]["exp_avg"].cpu().numpy()
            exp_avg_sq[str(name)] = optim_weights[name]["exp_avg_sq"].cpu().numpy()
        # build the shareable: a COLLECTION DXO bundling all five payloads
        dxo_dict = {
            "global_weights": DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff_global),
            "person_weights": DXO(data_kind=DataKind.WEIGHTS, data=model_person),
            "select_weights": DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff_select),
            "select_exp_avg": DXO(data_kind=DataKind.WEIGHTS, data=exp_avg),
            "select_exp_avg_sq": DXO(data_kind=DataKind.WEIGHTS, data=exp_avg_sq),
        }
        dxo_collection = DXO(data_kind=DataKind.COLLECTION, data=dxo_dict)
        dxo_collection.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
        self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
        return dxo_collection.to_shareable()

    def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Validation task pipeline for FedSM.

        Validates all three models (global / personalized / selector) and
        returns validation scores for server-end best-model selection and
        record keeping.
        """
        # get round information
        current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        # validation on models from server
        # renamed to "models_from_server" to avoid confusion with "global_model"
        model_owner = "models_from_server"
        # update local model weights with received dxo
        dxo = from_shareable(shareable)
        # load global model weights
        global_weights = dxo.data["global_weights"].data["weights"]
        local_var_dict = self.model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, global_weights)
        self.model.load_state_dict(local_var_dict)
        # load personalized model weights
        person_weights = dxo.data["person_weights"].data["weights"]
        local_var_dict = self.fedsm_helper.person_model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, person_weights)
        self.fedsm_helper.person_model.load_state_dict(local_var_dict)
        # load selector model weights
        select_weights = dxo.data["select_weights"].data["weights"]
        local_var_dict = self.fedsm_helper.select_model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, select_weights)
        self.fedsm_helper.select_model.load_state_dict(local_var_dict)
        # get selector label
        select_label = dxo.data["select_label"]
        # before_train_validate only, can extend to other validate types
        validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
        if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # perform valid before local train
            global_metric = self.local_valid(
                self.model,
                self.valid_loader,
                abort_signal,
                tb_id="val_metric_global_model",
                current_round=current_round,
            )
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_metric_global_model ({model_owner}): {global_metric:.4f}")
            person_metric = self.local_valid(
                self.fedsm_helper.person_model,
                self.valid_loader,
                abort_signal,
                tb_id="val_metric_person_model",
                current_round=current_round,
            )
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_metric_person_model ({model_owner}): {person_metric:.4f}")
            # save personalized model locally
            person_best = self.fedsm_helper.update_metric_save_person_model(
                current_round=current_round, metric=person_metric
            )
            if person_best:
                self.log_info(fl_ctx, "best personalized model available")
            select_metric = self.fedsm_helper.local_valid_select(
                self.valid_loader,
                select_label,
                abort_signal,
                tb_id="val_metric_select_model",
                writer=self.writer,
                current_round=current_round,
            )
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_metric_select_model ({model_owner}): {select_metric:.4f}")
            # validation metrics will be averaged with weights at server end for best model record
            # on the two models: global and selector
            # personalized metrics will not be averaged, send a flag to state the best model availability
            metric_dxo = DXO(
                data_kind=DataKind.METRICS,
                data={MetaKey.INITIAL_METRICS: [global_metric, select_metric, person_best]},
                meta={},
            )
            metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
            return metric_dxo.to_shareable()
        else:
            return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/learners/supervised_monai_prostate_fedsm_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
class SupervisedLearner(Learner):
def __init__(
self,
aggregation_epochs: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
):
"""Simple Supervised Trainer.
This provides the basic functionality of a local learner: perform before-train validation on
global model at the beginning of each round, perform local training, and send the updated weights.
No model will be saved locally, tensorboard record for local loss and global model validation score.
Enabled FedAvg
Args:
train_config_filename: directory of config_3 file.
aggregation_epochs: the number of training epochs for a round. Defaults to 1.
train_task_name: name of the task to train the model.
Returns:
a Shareable with the updated local model after running `execute()`
"""
super().__init__()
# trainer init happens at the very beginning, only the basic info regarding the trainer is set here
# the actual run has not started at this point
self.aggregation_epochs = aggregation_epochs
self.train_task_name = train_task_name
self.best_metric = 0.0
self.client_id = None
self.writer = None
    def initialize(self, parts: dict, fl_ctx: FLContext):
        """Initialize run-specific state: client id, tensorboard writer, and task config.

        Args:
            parts: framework-supplied components (unused here).
            fl_ctx: FL context for this run.
        """
        # when a run starts, this is where the actual settings get initialized for trainer
        # set the paths according to fl_ctx
        engine = fl_ctx.get_engine()
        ws = engine.get_workspace()
        app_dir = ws.get_app_dir(fl_ctx.get_job_id())
        # get and print the args
        fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
        self.client_id = fl_ctx.get_identity_name()
        self.log_info(
            fl_ctx,
            f"Client {self.client_id} initialized with args: \n {fl_args}",
        )
        # set local tensorboard writer for local validation score of global model
        self.writer = SummaryWriter(app_dir)
        # set the training-related contexts, this is task-specific
        self.train_config(fl_ctx)
    @abstractmethod
    def train_config(self, fl_ctx: FLContext):
        """Training configurations customized to individual tasks.

        This can be specified / loaded in any way,
        as long as the settings are made available for further training and validation.
        Some potential items include but are not limited to:
            self.lr
            self.model
            self.device
            self.optimizer
            self.criterion
            self.transform_train
            self.transform_valid
            self.transform_post
            self.train_loader
            self.valid_loader
            self.inferer
            self.valid_metric
        """
        raise NotImplementedError
    @abstractmethod
    def finalize(self, fl_ctx: FLContext):
        """Clean up at end of run: collect threads, close files, etc. (no-op by default)."""
        # collect threads, close files here
        pass
    def local_train(
        self,
        fl_ctx,
        train_loader,
        abort_signal: Signal,
        current_round,
    ):
        """Typical training logic.

        Total local epochs: self.aggregation_epochs.
        Load data pairs from train_loader: image / label;
        compute outputs with self.model and loss with self.criterion, then update the model.

        NOTE(review): on abort this returns a Shareable (make_reply); the caller
        re-checks abort_signal itself, so the return value is effectively unused.
        """
        for epoch in range(self.aggregation_epochs):
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.model.train()
            epoch_len = len(train_loader)
            # global epoch index across rounds, used as tensorboard x-axis base
            epoch_global = current_round * self.aggregation_epochs + epoch
            self.log_info(
                fl_ctx,
                f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})",
            )
            for i, batch_data in enumerate(train_loader):
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                inputs = batch_data["image"].to(self.device)
                labels = batch_data["label"].to(self.device)
                # forward + backward + optimize
                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                current_step = epoch_len * epoch_global + i
                self.writer.add_scalar("train_loss", loss.item(), current_step)
    def local_valid(
        self,
        model,
        valid_loader,
        abort_signal: Signal,
        tb_id=None,
        current_round=None,
    ):
        """Typical validation logic.

        Load data pairs from valid_loader: image / label;
        compute outputs with the given model, apply post transform
        (binarization, etc.), and compute the evaluation metric with
        self.valid_metric.  Adds the score to the tensorboard record under
        tb_id when provided.

        Returns:
            Mean metric over the whole validation set (or an abort reply
            Shareable when abort_signal fires mid-loop).
        """
        model.eval()
        with torch.no_grad():
            metric = 0
            for i, batch_data in enumerate(valid_loader):
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                val_images = batch_data["image"].to(self.device)
                val_labels = batch_data["label"].to(self.device)
                # Inference
                val_outputs = self.inferer(val_images, model)
                val_outputs = self.transform_post(val_outputs)
                # Compute metric
                metric_score = self.valid_metric(y_pred=val_outputs, y=val_labels)
                metric += metric_score.item()
            # compute mean dice over whole validation set
            metric /= len(valid_loader)
            # tensorboard record id, add to record if provided
            if tb_id:
                self.writer.add_scalar(tb_id, metric, current_round)
        return metric
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""Typical training task pipeline with potential HE functionality
Get global model weights (potentially with HE)
Local training
Return updated weights (model_diff)
"""
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError(f"Convert weight from {var_name} failed with error: {str(e)}")
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# local train
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
abort_signal=abort_signal,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
    """Validate the received global model on the local validation set.

    Pipeline:
        1. Unpack the global model weights from the incoming ``shareable``
           (tensors may have been flattened to support homomorphic encryption,
           so they are reshaped to the local model's shapes before loading).
        2. Load the weights into the local model.
        3. For ``BEFORE_TRAIN_VALIDATE`` only, run local validation and return
           the metric in a METRICS DXO so the server can average it across
           clients for best-model tracking.

    Args:
        shareable: task data carrying the global model weights as a DXO.
        fl_ctx: FL context used for logging.
        abort_signal: signal checked between steps to allow early abort.

    Returns:
        A Shareable with the validation metric, or an error reply
        (TASK_ABORTED / VALIDATE_TYPE_UNKNOWN).

    Raises:
        ValueError: if a received tensor cannot be reshaped/converted, or if
            no weights could be loaded at all.
    """
    if abort_signal.triggered:
        return make_reply(ReturnCode.TASK_ABORTED)
    # get round information
    current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
    # validation on global model
    model_owner = "global_model"
    # update local model weights with received weights
    dxo = from_shareable(shareable)
    global_weights = dxo.data
    # Before loading weights, tensors might need to be reshaped to support HE
    # for secure aggregation (HE can deliver flattened tensors).
    local_var_dict = self.model.state_dict()
    model_keys = global_weights.keys()
    n_loaded = 0  # count successfully loaded tensors to detect an empty payload
    for var_name in local_var_dict:
        if var_name in model_keys:
            weights = torch.as_tensor(global_weights[var_name], device=self.device)
            try:
                # update the local dict, reshaping to the local tensor's shape
                local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
                n_loaded += 1
            except Exception as e:
                raise ValueError(f"Convert weight from {var_name} failed with error: {str(e)}")
    self.model.load_state_dict(local_var_dict)
    if n_loaded == 0:
        raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
    # before_train_validate only, can extend to other validate types
    validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
    if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
        # perform valid before local train
        global_metric = self.local_valid(
            self.model,
            self.valid_loader,
            abort_signal,
            tb_id="val_metric_global_model",
            current_round=current_round,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        self.log_info(fl_ctx, f"val_metric_global_model ({model_owner}): {global_metric:.4f}")
        # validation metrics will be averaged with weights at server end for best model record
        metric_dxo = DXO(
            data_kind=DataKind.METRICS,
            data={MetaKey.INITIAL_METRICS: global_metric},
            meta={},
        )
        metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
        return metric_dxo.to_shareable()
    else:
        return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/learners/supervised_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import torch
import torch.optim as optim
from learners.supervised_learner import SupervisedLearner
from monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist
from monai.inferers import SimpleInferer
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.networks.nets.unet import UNet
from monai.transforms import (
Activations,
AsDiscrete,
AsDiscreted,
Compose,
EnsureChannelFirstd,
EnsureType,
EnsureTyped,
LoadImaged,
Resized,
ScaleIntensityRanged,
)
from utils.custom_client_datalist_json_path import custom_client_datalist_json_path
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
class SupervisedMonaiProstateLearner(SupervisedLearner):
    def __init__(
        self,
        train_config_filename,
        aggregation_epochs: int = 1,
        train_task_name: str = AppConstants.TASK_TRAIN,
    ):
        """MONAI Learner for the prostate segmentation task.

        It inherits the training/validation pipelines from SupervisedLearner
        and only supplies task-specific setup (model, data, transforms) via
        ``train_config``.

        Args:
            train_config_filename: path of the JSON config file; an additional
                term used for config loading in ``train_config``.
            aggregation_epochs: the number of training epochs for a round.
            train_task_name: name of the task to train the model.

        Returns:
            a Shareable with the updated local model after running `execute()`
        """
        super().__init__(
            aggregation_epochs=aggregation_epochs,
            train_task_name=train_task_name,
        )
        self.train_config_filename = train_config_filename
        # All training-related members are populated later in train_config().
        self.config_info = None
        self.lr = None
        self.model = None
        self.device = None
        self.optimizer = None
        self.criterion = None
        self.transform = None
        self.transform_post = None
        self.train_loader = None
        self.valid_loader = None
        self.inferer = None
        self.valid_metric = None

    def train_config(self, fl_ctx: FLContext):
        """MONAI training configuration.

        Reads the JSON config file from the app config directory and builds
        the model, loss, optimizer, transforms, datasets/loaders, inferer,
        and validation metric.

        Raises:
            FileNotFoundError: if the training configuration file is missing.
        """
        # Load training configurations json
        engine = fl_ctx.get_engine()
        ws = engine.get_workspace()
        app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())
        train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)
        if not os.path.isfile(train_config_file_path):
            self.log_error(
                fl_ctx,
                f"Training configuration file does not exist at {train_config_file_path}",
            )
            # Fail fast with a clear error instead of falling through to an
            # open() call that would raise a less obvious exception.
            raise FileNotFoundError(f"Training configuration file does not exist at {train_config_file_path}")
        with open(train_config_file_path) as file:
            self.config_info = json.load(file)
        # Get the config_info
        self.lr = self.config_info["learning_rate"]
        cache_rate = self.config_info["cache_dataset"]
        dataset_base_dir = self.config_info["dataset_base_dir"]
        datalist_json_path = self.config_info["datalist_json_path"]
        # Get datalist json, customized per client identity
        datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)
        # Set datalist
        train_list = load_decathlon_datalist(
            data_list_file_path=datalist_json_path,
            is_segmentation=True,
            data_list_key="training",
            base_dir=dataset_base_dir,
        )
        valid_list = load_decathlon_datalist(
            data_list_file_path=datalist_json_path,
            is_segmentation=True,
            data_list_key="validation",
            base_dir=dataset_base_dir,
        )
        self.log_info(
            fl_ctx,
            f"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}",
        )
        # Set the training-related context
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # 2D U-Net for single-channel (grayscale) input, binary segmentation output
        self.model = UNet(
            spatial_dims=2,
            in_channels=1,
            out_channels=1,
            channels=(16, 32, 64, 128, 256),
            strides=(2, 2, 2, 2),
            num_res_units=2,
        ).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.criterion = DiceLoss(sigmoid=True)
        # Pre-processing: scale intensities to [0, 1], resize to 256x256,
        # binarize labels at 0.5.
        self.transform = Compose(
            [
                LoadImaged(keys=["image", "label"]),
                EnsureChannelFirstd(keys=["image", "label"]),
                ScaleIntensityRanged(keys=["image", "label"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),
                Resized(
                    keys=["image", "label"],
                    spatial_size=(256, 256),
                    mode=("bilinear"),
                    align_corners=True,
                ),
                AsDiscreted(keys=["label"], threshold=0.5),
                EnsureTyped(keys=["image", "label"]),
            ]
        )
        # Post-processing for predictions: sigmoid then threshold at 0.5
        self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
        # Set dataset: optionally cache transformed samples in memory
        if cache_rate > 0.0:
            train_dataset = CacheDataset(
                data=train_list,
                transform=self.transform,
                cache_rate=cache_rate,
                num_workers=0,
            )
            valid_dataset = CacheDataset(
                data=valid_list,
                transform=self.transform,
                cache_rate=cache_rate,
                num_workers=0,
            )
        else:
            train_dataset = Dataset(
                data=train_list,
                transform=self.transform,
            )
            valid_dataset = Dataset(
                data=valid_list,
                transform=self.transform,
            )
        self.train_loader = DataLoader(
            train_dataset,
            batch_size=1,
            shuffle=True,
            num_workers=0,
        )
        self.valid_loader = DataLoader(
            valid_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=0,
        )
        # Set inferer and evaluation metric
        self.inferer = SimpleInferer()
        self.valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/learners/supervised_monai_prostate_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Union, cast
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
# Names exported via `from <module> import *`.
__all__ = [
    "VGG",
    "vgg11",
]

# URLs of the torchvision-published pretrained checkpoints, keyed by arch name.
model_urls = {
    "vgg11": "https://download.pytorch.org/models/vgg11-8a719046.pth",
}
class VGG(nn.Module):
    """VGG network: a configurable convolutional feature extractor followed by
    the standard 3-layer fully-connected classifier head.

    Args:
        features: convolutional backbone (e.g. built by ``make_layers``).
        num_classes: size of the final classification layer.
        init_weights: when True, re-initialize all weights in-place
            (Kaiming for convs, N(0, 0.01) for linears, constants for BN).
    """

    def __init__(self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True) -> None:
        super(VGG, self).__init__()
        self.features = features
        # Adaptive pooling fixes the spatial size to 7x7 regardless of input size.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        head = [
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)
        if init_weights:
            self._initialize_weights()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the backbone, pool to 7x7, flatten, and classify."""
        pooled = self.avgpool(self.features(x))
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)

    def _initialize_weights(self) -> None:
        # Standard VGG initialization scheme, applied module-by-module.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    """Build a VGG feature extractor from a layer configuration.

    Each entry of ``cfg`` is either the string ``"M"`` (a 2x2 max-pool with
    stride 2) or an int (a 3x3 same-padded conv with that many output
    channels, optionally followed by BatchNorm, then an in-place ReLU).
    The stem expects single-channel (grayscale) input.

    Args:
        cfg: sequence of layer descriptors.
        batch_norm: insert ``nn.BatchNorm2d`` after every conv when True.

    Returns:
        The assembled ``nn.Sequential`` backbone.
    """
    modules: List[nn.Module] = []
    # single channel input
    channels = 1
    for entry in cfg:
        if entry == "M":
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        width = cast(int, entry)
        modules.append(nn.Conv2d(channels, width, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(width))
        modules.append(nn.ReLU(inplace=True))
        channels = width
    return nn.Sequential(*modules)
# Layer configuration "A" (VGG-11): ints are conv output channels, "M" is max-pool.
cfgs: Dict[str, List[Union[str, int]]] = {"A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"]}


def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    # Internal factory: builds a VGG from a named layer configuration and
    # optionally loads a pretrained checkpoint from `model_urls`.
    if pretrained:
        # Checkpoints already contain trained weights; skip random init.
        kwargs["init_weights"] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if pretrained:
        # NOTE(review): the published checkpoint was trained on 3-channel RGB
        # input, while make_layers here builds a 1-channel stem — loading may
        # fail for the first conv; confirm before using pretrained=True.
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model


def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg("vgg11", "A", False, pretrained, progress, **kwargs)
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/networks/vgg.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.dxo import DataKind, from_shareable
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.model import (
ModelLearnable,
ModelLearnableKey,
make_model_learnable,
model_learnable_to_dxo,
)
from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
from nvflare.app_common.app_constant import AppConstants
class FullModelShareableFedSMGenerator(ShareableGenerator):
    """Shareable generator for FedSM.

    Converts between the FedSM "model set" (global model, per-client
    personalized models, and selector model with its Adam moments) and
    Shareable/DXO representations exchanged with clients.
    """

    def learnable_to_shareable(self, model_learnable: ModelLearnable, fl_ctx: FLContext) -> Shareable:
        """Convert ModelLearnable to Shareable.

        Args:
            model_learnable (ModelLearnable): model to be converted
            fl_ctx (FLContext): FL context

        Returns:
            Shareable: a shareable containing a DXO object.
        """
        dxo = model_learnable_to_dxo(model_learnable)
        return dxo.to_shareable()

    def update_single_model(self, dxo_single_model, base_model_set, model_id, fl_ctx: FLContext):
        # Apply one aggregated DXO (full weights or a weight diff) to the base
        # model identified by `model_id`; `base_model_set` is updated in place.
        if not dxo_single_model:
            self.log_error(fl_ctx, f"Aggregated model weights for {model_id} are missing!")
            return
        # get base_model from the base_model_set
        base_model = base_model_set[model_id]
        if not base_model:
            self.system_panic(
                reason=f"No base personalized model for {model_id}!",
                fl_ctx=fl_ctx,
            )
            # NOTE(review): returns the falsy base_model after panic; callers
            # ignore the return value, so this effectively acts as an early exit.
            return base_model
        weights = base_model[ModelLearnableKey.WEIGHTS]
        # update with aggregated dxo
        if dxo_single_model.data_kind == DataKind.WEIGHT_DIFF:
            # add aggregated weight_diff from aggregator record to the base model weights
            if dxo_single_model is not None:  # NOTE(review): redundant — already checked above
                model_diff = dxo_single_model.data
                for v_name in model_diff.keys():
                    # in-place update of the mutable `weights` dict
                    weights[v_name] = weights[v_name] + model_diff[v_name]
        elif dxo_single_model.data_kind == DataKind.WEIGHTS:
            # update weights directly, replacing the whole weight dict
            weights_new = dxo_single_model.data
            if not weights_new:
                self.log_info(
                    fl_ctx,
                    f"No model weights for {model_id} found. Model will not be updated.",
                )
            else:
                base_model[ModelLearnableKey.WEIGHTS] = weights_new
        else:
            raise ValueError(
                f"data_kind should be either DataKind.WEIGHTS or DataKind.WEIGHT_DIFF, but got {dxo_single_model.data_kind}"
            )
        # set meta and set base_model_set
        base_model[ModelLearnableKey.META] = dxo_single_model.get_meta_props()
        base_model_set[model_id] = base_model

    def shareable_to_learnable(self, shareable: Shareable, client_ids: list, fl_ctx: FLContext) -> ModelLearnable:
        """Convert Shareable to ModelLearnable.

        Supporting TYPE == TYPE_WEIGHT_DIFF or TYPE_WEIGHTS

        Args:
            shareable (Shareable): Shareable that contains a DXO object
            client_ids: client id list for getting the personalized models
            fl_ctx (FLContext): FL context

        Returns:
            A ModelLearnable object

        Raises:
            TypeError: if shareable is not of type shareable
            ValueError: if data_kind is not `DataKind.WEIGHTS` and is not `DataKind.WEIGHT_DIFF`
        """
        if not isinstance(shareable, Shareable):
            raise TypeError(f"shareable must be Shareable, but got {type(shareable)}.")
        # base_model_set is a "flattened set", containing all models with ids
        # "select_weights", "select_exp_avg", "select_exp_avg_sq", "global_weights", and client_ids
        base_model_set = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)["weights"]
        meta = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)["meta"]
        if not base_model_set:
            self.system_panic(reason="No FedSM base model set!", fl_ctx=fl_ctx)
            return base_model_set
        # dxo from aggregator is hierarchically organized as ["select_weights", "global_weights", "person_weights"]
        # "global_weights" is a dxo for global model
        # "person_weights" is a dxo collection containing dxo for each client_id
        # "select_weights" is a dxo collection containing dxo for ["select_weights", "exp_avg", "exp_avg_sq"]
        dxo = from_shareable(shareable)
        dxo_global = dxo.data.get("global_weights")
        self.update_single_model(dxo_global, base_model_set, "global_weights", fl_ctx)
        dxo_person = dxo.data.get("person_weights")
        for model_id in client_ids:
            dxo_single = dxo_person.get(model_id)
            self.update_single_model(dxo_single, base_model_set, model_id, fl_ctx)
        dxo_select = dxo.data.get("select_weights")
        for model_id in ["select_weights", "select_exp_avg", "select_exp_avg_sq"]:
            dxo_single = dxo_select.get(model_id)
            self.update_single_model(dxo_single, base_model_set, model_id, fl_ctx)
        model_set = make_model_learnable(base_model_set, meta)
        return model_set
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/shareablegenerators/full_model_shareable_fedsm_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from helpers.pt_fedsm import PTFedSMHelper
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.shareable import make_reply
from nvflare.apis.signal import Signal
class SupervisedPTFedSMHelper(PTFedSMHelper):
    """Helper to be used with FedSM components under supervised training specs.

    Adds the concrete personalized-model training loop on top of the generic
    PTFedSMHelper base (which handles the selector model and checkpointing).
    """

    def __init__(
        self,
        person_model,
        select_model,
        person_criterion,
        select_criterion,
        person_optimizer,
        select_optimizer,
        device,
        app_dir,
        person_model_epochs,
        select_model_epochs,
    ):
        # Forward all configuration to the generic PTFedSMHelper base class,
        # which validates types and sets up checkpoint paths.
        super().__init__(
            person_model,
            select_model,
            person_criterion,
            select_criterion,
            person_optimizer,
            select_optimizer,
            device,
            app_dir,
            person_model_epochs,
            select_model_epochs,
        )

    def local_train_person(self, train_loader, abort_signal: Signal, writer, current_round):
        # Train personalized model, and keep track of curves
        for epoch in range(self.person_model_epochs):
            if abort_signal.triggered:
                # NOTE(review): callers appear to ignore this return value; it
                # mainly serves as an early exit on abort.
                return make_reply(ReturnCode.TASK_ABORTED)
            self.person_model.train()
            epoch_len = len(train_loader)
            # global epoch index across rounds, used for TensorBoard x-axis
            epoch_global = current_round * self.person_model_epochs + epoch
            for i, batch_data in enumerate(train_loader):
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                inputs = batch_data["image"].to(self.device)
                labels = batch_data["label"].to(self.device)
                # forward + backward + optimize
                outputs = self.person_model(inputs)
                loss = self.person_criterion(outputs, labels)
                self.person_optimizer.zero_grad()
                loss.backward()
                self.person_optimizer.step()
                current_step = epoch_len * epoch_global + i
                writer.add_scalar("train_loss_personalized", loss.item(), current_step)
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/helpers/supervised_pt_fedsm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import abstractmethod
import numpy as np
import torch
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.shareable import make_reply
from nvflare.apis.signal import Signal
def AccuracyTopK(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each k in ``topk``.

    Args:
        output: logits/scores of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: tuple of k values to evaluate.

    Returns:
        A list of 1-element tensors, one per k, each holding the accuracy
        as a percentage of the batch.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)
        # indices of the k_max highest-scoring classes, transposed to (k, batch)
        _, ranked = output.topk(k_max, 1, True, True)
        ranked = ranked.t()
        # boolean (k, batch) grid: did the target appear at each rank?
        hits = ranked.eq(target.view(1, -1).expand_as(ranked))
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n_samples)
            for k in topk
        ]
class PTFedSMHelper(object):
    def __init__(
        self,
        person_model,
        select_model,
        person_criterion,
        select_criterion,
        person_optimizer,
        select_optimizer,
        device,
        app_dir,
        person_model_epochs: int = 1,
        select_model_epochs: int = 1,
    ):
        """Helper to be used with FedSM components.

        Implements the functions used for the algorithm proposed in
        Xu et al. "Closing the Generalization Gap of Cross-silo Federated Medical Image Segmentation"
        (https://arxiv.org/abs/2203.10144) using PyTorch.

        Args:
            person/select_model: the personalized and selector models
            person/select_criterion: loss criterion
            person/select_optimizer: training optimizer the two models
            device: device for model training; falls back to CUDA-if-available when None
            app_dir: needed for local personalized model saving
            person/select_model_epochs: total training epochs each round

        Returns:
            None
        """
        self.person_model = person_model
        self.select_model = select_model
        self.person_criterion = person_criterion
        self.select_criterion = select_criterion
        self.person_optimizer = person_optimizer
        self.select_optimizer = select_optimizer
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.person_model_epochs = person_model_epochs
        self.select_model_epochs = select_model_epochs
        # check criterion, model, and optimizer type — fail early with a clear
        # message rather than at first use during training
        if not isinstance(self.person_model, torch.nn.Module):
            raise ValueError(f"person_model component must be torch model. But got: {type(self.person_model)}")
        if not isinstance(self.select_model, torch.nn.Module):
            raise ValueError(f"select_model component must be torch model. But got: {type(self.select_model)}")
        if not isinstance(self.person_criterion, torch.nn.modules.loss._Loss):
            raise ValueError(f"person_criterion component must be torch loss. But got: {type(self.person_criterion)}")
        if not isinstance(self.select_criterion, torch.nn.modules.loss._Loss):
            raise ValueError(f"select_criterion component must be torch loss. But got: {type(self.select_criterion)}")
        if not isinstance(self.person_optimizer, torch.optim.Optimizer):
            raise ValueError(
                f"person_optimizer component must be torch optimizer. But got: {type(self.person_optimizer)}"
            )
        if not isinstance(self.select_optimizer, torch.optim.Optimizer):
            raise ValueError(
                f"select_optimizer component must be torch optimizer. But got: {type(self.select_optimizer)}"
            )
        if not isinstance(self.device, torch.device):
            raise ValueError(f"device component must be torch device. But got: {type(self.device)}")
        # initialize other recording related parameters
        # save personalized model to local file
        # note: global and selector model saved on server
        self.person_best_metric = 0
        self.person_model_file_path = os.path.join(app_dir, "personalized_model.pt")
        self.best_person_model_file_path = os.path.join(app_dir, "best_personalized_model.pt")

    def save_person_model(self, current_round, is_best=False):
        """Save the personalized model checkpoint locally.

        Writes to the "best" path when ``is_best`` is True (including the best
        metric), otherwise to the regular checkpoint path.
        """
        # save personalized model locally
        model_weights = self.person_model.state_dict()
        save_dict = {"model": model_weights, "epoch": current_round}
        if is_best:
            save_dict.update({"best_metric": self.person_best_metric})
            torch.save(save_dict, self.best_person_model_file_path)
        else:
            torch.save(save_dict, self.person_model_file_path)

    def local_train_select(self, train_loader, select_label, abort_signal: Signal, writer, current_round):
        """Train the selector model for ``select_model_epochs`` epochs.

        The selector is trained in a full-batch manner: gradients accumulate
        over all minibatches of an epoch and a single optimizer step is taken
        at the end of the epoch.
        """
        # Train selector model in full batch manner, and keep track of curves
        for epoch in range(self.select_model_epochs):
            if abort_signal.triggered:
                # NOTE(review): return value appears unused by callers; early exit
                return make_reply(ReturnCode.TASK_ABORTED)
            self.select_model.train()
            epoch_len = len(train_loader)
            # global epoch index across rounds, used for TensorBoard x-axis
            epoch_global = current_round * self.select_model_epochs + epoch
            for i, batch_data in enumerate(train_loader):
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                inputs = batch_data["image"].to(self.device)
                # construct vector of selector label: every sample in this
                # client's loader shares the same class id `select_label`
                labels = np.ones(inputs.size()[0], dtype=np.int64) * select_label
                labels = torch.tensor(labels).to(self.device)
                # forward + backward (no per-batch step: gradients accumulate)
                outputs = self.select_model(inputs)
                loss = self.select_criterion(outputs, labels)
                loss.backward()
                current_step = epoch_len * epoch_global + i
                writer.add_scalar("train_loss_selector", loss.item(), current_step)
            # Full batch training, 1 step per epoch
            self.select_optimizer.step()
            self.select_optimizer.zero_grad()

    def local_valid_select(
        self,
        valid_loader,
        select_label,
        abort_signal: Signal,
        tb_id=None,
        writer=None,
        current_round=None,
    ):
        """Validate the selector model; returns mean top-1 accuracy.

        When ``tb_id`` is given, the metric is also written to TensorBoard
        (NOTE(review): a non-None ``writer`` is required in that case).
        """
        # Validate selector model
        self.select_model.eval()
        with torch.no_grad():
            metric = 0
            for i, batch_data in enumerate(valid_loader):
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                # input and expected output
                images = batch_data["image"].to(self.device)
                # generate label vector: image batch_size, same label
                select = np.ones(images.size()[0], dtype=np.int64) * select_label
                select = torch.tensor(select).to(self.device)
                # inference
                outputs = self.select_model(images)
                # compute metric (top-1 accuracy for this batch)
                metric_score = AccuracyTopK(outputs, select, topk=(1,))
                metric += metric_score[0].item()
            # compute mean acc over whole validation set
            metric /= len(valid_loader)
            # tensorboard record id, add to record if provided
            if tb_id:
                writer.add_scalar(tb_id, metric, current_round)
        return metric

    def update_metric_save_person_model(self, current_round, metric):
        """Save the latest personalized model; also save as "best" when
        ``metric`` improves on the best so far.

        Returns:
            1 if a new best model was saved, else 0.
        """
        self.save_person_model(current_round, is_best=False)
        if metric > self.person_best_metric:
            self.person_best_metric = metric
            self.save_person_model(current_round, is_best=True)
            return 1
        else:
            return 0

    @abstractmethod
    def local_train_person(self, train_loader, abort_signal: Signal, writer):
        # Train personal model for self.model_epochs, and keep track of curves
        # This part is task dependent, need customization
        # NOTE(review): concrete subclasses extend this signature with
        # `current_round` — confirm intended abstract contract.
        raise NotImplementedError
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/helpers/pt_fedsm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, ReservedKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
class IntimeModelFedSMSelector(FLComponent):
    def __init__(self, weigh_by_local_iter=False, aggregation_weights=None):
        """Handler to determine if the model is globally best.

        Note that only model_global and model_select participate in the metric
        averaging process, while personalized models directly record the
        availability status.

        Args:
            weigh_by_local_iter (bool, optional): whether the metrics should be weighted by trainer's iteration number. Defaults to False.
            aggregation_weights (dict, optional): a mapping of client name to float for aggregation. Defaults to None.
        """
        super().__init__()
        # running best metrics for the global and selector models
        self.val_metric_global = self.best_val_metric_global = -np.inf
        self.val_metric_select = self.best_val_metric_select = -np.inf
        self.weigh_by_local_iter = weigh_by_local_iter
        self.validation_metric_name = MetaKey.INITIAL_METRICS
        self.aggregation_weights = aggregation_weights or {}
        # per-client flag: 1 if the client reported a new best personalized model
        self.person_best_status = {}
        self.logger.debug(f"model selection weights control: {aggregation_weights}")
        self._reset_stats()

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        """Perform the handler process based on the event_type.

        Args:
            event_type (str): event type delivered from workflow
            fl_ctx (FLContext): FL context, including peer context and other information
        """
        if event_type == EventType.START_RUN:
            self._startup()
        elif event_type == EventType.BEFORE_PROCESS_SUBMISSION:
            self._before_accept(fl_ctx)
        elif event_type == AppEventType.BEFORE_AGGREGATION:
            self._before_aggregate(fl_ctx)

    def _startup(self):
        self._reset_stats()

    def _reset_stats(self):
        # reset per-round accumulators for the weighted metric averages
        self.validation_metric_global_weighted_sum = 0
        self.global_sum_of_weights = 0
        self.validation_metric_select_weighted_sum = 0
        self.select_sum_of_weights = 0
        self.person_best_status = {}

    def _before_accept(self, fl_ctx: FLContext):
        """Accumulate one client's validation metrics before its submission is accepted.

        Returns:
            True if the metrics were accumulated, False if the submission was skipped.
        """
        peer_ctx = fl_ctx.get_peer_context()
        shareable: Shareable = peer_ctx.get_prop(FLContextKey.SHAREABLE)
        try:
            dxo = from_shareable(shareable)
        except Exception:
            # was a bare `except:`; narrowed so system exits are not swallowed
            self.log_exception(fl_ctx, "shareable data is not a valid DXO")
            return False
        # check data_kind
        if dxo.data_kind not in (
            DataKind.WEIGHT_DIFF,
            DataKind.WEIGHTS,
            DataKind.COLLECTION,
        ):
            self.log_debug(fl_ctx, f"I cannot handle {dxo.data_kind}")
            return False
        if dxo.data is None:
            self.log_debug(fl_ctx, "no data to filter")
            return False
        # DXO for FedSM is in "collection" format, containing three dxo objects
        # (global_weights, person_weights, select_weights) together with the meta information
        contribution_round = shareable.get_header(AppConstants.CONTRIBUTION_ROUND)
        client_name = shareable.get_peer_prop(ReservedKey.IDENTITY_NAME, default="?")
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        if current_round == 0:
            self.log_debug(fl_ctx, "skipping round 0")
            return False  # There is no aggregated model at round 0
        if contribution_round != current_round:
            self.log_debug(
                fl_ctx,
                f"discarding shareable from {client_name} for round: {contribution_round}. Current round is: {current_round}",
            )
            return False
        # validation metric is a list of two numbers, corresponding to the two models,
        # note that personalized model do not need to be averaged, just record the status of best model availability
        # [global_metric, select_metric, person_best]
        validation_metric = dxo.get_meta_prop(self.validation_metric_name)
        if validation_metric is None:
            self.log_debug(fl_ctx, f"validation metric not existing in {client_name}")
            return False
        else:
            self.log_info(
                fl_ctx,
                f"validation metric {validation_metric} from client {client_name}",
            )
        if self.weigh_by_local_iter:
            n_iter = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, 1.0)
        else:
            n_iter = 1.0
        aggregation_weights = self.aggregation_weights.get(client_name, 1.0)
        self.log_debug(fl_ctx, f"aggregation weight: {aggregation_weights}")
        # NOTE(review): aggregation_weights scale only the numerator (not the
        # denominator), so they act as a contribution boost rather than a true
        # weighted average — confirm this is the intended semantics.
        self.validation_metric_global_weighted_sum += validation_metric[0] * n_iter * aggregation_weights
        self.global_sum_of_weights += n_iter
        self.validation_metric_select_weighted_sum += validation_metric[1] * n_iter * aggregation_weights
        self.select_sum_of_weights += n_iter
        self.person_best_status[client_name] = validation_metric[2]
        return True

    def _before_aggregate(self, fl_ctx):
        """Average accumulated metrics and fire best-model events, then reset.

        Returns:
            True if averaging happened, False if nothing was accumulated.
        """
        if self.global_sum_of_weights == 0:
            self.log_debug(fl_ctx, "nothing accumulated for model_global")
            return False
        if self.select_sum_of_weights == 0:
            self.log_debug(fl_ctx, "nothing accumulated for model_selector")
            return False
        self.val_metric_global = self.validation_metric_global_weighted_sum / self.global_sum_of_weights
        self.val_metric_select = self.validation_metric_select_weighted_sum / self.select_sum_of_weights
        self.logger.debug(f"weighted validation metric for global model {self.val_metric_global}")
        self.logger.debug(f"weighted validation metric for selector model {self.val_metric_select}")
        self.logger.debug(f"best personalized model availability {self.person_best_status}")
        if self.val_metric_global > self.best_val_metric_global:
            self.best_val_metric_global = self.val_metric_global
            current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
            self.log_info(
                fl_ctx,
                f"new best validation metric for global model at round {current_round}: {self.best_val_metric_global}",
            )
            # Fire event to notify a new best global model
            self.fire_event("fedsm_best_model_available_global_weights", fl_ctx)
        if self.val_metric_select > self.best_val_metric_select:
            self.best_val_metric_select = self.val_metric_select
            current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
            self.log_info(
                fl_ctx,
                f"new best validation metric for selector model at round {current_round}: {self.best_val_metric_select}",
            )
            # Fire event to notify a new best selector model
            self.fire_event("fedsm_best_model_available_select_weights", fl_ctx)
        for client_id in self.person_best_status.keys():
            if self.person_best_status[client_id] == 1:
                # Fire event to notify a new best personalized model
                self.fire_event("fedsm_best_model_available_" + client_id, fl_ctx)
        self._reset_stats()
        return True
| NVFlare-main | research/fed-sm/jobs/fedsm_prostate/app/custom/widgets/intime_model_fedsm_selector.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .filters.gaussian_privacy import GaussianPrivacy
from .filters.gradinv import GradInversionInverter, Inverter
from .filters.image_sim import ImageSimMetric, SimMetric
from .filters.rdlv_filter import RelativeDataLeakageValueFilter
from .learners.cxr_learner import CXRLearner
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import numpy as np
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.dxo_filter import DXOFilter
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
class GaussianPrivacy(DXOFilter):
    """DXO filter that adds zero-mean Gaussian noise to shared model updates.

    The noise standard deviation is adaptive: it is `sigma0` times the
    `max_percentile`-th percentile of the nonzero absolute values in the
    current update, so the noise scales with the magnitude of the weights
    actually being shared.
    """

    def __init__(self, sigma0=0.1, max_percentile=95, data_kinds: [str] = None):
        """Add Gaussian noise to shared model updates.

        Args:
            sigma0: must be >= 0; fraction of the reference max value used as noise sigma.
            max_percentile: must be in 0..100; percentile of nonzero absolute update
                values used as the reference "max value".
            data_kinds: kinds of DXO data to filter. If None,
                `[DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]` is used.

        Raises:
            ValueError: if `sigma0` or `max_percentile` is not a scalar in the valid range.
        """
        if not data_kinds:
            data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]

        super().__init__(
            supported_data_kinds=[DataKind.WEIGHTS, DataKind.WEIGHT_DIFF],
            data_kinds_to_filter=data_kinds,
        )

        # Validate hyperparameters early so misconfiguration fails at construction time.
        if not np.isscalar(sigma0):
            raise ValueError(f"Expected a positive scalar for `sigma0` but received type {type(sigma0)}")
        if sigma0 < 0.0:
            raise ValueError(f"Expected a positive float for `sigma0` but received {sigma0}.")
        if not np.isscalar(max_percentile):
            raise ValueError(
                f"Expected a positive scalar for `max_percentile` but received type {type(max_percentile)}"
            )
        if max_percentile < 0.0 or max_percentile > 100.0:
            # Fix: the message previously referred to `sigma0` although this check is for `max_percentile`.
            raise ValueError(f"Expected a float for `max_percentile` between 0 and 100 but received {max_percentile}.")
        self.sigma0 = sigma0
        self.max_percentile = max_percentile

    def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:
        """Add Gaussian noise to data in dxo.

        Args:
            dxo: information from client
            shareable: that the dxo belongs to
            fl_ctx: context provided by workflow

        Returns: filtered result.
        """
        if self.sigma0 > 0.0:
            weights = dxo.data
            # Gather the absolute values of all update entries to derive the noise scale.
            all_abs_values = np.concatenate([np.abs(weights[name].ravel()) for name in weights])
            # Only nonzero entries contribute; zeros (e.g. untouched layers) would bias the percentile down.
            all_abs_nonzero = all_abs_values[all_abs_values > 0.0]
            max_value = np.percentile(a=all_abs_nonzero, q=self.max_percentile, overwrite_input=False)
            noise_sigma = self.sigma0 * max_value
            n_vars = len(weights)
            for var_name in weights:
                # Same sigma for every variable; noise is drawn independently per entry.
                weights[var_name] = weights[var_name] + np.random.normal(0.0, noise_sigma, np.shape(weights[var_name]))
            self.log_info(
                fl_ctx,
                f"Added Gaussian noise to {n_vars} vars with sigma"
                f" {noise_sigma}, "
                f"sigma fraction: {self.sigma0}, "
                f"{self.max_percentile:.4f}th percentile of nonzero values: {max_value:.4f}",
            )
            dxo.data = weights
        else:
            self.log_warning(fl_ctx, "Sigma fraction is zero. No noise is being applied...")
        return dxo
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/filters/gaussian_privacy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import skimage.metrics
import torch
from scipy import spatial
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
class SimMetric(FLComponent):
    """Abstract base for similarity metrics comparing a source image to a reference.

    Concrete subclasses implement ``__call__`` and return the metric value(s).
    """

    def __init__(self):
        super().__init__()

    def __call__(self, source, reference):
        """Compute the similarity between ``source`` and ``reference``.

        Args:
            source: source image
            reference: reference image the source image should be compared to

        Returns:
            similarity metric
        """
        raise NotImplementedError(f"Subclass of {self.__class__.__name__} must implement this method.")
class ImageSimMetric(SimMetric):
    def __init__(self, metrics=None):
        """Implementation of `SimMetric` for imaging applications.

        Args:
            metrics: String or list of similarity metrics. Support "ssim", "cosine", and "norm".

        Returns:
            similarity metric
        """
        super().__init__()
        # Default to SSIM only; a single metric name is normalized to a one-element list.
        if not metrics:
            self.metrics = ["ssim"]
        else:
            if isinstance(metrics, str):
                self.metrics = [metrics]
            elif isinstance(metrics, list):
                self.metrics = metrics
            else:
                raise ValueError("Expected `metrics` to be string or list of strings.")

    @staticmethod
    def check_shape(img):
        # Drop channels beyond the first three (e.g. an alpha channel) so the
        # image is treated as RGB.
        if len(np.shape(img)) > 2:
            if np.shape(img)[2] > 3:
                img = img[:, :, 0:3]
        # sim metrics assume images to scaled 0...255
        if np.max(img) <= 1.0:
            img = img * 255.0
        # NOTE(review): a purely 2-D (grayscale) input would raise IndexError here
        # rather than this assertion message — callers are expected to pass HxWx3.
        assert np.shape(img)[2] == 3, "Assuming RGB image here"
        return img

    def __call__(self, source, reference, fl_ctx: FLContext, is_channel_first=False):
        """Compute the configured similarity metrics between `source` and `reference`.

        Args:
            source: source image (torch.Tensor or ndarray), channel-last unless
                `is_channel_first` is set.
            reference: reference image with the same layout as `source`.
            fl_ctx: FL context used only for logging warnings.
            is_channel_first: if True, the channel axis is swapped to last before scoring.

        Returns:
            dict mapping each metric name in `self.metrics` to its value.
        """
        # check type: accept torch tensors and move them to CPU numpy first
        if isinstance(source, torch.Tensor):
            source = source.detach().cpu().numpy()
        if isinstance(reference, torch.Tensor):
            reference = reference.detach().cpu().numpy()
        source = source.astype(np.float32)
        reference = reference.astype(np.float32)
        if is_channel_first:
            # Swap channel axis to the end; assumes the remaining spatial dims are
            # symmetric enough that swapaxes(-1, 0) produces the intended layout —
            # TODO confirm for non-square inputs.
            source = np.swapaxes(source, -1, 0)
            reference = np.swapaxes(reference, -1, 0)
        source = self.check_shape(source)
        reference = self.check_shape(reference)
        if not source.shape == reference.shape:
            raise ValueError(
                f"`source` and `reference` must have the same dimensions but they were {source.shape} vs. {reference.shape}."
            )
        # TODO: convert these to warnings
        assert np.min(source) >= 0, f"img min is {np.min(source)}"
        assert np.min(reference) >= 0, f"img_recon min is {np.min(reference)}"
        assert 1 < np.max(source) <= 255
        assert 1 < np.max(reference) <= 255
        # assert np.mean(img) > 1
        # assert np.mean(img_recon) > 1
        if np.mean(source) < 1:
            self.log_warning(
                fl_ctx,
                f"[WARNING] image mean is very low {np.mean(source)} (min={np.min(source)}, max={np.max(source)})",
            )
        if np.mean(reference) < 1:
            self.log_warning(
                fl_ctx,
                f"[WARNING] image mean is very low {np.mean(reference)} (min={np.min(reference)}, max={np.max(reference)})",
            )
        # compute metrics
        outputs = {}
        for metric in self.metrics:
            if not isinstance(metric, str):
                raise ValueError(f"Expect metric to be of type string but got type {type(metric)}")
            if metric == "ssim":
                # Structural similarity over the channel-last RGB images, with the
                # 0..255 value range made explicit.
                out_value = skimage.metrics.structural_similarity(
                    reference, source, channel_axis=-1, data_range=256.0
                )  # ,
                # gaussian_weights=True, sigma=1.5, # Wang et al.
                # use_sample_covariance=False)
            elif metric == "cosine":
                # NOTE(review): scipy's distance.cosine is defined for 1-D vectors;
                # passing 3-D images here may not behave as intended — verify inputs.
                out_value = 1 - spatial.distance.cosine(reference, source)
                assert 0.0 <= out_value <= 1.0, f"cosine similarity is out of range {out_value}"
            elif metric == "norm":
                # Euclidean (Frobenius) norm of the pixel-wise difference.
                out_value = np.sqrt(np.sum((reference - source) ** 2))
            else:
                raise ValueError(f"Metric {metric} not supported! Choose from `ssim`, `cosine` or `norm`")
            outputs[metric] = out_value
        return outputs
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/filters/image_sim.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from copy import deepcopy
import numpy as np
import torch
from fl_gradient_inversion import FLGradientInversion
from monai.networks.nets.torchvision_fc import TorchVisionFCModel
from monai.transforms import (
Compose,
EnsureChannelFirst,
LoadImage,
NormalizeIntensity,
Resize,
SaveImage,
ScaleIntensity,
Transform,
)
from monai.utils import ImageMetaKey
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.learner_spec import Learner
class Inverter(FLComponent):
    """Base class for inversion attacks that reconstruct data from model updates."""

    def __init__(self):
        super().__init__()

    def __call__(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext):
        """Invert the model-update data carried by ``dxo``.

        Args:
            dxo: the DXO to containing the data to be inverted
            shareable: the shareable that the dxo belongs to
            fl_ctx: the FL context

        Returns:
            Inversions
        """
        raise NotImplementedError(f"Subclass of {self.__class__.__name__} must implement this method.")
class AddName(Transform):
    """MONAI transform that stamps each reconstruction with a sequential filename.

    The counter wraps after ``num_images`` items so names repeat per batch.
    """

    def __init__(self, num_images=1):
        # how many reconstructions are named before the counter restarts
        self.num_images = num_images
        self.idx = 0

    def __call__(self, img):
        # wrap the counter once a full batch has been named (equivalent to
        # resetting when idx == num_images, since idx only grows by one per call)
        self.idx = self.idx % self.num_images
        img.meta[ImageMetaKey.FILENAME_OR_OBJ] = f"recon_b{self.idx}"
        self.idx += 1
        return img
class GradInversionInverter(Inverter):
    def __init__(
        self,
        learner_name: str = "learner",
        cfg_file: str = "config/config_inversion.json",
        bn_momentum: float = 0.1,
        compute_update_sums: bool = True,
        print_names: bool = True,
        use_determinism: bool = True,
        prior_transforms=None,
        save_transforms=None,
        save_fmt=".png",
    ):
        """Wrapper class calling gradient inversion. Assumes being used with `CXRLearner` or
        Learners that have the same member variables to be accessed in `__call__()`.

        Args:
            learner_name: ID of the `Learner` component used to get training hyperparameters.
            cfg_file: Configuration file used by `FLGradientInversion` class.
            bn_momentum: Batch norm momentum used by the local trainer code in `Learner`. Defaults to 0.1.
            compute_update_sums: Whether to print the absolute sum of the model updates. Defaults to `True`.
            print_names: Whether to print the layer variable names of the model. Defaults to `True`.
            use_determinism: Whether to use deterministic functions for the reconstruction. Defaults to `True`.
            prior_transforms: Optional custom transforms to read prior images. Defaults to `None`.
            save_transforms: Optional custom transforms to save the reconstructed images. Defaults to `None`.
            save_fmt: Output format to save individual reconstructions. Defaults to ".png".

        Returns:
            Reconstructions
        """
        super().__init__()
        # Validate configuration arguments early so misconfiguration fails at construction.
        if not isinstance(learner_name, str):
            raise ValueError(f"Expected `learner_name` of type `str` but received type {type(learner_name)}")
        if not isinstance(cfg_file, str):
            raise ValueError(f"Expected `cfg_file` of type `str` but received type {type(cfg_file)}")
        if not isinstance(bn_momentum, float):
            raise ValueError(f"Expected `bn_momentum` of type `float` but received type {type(bn_momentum)}")
        self.learner_name = learner_name
        self.cfg_file = cfg_file
        self.bn_momentum = bn_momentum
        self.compute_update_sums = compute_update_sums
        self.print_names = print_names
        self.prior_transforms = prior_transforms
        self.save_transforms = save_transforms
        self.save_fmt = save_fmt
        # cfg and save_path are resolved lazily in __call__, once APP_ROOT is known.
        self.cfg = None
        self.save_path = None
        self.use_determinism = use_determinism
        if self.use_determinism:
            # Make cuDNN deterministic so repeated attacks yield reproducible results.
            torch.backends.cudnn.deterministic = True

    @staticmethod
    def run_inversion(
        cfg, updates, global_weights, bn_momentum=0.1, prior_transforms=None, save_transforms=None, save_fmt=".png"
    ):
        """Wrapper function calling `FLGradientInversion`.

        Args:
            cfg: Configuration dictionary used by `FLGradientInversion` class.
            updates: The model updates sent by the client.
            global_weights: The current state dict of global model the `updates` are with respect to.
            bn_momentum: Batch norm momentum used by the local trainer code in `Learner`. Defaults to 0.1.
            prior_transforms: Optional custom transforms to read prior images. Defaults to `None`.
            save_transforms: Optional custom transforms to save the reconstructed images. Defaults to `None`.
            save_fmt: Output format to save individual reconstructions. Defaults to ".png".

        Returns:
            Reconstructions
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Rebuild the model architecture the updates correspond to.
        net = TorchVisionFCModel(
            model_name=cfg["model_name"],
            num_classes=cfg["num_classes"],
            pretrained=False,
        )
        # get global weights; some checkpoints nest the state dict under "model"
        if "model" in global_weights:
            net.load_state_dict(global_weights["model"])
        else:
            net.load_state_dict(global_weights)
        # compute weight changes — sanity check that the client actually sent nonzero updates
        update_sum = 0.0
        for name, _ in net.named_parameters():
            update_sum += np.sum(np.abs(updates[name]))
        assert update_sum > 0.0, "All updates are zero!"
        # model_bn holds the model *after* applying the client updates (global + delta)
        model_bn = deepcopy(net).cuda()
        update_sum = 0.0
        new_state_dict = model_bn.state_dict()
        for n in updates.keys():
            val = updates[n]
            update_sum += np.sum(np.abs(val))
            new_state_dict[n] += torch.tensor(
                val,
                dtype=new_state_dict[n].dtype,
                device=new_state_dict[n].device,
            )
        model_bn.load_state_dict(new_state_dict)
        assert update_sum > 0.0, "All updates are zero!"
        n_bn_updated = 0
        global_state_dict = net.state_dict()
        # Compute full BN stats: collect updated batch-norm tensors (global + delta)
        bn_stats = {}
        for param_name in updates:
            if "bn" in param_name or "batch" in param_name or "running" in param_name:
                bn_stats[param_name] = global_weights[param_name] + updates[param_name]
        for n in bn_stats.keys():
            if "running" in n:
                # Invert the running-average update rule
                # (new = (1 - m) * old + m * batch_stat) to recover the batch statistic.
                xt = (bn_stats[n] - (1 - bn_momentum) * global_state_dict[n].numpy()) / bn_momentum
                n_bn_updated += 1
                bn_stats[n] = xt
        # move weight updates and model to gpu
        net = net.to(device)
        grad_lst = []
        for name, _ in net.named_parameters():
            val = torch.from_numpy(updates[name]).cuda()
            grad_lst.append([name, val])
        # Use same transforms to load prior as used in training routine
        # TODO: make configurable
        if prior_transforms is None:
            prior_transforms = Compose(
                [
                    LoadImage(image_only=True),
                    EnsureChannelFirst(),
                    NormalizeIntensity(subtrahend=0, divisor=255, dtype="float32"),
                    Resize(spatial_size=[224, 224]),
                ]
            )
        if save_transforms is None:
            save_transforms = Compose(
                [
                    ScaleIntensity(minv=0, maxv=255),
                    AddName(num_images=cfg["local_num_images"]),
                    SaveImage(
                        output_dir=cfg["save_path"],
                        output_ext=save_fmt,
                        separate_folder=False,
                        output_postfix="",
                    ),
                ]
            )
        # Compute inversion
        grad_inversion_engine = FLGradientInversion(
            network=net,
            grad_lst=grad_lst,
            bn_stats=bn_stats,
            model_bn=model_bn,
            prior_transforms=prior_transforms,
            save_transforms=save_transforms,
        )
        best_inputs, targets = grad_inversion_engine(cfg)
        return best_inputs, targets

    def __call__(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext):
        """Run the inversion attack on the client update in `dxo` and return reconstructions."""
        app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
        if not self.cfg:
            # First call: resolve the config file relative to the app root and load it.
            self.cfg_file = os.path.join(app_root, self.cfg_file)
            if os.path.isfile(self.cfg_file):
                with open(self.cfg_file, "r") as f:
                    self.cfg = json.load(f)
            else:
                raise ValueError(f"`cfg_file` file does not exist at {self.cfg_file}")
            self.save_path = os.path.join(app_root, self.cfg["save_path"])
            self.logger.info(f"Using full BN stats with momentum {self.bn_momentum} ! \n")
        # get current learner & global model
        engine = fl_ctx.get_engine()
        _learner = engine.get_component(self.learner_name)
        if not _learner:
            raise ValueError(f"No Learner available with name {self.learner_name}")
        elif not isinstance(_learner, Learner):
            raise ValueError(f"Expected `learner` to be of type `Learner` but got type {type(_learner)}")
        if _learner:
            global_model = _learner.global_weights
            if "weights" in global_model:
                global_model = global_model["weights"]
        if global_model is None:
            raise ValueError("No global model exists!")
        # get updates; only WEIGHT_DIFF payloads can be inverted
        weight_updates = dxo.data
        if weight_updates is None or len(weight_updates) == 0:
            raise ValueError(f"No weight_updates available or empty: {weight_updates}")
        if dxo.data_kind != DataKind.WEIGHT_DIFF:
            raise ValueError(f"Expected weight updates to be of data_kind `WEIGHT_DIFF` but got {dxo.data_kind}")
        if self.compute_update_sums:
            # Diagnostic: split the absolute update mass into BN vs. non-BN parameters.
            sum_updates, sum_bn_updates = 0.0, 0.0
            for k in weight_updates.keys():
                if self.print_names:
                    print(f"Inverting {k}")
                if "bn" in k or "batch" in k or "running" in k:
                    sum_bn_updates += np.sum(np.abs(weight_updates[k]))
                else:
                    sum_updates += np.sum(np.abs(weight_updates[k]))
            self.log_info(
                fl_ctx,
                f"weight update sum {sum_updates}, bn update sum {sum_bn_updates}",
            )
            if sum_updates == 0.0:
                self.log_warning(fl_ctx, "Gradient update sum is zero!")
            if sum_bn_updates == 0.0:
                self.log_warning(fl_ctx, "BN sum is zero!")
        # run inversion — write results into a per-client, per-round folder and mirror
        # the learner's current training hyperparameters into the inversion config
        self.cfg["save_path"] = os.path.join(
            self.save_path,
            f"{fl_ctx.get_identity_name()}_rnd{_learner.current_round}",
        )
        self.cfg["batch_size"] = _learner.batch_size
        self.cfg["local_bs"] = _learner.batch_size
        self.cfg["num_classes"] = _learner.num_class
        self.cfg["lr_local"] = _learner.get_lr()[0]  # use learning rate from first layer
        self.cfg["local_epoch"] = _learner.aggregation_epochs
        self.cfg["local_num_images"] = int(len(_learner.train_loader) * _learner.batch_size)
        self.log_info(fl_ctx, f"Run inversion with config {self.cfg}")
        best_images, _ = self.run_inversion(
            cfg=self.cfg,
            updates=weight_updates,
            global_weights=global_model,
            bn_momentum=self.bn_momentum,
            prior_transforms=self.prior_transforms,
            save_transforms=self.save_transforms,
            save_fmt=self.save_fmt,
        )
        return best_images
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/filters/gradinv.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
import numpy as np
import torch
from monai.data import CacheDataset, load_decathlon_datalist
from monai.transforms import (
Compose,
EnsureChannelFirst,
EnsureChannelFirstd,
LoadImage,
LoadImaged,
RepeatChannel,
RepeatChanneld,
Resize,
Resized,
ScaleIntensity,
)
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.dxo_filter import DXOFilter
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.app_constant import AppConstants
from .gradinv import Inverter
from .image_sim import SimMetric
class RelativeDataLeakageValueFilter(DXOFilter):
    def __init__(
        self,
        data_root: str,
        dataset_json: str,
        frequency: int = 1,
        start_round: int = 0,
        inverter_name: str = "grad_inverter",
        sim_metric_name: str = "sim_metric",
        prior_filename: str = None,
        image_key: str = "image",
        rdlv_reduce: str = "max",
        rdlv_threshold: float = 1.0,
        data_kinds: [str] = None,
        save_best_matches: bool = False,
    ):
        """Filter calling gradient inversion and computing the "Relative Data Leakage Value" (RDLV).
        Data inside the DXO will be removed if any RDLV is above the `rdlv_threshold`.
        See https://arxiv.org/abs/2202.06924 for details.

        Args:
            data_root: Data root for local training set.
            dataset_json: Data list of local training set.
            frequency: Frequency in FL rounds for which to run inversion code and the filter. Defaults to 1.
            start_round: FL round to start inversion. Defaults to 0.
            inverter_name: ID of inverter component.
            sim_metric_name: ID of the similarity metric component.
            prior_filename: Prior image used to initialize the attack and to compute RDLV.
            image_key: Dictionary key used by the data loader.
            rdlv_reduce: Which operation to use to reduce the RDLV values. Defaults to "max".
            rdlv_threshold: Threshold on RDLV to determine whether dxo.data will be passed on or filtered out.
            data_kinds: kinds of DXO data to filter. If None,
                `[DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]` is used.
            save_best_matches: Whether to save the best reconstruction.

        Returns:
            Filtered DXO data. Empty DXO if any of the RDLV values is above `rdlv_threshold`.
            The computed RDLV values and hyperparameters will be saved as NumPy-file (*.npy) in the app_root.
        """
        if not data_kinds:
            data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]

        super().__init__(
            supported_data_kinds=[DataKind.WEIGHTS, DataKind.WEIGHT_DIFF],
            data_kinds_to_filter=data_kinds,
        )
        self.frequency = frequency
        self.start_round = start_round
        self.inverter_name = inverter_name
        self.sim_metric_name = sim_metric_name
        # Resolved lazily from the engine in _setup(); components may not exist yet here.
        self.inverter = None
        self.sim_metric = None
        self.image_key = image_key
        self.rdlv_reduce = rdlv_reduce
        self.rdlv_threshold = rdlv_threshold
        self.prior_filename = prior_filename
        self.save_best_matches = save_best_matches

        # TODO: make configurable
        self.data_root = data_root
        self.dataset_json = dataset_json
        self.train_set = "training"
        self.cache_rate = 1.0
        self.num_workers = 2

        # some input checks
        if isinstance(data_root, str):
            if not os.path.isdir(data_root):
                raise ValueError(f"`data_root` directory does not exist at {data_root}")
        else:
            raise ValueError(f"Expected `data_root` of type `str` but received type {type(data_root)}")
        if isinstance(dataset_json, str):
            if not os.path.isfile(dataset_json):
                raise ValueError(f"`dataset_json` file does not exist at {dataset_json}")
        else:
            raise ValueError(f"Expected `dataset_json` of type `str` but received type {type(dataset_json)}")

        # Data pipeline members; built on first use in _create_train_loader().
        self.transform_train = None
        self.train_dataset = None
        self.train_loader = None
        self.prior = None
        self.app_root = None
        # Reconstructions are rescaled to 0..255 before similarity scoring.
        self.recon_transforms = Compose([ScaleIntensity(minv=0, maxv=255)])

    def _create_train_loader(self, fl_ctx: FLContext):
        # Build the local training data loader (batch size 1) used to compare
        # reconstructions against the client's real training images.
        if self.train_loader is None:
            # create data loader for computing RDLV
            self.transform_train = Compose(
                [
                    LoadImaged(keys=[self.image_key]),
                    EnsureChannelFirstd(keys=[self.image_key]),
                    Resized(keys=[self.image_key], spatial_size=[224, 224]),
                    RepeatChanneld(keys=[self.image_key], repeats=3),
                ]
            )
            train_list = load_decathlon_datalist(
                data_list_file_path=self.dataset_json,
                is_segmentation=False,
                data_list_key=self.train_set,
                base_dir=self.data_root,
            )

            self.train_dataset = CacheDataset(
                data=train_list,
                transform=self.transform_train,
                cache_rate=self.cache_rate,
                num_workers=self.num_workers,
            )

            self.train_loader = torch.utils.data.DataLoader(
                self.train_dataset,
                batch_size=1,
                shuffle=False,
            )
            self.log_info(fl_ctx, f"Training Size ({self.train_set}): {len(train_list)}")

    def _setup(self, fl_ctx: FLContext):
        # Lazily resolve the inverter and similarity-metric components from the
        # engine and load the prior image; safe to call on every process_dxo().
        self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
        self._create_train_loader(fl_ctx)

        # get inverter during first process
        if self.inverter is None:
            engine = fl_ctx.get_engine()
            self.inverter = engine.get_component(self.inverter_name)
            if not self.inverter:
                raise ValueError(f"No Inverter available with name {self.inverter_name}")
            elif not isinstance(self.inverter, Inverter):
                raise ValueError(f"Expected `inverter` to be of type `Inverter` but got type {type(self.inverter)}")

        # get sim_metric during first process
        if self.sim_metric is None:
            engine = fl_ctx.get_engine()
            self.sim_metric = engine.get_component(self.sim_metric_name)
            if not self.sim_metric:
                raise ValueError(f"No SimMetric available with name {self.sim_metric_name}")
            elif not isinstance(self.sim_metric, SimMetric):
                raise ValueError(
                    f"Expected `sim_metric` to be of type `SimMetric` but got type {type(self.sim_metric)}"
                )

        # get prior
        if self.prior_filename and self.prior is None:
            prior_transforms = Compose(
                [
                    LoadImage(image_only=True),
                    EnsureChannelFirst(),
                    Resize(spatial_size=[224, 224]),
                    RepeatChannel(repeats=3),
                ]
            )
            self.prior = prior_transforms(self.prior_filename)

    def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:
        """Compute gradient inversions and compute relative data leakage value (RDLV).
        Filter result based on the given threshold.

        Args:
            dxo: information from client
            shareable: that the dxo belongs to
            fl_ctx: context provided by workflow

        Returns: filtered result.
        """
        self._setup(fl_ctx)

        # Compute inversions
        current_round = dxo.get_meta_prop(AppConstants.CURRENT_ROUND)
        if current_round is None:
            raise ValueError("No current round available!")
        # Only run the (expensive) attack every `frequency` rounds, starting at `start_round`.
        if current_round % self.frequency == 0 and current_round >= self.start_round:
            recons = self.inverter(dxo=dxo, shareable=shareable, fl_ctx=fl_ctx)
            self.log_info(fl_ctx, f"Created reconstructions of shape {np.shape(recons)}")

            # compute (relative) data leakage value
            try:
                self.log_info(
                    fl_ctx,
                    f"Computing sim metrics for {len(self.train_loader)}x{len(recons)} pairs of images",
                )
                (img_recon_sim_reduced, img_recon_sim, best_matches, closest_idx,) = self.compute_rdlv(
                    train_loader=self.train_loader,
                    recons=recons,
                    sim_metric=self.sim_metric,
                    reduce=self.rdlv_reduce,
                    recon_transforms=self.recon_transforms,
                    image_key=self.image_key,
                    prior=self.prior,
                )
                # Reduce over the training images to get one RDLV per metric.
                if self.rdlv_reduce == "max":
                    rdlv = np.max(img_recon_sim_reduced, axis=0)
                elif self.rdlv_reduce == "min":
                    rdlv = np.min(img_recon_sim_reduced, axis=0)
                else:
                    raise ValueError(f"No such `rdlv_reduce` supported {self.rdlv_reduce}")
            except Exception as e:
                raise RuntimeError("Computing RDLV failed!") from e

            self.log_info(fl_ctx, f"RDLV: {rdlv}")

            # Persist the full results for offline analysis (e.g. plotting scripts).
            results = {
                "img_recon_sim_reduced": img_recon_sim_reduced,
                "img_recon_sim": img_recon_sim,
                "closest_idx": closest_idx,
                "site": fl_ctx.get_identity_name(),
                "round": current_round,
            }
            if self.save_best_matches:
                results["best_matches"] = best_matches
            save_path = os.path.join(self.app_root, f"rdvl_round{current_round}.npy")
            np.save(save_path, results)

            # Remove data if above threshold
            if np.any(rdlv > self.rdlv_threshold):
                self.log_warning(
                    fl_ctx,
                    f"At least one RDLV of {rdlv} is over the threshold {self.rdlv_threshold}! Remove data.",
                )
                dxo.data = {}

        return dxo

    @staticmethod
    def compute_rdlv(
        train_loader,
        recons,
        sim_metric,
        reduce="max",
        recon_transforms=None,
        image_key="image",
        prior=None,
        fl_ctx: FLContext = None,
    ):
        # Compute the similarity of every (training image, reconstruction) pair and,
        # when a prior is given, normalize it by the image-to-prior similarity:
        # RDLV = (sim(img, recon) - sim(img, prior)) / sim(img, prior).
        # TODO: Enforce using only one metric
        # TODO: use recon/reference terminology
        img_recon_sim = np.nan * np.ones((len(train_loader), len(recons), len(sim_metric.metrics)))
        if prior is not None:
            img_prior_sim = np.nan * np.ones((len(train_loader), len(sim_metric.metrics)))
        pairs = []
        imgs = []
        for i, batch_data in enumerate(train_loader):
            img = batch_data[image_key]
            if img.shape[0] != 1:
                raise ValueError(f"Assume batch dimension to be 1 but received {image_key} batch of shape {img.shape}")
            img = img[0, ...]  # assume batch size 1
            imgs.append(img)
            # compute similarity to prior
            if prior is not None:
                _outputs = sim_metric(
                    source=prior,
                    reference=img,
                    fl_ctx=fl_ctx,
                    is_channel_first=True,
                )
                if isinstance(_outputs, dict):
                    for m_idx, m_name in enumerate(sim_metric.metrics):  # TODO: make metrics part of the SimMetrics API
                        img_prior_sim[i, m_idx] = _outputs[m_name]
                else:
                    img_prior_sim[i, ...] = _outputs
            # compute similarity to reconstructions
            for r, recon in enumerate(recons):
                if recon_transforms:
                    recon = recon_transforms(recon)
                _outputs = sim_metric(
                    source=recon,
                    reference=img,
                    fl_ctx=fl_ctx,
                    is_channel_first=True,
                )
                if isinstance(_outputs, dict):
                    for m_idx, m_name in enumerate(sim_metric.metrics):  # TODO: make metrics part of the SimMetrics API
                        img_recon_sim[i, r, m_idx] = _outputs[m_name]
                else:
                    img_recon_sim[i, r, ...] = _outputs
                pairs.append([i, r])
                # Compute relative value (in-place normalization of this pair's entries)
                if prior is not None:
                    for m_idx in range(len(sim_metric.metrics)):
                        img_recon_sim[i, r, m_idx] = (
                            img_recon_sim[i, r, m_idx] - img_prior_sim[i, m_idx]
                        ) / img_prior_sim[i, m_idx]
            if (i + 1) % 32 == 0:
                print(f"processing original image {i + 1} of {len(train_loader)}...")
        # For each training image, find the closest reconstruction across the recon axis.
        if reduce == "max":
            closest_idx = np.argmax(img_recon_sim, axis=1)
        elif reduce == "min":
            closest_idx = np.argmin(img_recon_sim, axis=1)
        elif reduce is None:
            closest_idx = None
        else:
            raise NotImplementedError(f"No such reduce function implemented `{reduce}`")

        if closest_idx is not None:
            img_recon_sim_reduced = []
            best_matches = []
            for i, idx in enumerate(closest_idx):
                # record best value for each image using first metric
                img_recon_sim_reduced.append(img_recon_sim[i, idx[0], ...])
                best_matches.append([imgs[i], recons[idx[0]]])
        else:
            img_recon_sim_reduced = img_recon_sim
            best_matches = []

        return (
            np.array(img_recon_sim_reduced),
            img_recon_sim,
            best_matches,
            closest_idx,
        )
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/filters/rdlv_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def main():
    """Plot RDLV against the Gaussian-privacy setting across all workspaces.

    Recursively scans `--workspaces` for rdvl_*.npy result files, joins each
    with the client-side GaussianPrivacy `sigma0` setting and the cross-site
    validation accuracy of the best global model, then plots RDLV (per site)
    and testing accuracy against sigma0.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--workspaces",
        type=str,
        default="./workspaces",
        help="Workdir containing rdvl_*.npy files.",
    )
    args = parser.parse_args()

    rdvl_files = glob.glob(os.path.join(args.workspaces, "**", "rdvl_*.npy"), recursive=True)
    # Fix: message previously referenced non-existent `args.workspace`, which raised
    # AttributeError instead of the intended AssertionError when no files were found.
    assert len(rdvl_files) > 0, f"No RDVL files found in {args.workspaces}"

    results = {"RDVL": [], "Site": [], "Round": [], "sigma0": [], "test_accuracy": []}
    for rdvl_file in rdvl_files:
        _result = np.load(rdvl_file, allow_pickle=True).item()
        img_recon_sim_reduced = _result["img_recon_sim_reduced"]

        # read sigma0 from client config
        client_config_file = os.path.join(os.path.dirname(rdvl_file), "config", "config_fed_client.json")
        with open(client_config_file, "r") as f:
            client_config = json.load(f)
        gaussian_filter = client_config["task_result_filters"][0]["filters"][0]
        assert (
            "GaussianPrivacy" in gaussian_filter["path"]
        ), f"Expected filter to GaussianPrivacy but got {gaussian_filter['path']}"
        sigma0 = gaussian_filter["args"]["sigma0"]

        # read best global model accuracy from cross-site validation
        cross_val_file = os.path.join(
            os.path.dirname(os.path.dirname(rdvl_file)), "cross_site_val", "cross_val_results.json"
        )
        with open(cross_val_file, "r") as f:
            cross_val = json.load(f)
        best_model_perf = cross_val["site-1"]["SRV_best_FL_global_model.pt"]

        # one data point per training image's reduced RDLV
        for rdvl in img_recon_sim_reduced:
            results["RDVL"].append(float(rdvl))
            results["Site"].append(_result["site"])
            results["Round"].append(_result["round"])
            results["sigma0"].append(float(sigma0))
            results["test_accuracy"].append(best_model_perf["test_accuracy"])

    # plot RDLV (one line per site)
    sns.lineplot(x="sigma0", y="RDVL", hue="Site", data=results)
    plt.grid(True)
    plt.xlabel(r"Gaussian Privacy ($\sigma_0$)")  # raw string avoids invalid-escape warning
    plt.plot([np.min(results["sigma0"]), np.max(results["sigma0"])], [0, 0], "k", linewidth=1.0)

    # plot testing accuracy on a secondary y-axis
    ax2 = plt.twinx()
    sns.lineplot(x="sigma0", y="test_accuracy", data=results, color="tab:gray", ax=ax2)
    ax2.lines[0].set_linestyle("--")
    plt.grid(False)
    plt.ylim([0.0, 1.0])
    plt.ylabel("Testing Accuracy")
    plt.legend(["Testing Accuracy"], loc="lower left")
    plt.show()


if __name__ == "__main__":
    main()
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/utils/plot_rdlv_vs_gauss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import shutil
def load_config(config_file):
    """Read a JSON configuration file and return its contents."""
    with open(config_file, "r") as handle:
        contents = json.load(handle)
    return contents
def save_config(config_file, config):
    """Write *config* to *config_file* as indented JSON."""
    serialized = json.dumps(config, indent=4)
    with open(config_file, "w") as handle:
        handle.write(serialized)
def create_server_config(server_config_file, args):
    """Fill in the server config template and write it into the job output folder.

    Args:
        server_config_file: path to the server config template.
        args: parsed command-line arguments (uses n_clients, num_rounds, output).
    """
    server_config = load_config(server_config_file)
    server_config["min_clients"] = args.n_clients
    server_config["num_rounds"] = args.num_rounds
    out_file = os.path.join(args.output, "server", "config", "config_fed_server.json")
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_config(out_file, server_config)
    print(
        f"Created {out_file} to use min_clients={server_config['min_clients']} and num_rounds={server_config['num_rounds']}"
    )
def create_client_config(client_config_file, args, client, bs):
    """Fill in one client's config template and write it into the job output folder.

    Args:
        client_config_file: path to the client config template.
        args: parsed command-line arguments.
        client: client number (used for site naming and dataset file selection).
        bs: batch size assigned to this client.
    """
    client_config = load_config(client_config_file)
    client_config["DATA_ROOT"] = args.data_root
    client_config["DATASET_JSON"] = args.dataset_json_prefix + f"{client}.json"
    client_config["BATCH_SIZE"] = bs
    client_config["SIGMA0"] = args.sigma0
    # only override the prior when the template actually declares one
    if args.prior_file and "PRIOR_FILE" in client_config:
        client_config["PRIOR_FILE"] = args.prior_file
    out_file = os.path.join(args.output, f"site-{client}", "config", "config_fed_client.json")
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_config(out_file, client_config)
    print(
        f"Created {out_file} to use DATA_ROOT={client_config['DATA_ROOT']}, DATASET_JSON={client_config['DATASET_JSON']}, and SIGMA0={client_config['SIGMA0']}"
    )
    if "PRIOR_FILE" in client_config:
        print(f"Used prior_file {client_config['PRIOR_FILE']}")
def create_gradinv_config(inv_config_file, args, client):
    """Fill in the gradient-inversion config template for one client site."""
    inv_config = load_config(inv_config_file)
    inv_config["img_prior"] = args.prior_file
    out_file = os.path.join(args.output, f"site-{client}", "config", "config_inversion.json")
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_config(out_file, inv_config)
    print(f"Created {out_file} to use prior_file {inv_config['img_prior']}")
def create_meta_config(clients, meta_config_file, args):
    """Write meta.json with a deploy map that assigns each app to its own site."""
    meta_config = load_config(meta_config_file)
    deploy_map = {"server": ["server"]}
    for client in clients:
        site_name = f"site-{client}"
        deploy_map[site_name] = [site_name]
    meta_config["deploy_map"] = deploy_map
    out_file = os.path.join(args.output, "meta.json")
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_config(out_file, meta_config)
    print(f"Created {out_file}")
def main():
    """Generate a complete NVFlare job config (server, meta, clients) from app templates.

    Reads the template configs under ``--app_folder``, fills in the run
    parameters given on the command line, and writes the resulting job config
    tree to ``--output``. A previous output folder is deleted first.

    Raises:
        ValueError: if the number of ``--batch_sizes`` entries is neither 1 nor
            equal to the number of clients.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--app_folder",
        type=str,
        help="Folder containing app config files in JSON format.",
    )
    parser.add_argument(
        "--output",
        type=str,
        help="Output folder containing the job config.",
    )
    parser.add_argument(
        "--data_root",
        type=str,
        help="Data root.",
    )
    parser.add_argument("--prior_file", type=str, help="Prior image filename")
    parser.add_argument(
        "--dataset_json_prefix",
        type=str,
        default="data_200val+rest_client",
        help="Prefix name for dataset JSON file.",
    )
    parser.add_argument(
        "--n_clients",
        type=int,
        default=1,
        help="Number of clients to generate.",
    )
    parser.add_argument(
        "--num_rounds",
        type=int,
        default=100,
        help="Number of federated learning rounds.",
    )
    parser.add_argument(
        "--invert_clients",
        type=str,
        default="9",
        help="Comma-separated list of client numbers to invert. Defaults to `high-risk` client 9.",
    )
    parser.add_argument(
        "--batch_sizes",
        type=str,
        default="1",
        help="Comma-separated list of batches for each client.",
    )
    parser.add_argument(
        "--sigma0",
        type=float,
        default=0.0,
        help="Noise level for `GaussianPrivacy` filter.",
    )
    args = parser.parse_args()
    if os.path.isdir(args.output):
        print(f"Deleting previous job_config at {args.output}")
        shutil.rmtree(args.output)
    invert_clients = [int(x) for x in args.invert_clients.split(",")]
    batch_sizes = [int(x) for x in args.batch_sizes.split(",")]
    # Check template app folder
    assert os.path.isdir(args.app_folder), f"app_folder does not exist at {args.app_folder}"
    # create server config
    server_config_file = os.path.join(args.app_folder, "server", "config", "config_fed_server.json")
    create_server_config(server_config_file, args)
    # if n_clients is 1, we use the "high-risk" client 9's dataset
    if args.n_clients == 1:
        clients = [9]
    else:
        clients = list(range(1, args.n_clients + 1))
    # BUG FIX: the original used zip() directly, which silently skipped clients
    # whenever fewer batch sizes than clients were given (e.g. the default "1"
    # with --n_clients > 1). Broadcast a single value; reject other mismatches.
    if len(batch_sizes) == 1:
        batch_sizes = batch_sizes * len(clients)
    elif len(batch_sizes) != len(clients):
        raise ValueError(
            f"Number of batch_sizes ({len(batch_sizes)}) must be 1 or match the number of clients ({len(clients)})"
        )
    # create meta.json
    meta_config_file = os.path.join(args.app_folder, "meta.json")
    create_meta_config(clients, meta_config_file, args)
    # create client configs
    for client, bs in zip(clients, batch_sizes):
        if client in invert_clients:
            print(f"Inverting site-{client} ...")
            client_config_file = os.path.join(args.app_folder, "client", "config", "config_fed_client_inv.json")
            inv_config_file = os.path.join(args.app_folder, "client", "config", "config_inversion.json")
            # create gradient inversion config
            create_gradinv_config(inv_config_file, args, client)
        else:
            client_config_file = os.path.join(args.app_folder, "client", "config", "config_fed_client_noinv.json")
        # create client config file
        create_client_config(client_config_file, args, client, bs)
# Generate the job config tree when executed directly.
if __name__ == "__main__":
    main()
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/utils/create_job_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def main():
    """Plot the relative data-leakage value (RDVL) per training round for each site."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--workspace",
        type=str,
        default="./workspace",
        help="Workdir containing rdvl_*.npy files.",
    )
    args = parser.parse_args()
    search_pattern = os.path.join(args.workspace, "**", "rdvl_*.npy")
    rdvl_files = glob.glob(search_pattern, recursive=True)
    assert len(rdvl_files) > 0, f"No RDVL files found in {args.workspace}"
    # collect one (RDVL, Site, Round) row per reconstruction-similarity value
    rdvl_values, sites, rounds = [], [], []
    for result_file in rdvl_files:
        record = np.load(result_file, allow_pickle=True).item()
        for similarity in record["img_recon_sim_reduced"]:
            rdvl_values.append(float(similarity))
            sites.append(record["site"])
            rounds.append(record["round"])
    results = {"RDVL": rdvl_values, "Site": sites, "Round": rounds}
    # plot
    sns.lineplot(x="Round", y="RDVL", hue="Site", data=results)
    # zero reference line spanning the full round range
    plt.plot([np.min(results["Round"]), np.max(results["Round"])], [0, 0], "k", linewidth=1.0)
    plt.grid()
    plt.show()
# Plot per-round RDVL results when executed directly.
if __name__ == "__main__":
    main()
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/utils/plot_rdlv_per_round.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import monai
import numpy as np
import torch
from monai.data import CacheDataset, load_decathlon_datalist
from monai.networks.nets.torchvision_fc import TorchVisionFCModel
from monai.transforms import (
CastToTyped,
Compose,
EnsureChannelFirstd,
EnsureTyped,
LoadImaged,
NormalizeIntensityd,
RepeatChanneld,
Resized,
ToNumpyd,
)
from sklearn.metrics import roc_auc_score
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ModelName, ValidateType
from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss
class CXRLearner(Learner):
    def __init__(
        self,
        data_root: str = "./dataset",
        dataset_json: str = "./dataset.json",
        aggregation_epochs: int = 1,
        lr: float = 1e-2,
        fedproxloss_mu: float = 0.0,
        analytic_sender_id: str = "analytic_sender",
        batch_size: int = 64,
        num_workers: int = 0,
        train_set: str = "training",
        valid_set: str = "validation",
        test_set: str = "testing",
        model_name: str = "resnet18",
        num_class: int = 2,
        cache_rate: float = 1.0,
        seed: int = 0,
    ):
        """Simple CXR Trainer.

        Args:
            data_root: root directory for data
            dataset_json: JSON data list specifying the train, validation, and test sets.
            aggregation_epochs: the number of training epochs for a round. Defaults to 1.
            lr: local learning rate. Float number. Defaults to 1e-2.
            fedproxloss_mu: weight for FedProx loss. Float number. Defaults to 0.0 (no FedProx).
            analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
                If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
            batch_size: batch size for training and validation.
            num_workers: number of workers for data loaders.
            train_set: name of training data list. Defaults to "training".
            valid_set: name of validation data list. Defaults to "validation".
            test_set: name of testing data list. Defaults to "testing".
            model_name: name of torchvision model compatible with MONAI's TorchVisionFCModel class. Defaults to "resnet18".
            num_class: Number of prediction classes. Defaults to 2.
            cache_rate: Cache rate used in CacheDataset.
            seed: Seed used for deterministic training.

        Note:
            The learner's task methods (train/validate) return a Shareable with the
            model updates, validation scores, or the best local model depending on
            the specified task.
        """
        super().__init__()
        # trainer init happens at the very beginning, only the basic info regarding the trainer is set here
        # the actual run has not started at this point
        # some input checks
        if isinstance(data_root, str):
            if not os.path.isdir(data_root):
                raise ValueError(f"`data_root` directory does not exist at {data_root}")
        else:
            raise ValueError(f"Expected `data_root` of type `str` but received type {type(data_root)}")
        if isinstance(dataset_json, str):
            if not os.path.isfile(dataset_json):
                raise ValueError(f"`dataset_json` file does not exist at {dataset_json}")
        else:
            raise ValueError(f"Expected `dataset_json` of type `str` but received type {type(dataset_json)}")
        # configuration captured from constructor arguments
        self.seed = seed
        self.data_root = data_root
        self.dataset_json = dataset_json
        self.aggregation_epochs = aggregation_epochs
        self.lr = lr
        self.fedproxloss_mu = fedproxloss_mu
        # best validation accuracy observed so far (used for checkpointing)
        self.best_acc = 0.0
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.train_set = train_set
        self.valid_set = valid_set
        self.test_set = test_set
        self.model_name = model_name
        self.num_class = num_class
        self.cache_rate = cache_rate
        self.writer = None
        self.analytic_sender_id = analytic_sender_id
        # Epoch counter
        self.epoch_of_start_time = 0
        self.epoch_global = 0
        # following will be created in initialize() or later
        self.app_root = None
        self.client_id = None
        self.local_model_file = None
        self.best_local_model_file = None
        self.writer = None
        self.device = None
        self.model = None
        self.optimizer = None
        self.scheduler = None
        self.criterion = None
        self.criterion_prox = None
        self.transform_train = None
        self.transform_valid = None
        self.train_dataset = None
        self.valid_dataset = None
        self.test_dataset = None
        self.train_loader = None
        self.valid_loader = None
        self.test_loader = None
        self.global_weights = None
        self.current_round = None
    def initialize(self, parts: dict, fl_ctx: FLContext):
        """Set up model, transforms, datasets, loaders, and optimizer when the run starts.

        Args:
            parts: client components; may contain an `AnalyticsSender` under
                ``self.analytic_sender_id`` for event-based metric streaming.
            fl_ctx: FL context providing the app root and client identity.
        """
        # when the run starts, this is where the actual settings get initialized for trainer
        if self.seed is not None:
            self.logger.info(f"Use deterministic training with seed={self.seed}")
            monai.utils.misc.set_determinism(seed=self.seed, use_deterministic_algorithms=True)
        # Set the paths according to fl_ctx
        self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
        fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
        self.client_id = fl_ctx.get_identity_name()
        self.log_info(
            fl_ctx,
            f"Client {self.client_id} initialized at \n {self.app_root} \n with args: {fl_args}",
        )
        self.local_model_file = os.path.join(self.app_root, "local_model.pt")
        self.best_local_model_file = os.path.join(self.app_root, "best_local_model.pt")
        # Select local TensorBoard writer or event-based writer for streaming
        self.writer = parts.get(self.analytic_sender_id)  # user configured config_fed_client.json for streaming
        if not self.writer:  # use local TensorBoard writer only
            self.writer = SummaryWriter(self.app_root)
        # set the training-related parameters
        # can be replaced by a config-style block
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = TorchVisionFCModel(
            model_name=self.model_name,
            num_classes=self.num_class,
            bias=True,
            pretrained=False,  # server uses pretrained weights and initializes clients
        )
        self.model = self.model.to(self.device)
        self.criterion = torch.nn.CrossEntropyLoss()
        if self.fedproxloss_mu > 0:
            self.log_info(fl_ctx, f"using FedProx loss with mu {self.fedproxloss_mu}")
            self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu)
        """ Data """
        # Set datalists
        train_list = load_decathlon_datalist(
            data_list_file_path=self.dataset_json,
            is_segmentation=False,
            data_list_key=self.train_set,
            base_dir=self.data_root,
        )
        val_list = load_decathlon_datalist(
            data_list_file_path=self.dataset_json,
            is_segmentation=False,
            data_list_key=self.valid_set,
            base_dir=self.data_root,
        )
        # test set is optional
        try:
            test_list = load_decathlon_datalist(
                data_list_file_path=self.dataset_json,
                is_segmentation=False,
                data_list_key=self.test_set,
                base_dir=self.data_root,
            )
        except Exception as e:
            test_list = []
            self.log_warning(fl_ctx, f"Could not create test_list: {e}")
        self.log_info(
            fl_ctx,
            f"{self.client_id}: Training Size ({self.train_set}): {len(train_list)}, "
            f"Validation Size ({self.valid_set}): {len(val_list)}, "
            f"Testing Size ({self.test_set}): {len(test_list)}",
        )
        # avoid a batch size larger than the training set
        if self.batch_size > len(train_list):
            self.batch_size = len(train_list)
        # scale to [0, 1], repeat to 3 channels, then normalize with the
        # standard ImageNet channel statistics (matches pretrained torchvision models)
        self.transform_train = Compose(
            [
                LoadImaged(keys=["image"]),
                EnsureChannelFirstd(keys=["image"]),
                NormalizeIntensityd(keys=["image"], subtrahend=0, divisor=255, dtype="float32"),
                RepeatChanneld(keys=["image"], repeats=3),
                NormalizeIntensityd(
                    keys=["image"],
                    subtrahend=[0.485, 0.456, 0.406],
                    divisor=[0.229, 0.224, 0.225],
                    dtype="float32",
                    channel_wise=True,
                ),
                Resized(keys=["image"], spatial_size=[224, 224]),
                ToNumpyd(keys=["label"]),
                CastToTyped(keys=["label"], dtype="long"),
                EnsureTyped(keys=["image", "label"]),
            ]
        )
        # validation uses the same (deterministic) preprocessing chain
        self.transform_valid = self.transform_train
        self.train_dataset = CacheDataset(
            data=train_list,
            transform=self.transform_train,
            cache_rate=self.cache_rate,
            num_workers=self.num_workers,
        )
        self.valid_dataset = CacheDataset(
            data=val_list,
            transform=self.transform_valid,
            cache_rate=self.cache_rate,
            num_workers=self.num_workers,
        )
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            shuffle=True,
        )
        self.valid_loader = torch.utils.data.DataLoader(
            self.valid_dataset,
            batch_size=self.batch_size,
            shuffle=False,
        )
        if test_list:  # optional
            self.test_dataset = CacheDataset(
                data=test_list,
                transform=self.transform_valid,
                cache_rate=self.cache_rate,
                num_workers=self.num_workers,
            )
            self.test_loader = torch.utils.data.DataLoader(
                self.test_dataset,
                batch_size=self.batch_size,
                shuffle=False,
            )
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.0)
        self.log_info(fl_ctx, "No-private training")
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=40, gamma=0.1)
    def finalize(self, fl_ctx: FLContext):
        """Clean up resources when the run ends (currently a no-op)."""
        # collect threads, close files here
        pass
    def local_train(
        self,
        fl_ctx,
        train_loader,
        model_global,
        abort_signal: Signal,
        val_freq: int = 0,
    ):
        """Train the local model for ``self.aggregation_epochs`` epochs.

        Args:
            fl_ctx: FL context used for logging.
            train_loader: training data loader yielding dicts with "image" and "label".
            model_global: frozen copy of the global model, used for the FedProx term.
            abort_signal: training returns early when triggered.
            val_freq: if > 0, run local validation every `val_freq` epochs and
                checkpoint the best model.
        """
        for epoch in range(self.aggregation_epochs):
            if abort_signal.triggered:
                return
            self.model.train()
            epoch_len = len(train_loader)
            self.epoch_global = self.epoch_of_start_time + epoch
            avg_loss = 0.0
            for i, batch_data in enumerate(train_loader):
                if abort_signal.triggered:
                    return
                inputs = batch_data["image"].to(self.device)
                labels = batch_data["label"].to(self.device)
                # zero the parameter gradients
                self.optimizer.zero_grad()
                # forward + backward + optimize
                outputs = self.model(inputs)
                loss = self.criterion(outputs, torch.squeeze(labels, axis=1))
                # FedProx loss term
                if self.fedproxloss_mu > 0:
                    fed_prox_loss = self.criterion_prox(self.model, model_global)
                    loss += fed_prox_loss
                loss.backward()
                self.optimizer.step()
                current_step = epoch_len * self.epoch_global + i
                avg_loss += loss.item()
            # log the epoch-mean loss at the step index of the epoch's last batch
            avg_loss = avg_loss / len(train_loader)
            self.writer.add_scalar("train_loss", avg_loss, current_step)
            self.log_info(
                fl_ctx,
                f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} "
                f"(lr={self.get_lr()[0]}) avg_loss: {avg_loss:.4f}",
            )
            if val_freq > 0 and epoch % val_freq == 0:
                acc, _ = self.local_valid(
                    self.valid_loader,
                    abort_signal,
                    tb_id="val_local_model",
                    fl_ctx=fl_ctx,
                )
                if acc > self.best_acc:
                    self.best_acc = acc
                    self.save_model(is_best=True)
            # update lr scheduler at end of epoch
            self.scheduler.step()
def save_model(self, is_best=False):
# save model
model_weights = self.model.state_dict()
save_model_weights = {}
for k in model_weights.keys():
save_model_weights[k.replace("_module.", "")] = model_weights[k] # remove the prefix added by opacus
save_dict = {
"model_weights": save_model_weights,
"epoch": self.epoch_global,
}
if is_best:
save_dict.update({"best_acc": self.best_acc})
torch.save(save_dict, self.best_local_model_file)
else:
torch.save(save_dict, self.local_model_file)
    def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Run one federated round of local training.

        Loads the received global weights into the local model, trains for
        ``self.aggregation_epochs`` epochs, validates and checkpoints, then
        returns the local-minus-global weight difference as a WEIGHT_DIFF DXO.

        Raises:
            ValueError: if no global weights could be loaded or the weight
                delta could not be computed for every layer.
        """
        # Check abort signal
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # get round information
        self.current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
        self.log_info(
            fl_ctx,
            f"Current/Total Round: {self.current_round + 1}/{total_rounds}",
        )
        self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
        # update local model weights with received weights
        dxo = from_shareable(shareable)
        global_weights = dxo.data
        # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
        local_var_dict = self.model.state_dict()
        model_keys = global_weights.keys()
        n_loaded = 0
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = global_weights[var_name]
                try:
                    # reshape global weights to compute difference later on
                    global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
                    # update the local dict
                    local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
                    n_loaded += 1
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed") from e
        if n_loaded == 0:
            raise ValueError(f"No weights loaded for training! Received weight dict is {global_weights}")
        self.log_info(
            fl_ctx,
            f"Loaded global weights from {n_loaded} of {len(local_var_dict)} local layers.",
        )
        self.model.load_state_dict(local_var_dict)
        self.global_weights = local_var_dict  # update global weights so they can be accessed by inversion filter
        # local steps
        epoch_len = len(self.train_loader)
        self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
        # make a copy of model_global as reference for potential FedProx loss or SCAFFOLD
        model_global = copy.deepcopy(self.model)
        for param in model_global.parameters():
            param.requires_grad = False
        # local train
        self.local_train(
            fl_ctx=fl_ctx,
            train_loader=self.train_loader,
            model_global=model_global,
            abort_signal=abort_signal,
            val_freq=1,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        self.epoch_of_start_time += self.aggregation_epochs
        # perform valid after local train
        acc, auc = self.local_valid(
            self.valid_loader,
            abort_signal,
            tb_id="val_local_model",
            fl_ctx=fl_ctx,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        self.log_info(fl_ctx, f"val_local_model: accuracy: {acc:.4f}, auc: {auc:.4f}")
        # save model
        self.save_model(is_best=False)
        if acc > self.best_acc:
            self.best_acc = acc
            self.save_model(is_best=True)
        # compute delta model, global model has the primary key set
        local_weights = self.model.state_dict()
        model_diff = {}
        diff_norm = 0.0
        n_global, n_local = 0, 0
        for var_name in local_weights:
            n_local += 1
            if var_name not in global_weights:
                continue
            model_diff[var_name] = np.subtract(
                local_weights[var_name].cpu().numpy(), global_weights[var_name], dtype=np.float32
            )
            if np.any(np.isnan(model_diff[var_name])):
                self.system_panic(f"{var_name} weights became NaN...", fl_ctx)
                return make_reply(ReturnCode.EXECUTION_EXCEPTION)
            n_global += 1
            diff_norm += np.linalg.norm(model_diff[var_name])
        if n_global != n_local:
            raise ValueError(
                f"Could not compute delta for all layers! Only {n_local} local of {n_global} global layers computed..."
            )
        self.log_info(
            fl_ctx,
            f"diff norm for {n_local} local of {n_global} global layers: {diff_norm}",
        )
        # build the shareable
        dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
        dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
        dxo.set_meta_prop(
            AppConstants.CURRENT_ROUND, self.current_round
        )  # TODO: check this is already available on server
        self.log_info(fl_ctx, "Local epochs finished. Returning shareable.")
        return dxo.to_shareable()
def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable:
# Retrieve the best local model saved during training.
if model_name == ModelName.BEST_MODEL:
model_data = None
try:
# load model to cpu as server might or might not have a GPU
model_data = torch.load(self.best_local_model_file, map_location="cpu")
except Exception as e:
raise ValueError("Unable to load best model") from e
# Create DXO and shareable from model data.
if model_data:
# convert weights to numpy to support FOBS
model_weights = model_data["model_weights"]
for k, v in model_weights.items():
model_weights[k] = v.numpy()
dxo = DXO(data_kind=DataKind.WEIGHTS, data=model_weights)
return dxo.to_shareable()
else:
# Set return code.
self.log_error(
fl_ctx,
f"best local model not found at {self.best_local_model_file}.",
)
return make_reply(ReturnCode.EXECUTION_RESULT_ERROR)
else:
raise ValueError(f"Unknown model_type: {model_name}") # Raised errors are caught in LearnerExecutor class.
def local_valid(self, valid_loader, abort_signal: Signal, tb_id=None, fl_ctx=None):
self.model.eval()
with torch.no_grad():
correct, total = 0, 0
_all_pred_labels = []
_all_labels = []
for _i, batch_data in enumerate(valid_loader):
if abort_signal.triggered:
return None
inputs = batch_data["image"].to(self.device)
labels = batch_data["label"].to(self.device)
outputs = self.model(inputs)
_, pred_label = torch.max(outputs.data, 1)
total += inputs.data.size()[0]
correct += (pred_label == torch.squeeze(labels.data, axis=1)).sum().item()
_all_pred_labels.extend(pred_label.cpu().numpy())
_all_labels.extend(labels.data.cpu().numpy())
acc_metric = correct / float(total)
if len(np.unique(_all_labels)) == 2:
auc_metric = roc_auc_score(_all_labels, _all_pred_labels)
else:
auc_metric = None
if tb_id:
self.writer.add_scalar(tb_id + "_acc", acc_metric, self.epoch_global)
self.writer.add_scalar(tb_id + "_auc", auc_metric, self.epoch_global)
return acc_metric, auc_metric
    def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Validate a received model, either before local training or for cross-site evaluation.

        Two modes are selected by the VALIDATE_TYPE header:
        BEFORE_TRAIN_VALIDATE returns the global model's validation accuracy;
        MODEL_VALIDATE evaluates on train/validation/test sets and returns all metrics.

        Raises:
            ValueError: if no weights could be loaded into the local model.
        """
        # Check abort signal
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # get validation information
        validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
        self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
        model_owner = shareable.get(ReservedHeaderKey.HEADERS).get(AppConstants.MODEL_OWNER)
        if model_owner:
            self.log_info(
                fl_ctx,
                f"Evaluating model from {model_owner} on {fl_ctx.get_identity_name()}",
            )
        else:
            model_owner = "global_model"  # evaluating global model during training
        # update local model weights with received weights
        dxo = from_shareable(shareable)
        global_weights = dxo.data
        # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
        local_var_dict = self.model.state_dict()
        model_keys = global_weights.keys()
        n_loaded = 0
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = torch.as_tensor(global_weights[var_name], device=self.device)
                try:
                    # update the local dict
                    local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
                    n_loaded += 1
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed for {validate_type}") from e
        self.model.load_state_dict(local_var_dict)
        if n_loaded == 0:
            raise ValueError(
                f"No weights loaded for validation for {validate_type}! Received weight dict is {global_weights}"
            )
        if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # perform valid before local train
            global_acc, global_auc = self.local_valid(
                self.valid_loader,
                abort_signal,
                tb_id="val_global_model",
                fl_ctx=fl_ctx,
            )
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(
                fl_ctx,
                f"global_model ({model_owner}): accuracy: {global_acc:.4f}, auc: {global_auc:.4f}",
            )
            return DXO(
                data_kind=DataKind.METRICS,
                data={MetaKey.INITIAL_METRICS: global_acc},
                meta={},
            ).to_shareable()
        elif validate_type == ValidateType.MODEL_VALIDATE:
            if self.test_loader is None:
                self.log_warning(fl_ctx, "No test data available. Skipping validation.")
                val_results = {}
            else:
                # perform valid
                train_acc, train_auc = self.local_valid(self.train_loader, abort_signal)
                self.log_info(
                    fl_ctx,
                    f"training acc ({model_owner}): {train_acc:.4f}, auc: {train_auc:.4f}",
                )
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                val_acc, val_auc = self.local_valid(self.valid_loader, abort_signal)
                self.log_info(
                    fl_ctx,
                    f"validation acc ({model_owner}): {val_acc:.4f}, auc: {val_auc:.4f}",
                )
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                test_acc, test_auc = self.local_valid(self.test_loader, abort_signal)
                self.log_info(
                    fl_ctx,
                    f"testing acc ({model_owner}): {test_acc:.4f}, auc: {test_auc:.4f}",
                )
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                self.log_info(fl_ctx, "Evaluation finished. Returning shareable.")
                val_results = {
                    "train_accuracy": train_acc,
                    "val_accuracy": val_acc,
                    "test_accuracy": test_acc,
                    "train_auc": train_auc,
                    "val_auc": val_auc,
                    "test_auc": test_auc,
                }
            metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results)
            return metric_dxo.to_shareable()
        else:
            return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
def get_lr(self):
"""
This function is used to get the learning rates of the optimizer.
"""
return [group["lr"] for group in self.optimizer.state_dict()["param_groups"]]
| NVFlare-main | research/quantifying-data-leakage/src/nvflare_gradinv/learners/cxr_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.aggregators.assembler import Assembler
from nvflare.app_common.app_constant import AppConstants
class FedCEAssembler(Assembler):
def __init__(self, fedce_mode, model):
super().__init__(data_kind=DataKind.WEIGHT_DIFF)
# mode, plus or times
self.fedce_mode = fedce_mode
self.model = model
self.fedce_cos_param_list = []
# Aggregator needs to keep record of historical
# cosine similarity for FedCM coefficients
self.fedce_cos_sim = {}
self.fedce_coef = {}
def _initialize(self, fl_ctx: FLContext):
# convert str model description to model
if isinstance(self.model, str):
# treat it as model component ID
model_component_id = self.model
engine = fl_ctx.get_engine()
self.model = engine.get_component(model_component_id)
if not self.model:
self.system_panic(
reason=f"cannot find model component '{model_component_id}'",
fl_ctx=fl_ctx,
)
return
if not isinstance(self.model, torch.nn.Module):
self.system_panic(
reason=f"expect model component '{model_component_id}' to be torch.nn.Module but got {type(self.model_selector)}",
fl_ctx=fl_ctx,
)
return
elif self.model and not isinstance(self.model, torch.nn.Module):
self.system_panic(
reason=f"expect model to be torch.nn.Module but got {type(self.model)}",
fl_ctx=fl_ctx,
)
return
# only include params requires_grad for cosine similarity computation
for name, param in self.model.named_parameters():
if param.requires_grad:
self.fedce_cos_param_list.append(name)
self.log_info(fl_ctx, "FedCE model assembler initialized")
def get_model_params(self, dxo: DXO):
data = dxo.data
meta = dxo.meta
return {"model": data, "fedce_minus_val": meta["fedce_minus_val"]}
    def handle_event(self, event: str, fl_ctx: FLContext):
        """Dispatch framework events; one-time initialization happens on START_RUN."""
        if event == EventType.START_RUN:
            self._initialize(fl_ctx)
    def assemble(self, data: Dict[str, dict], fl_ctx: FLContext) -> DXO:
        """Aggregate client weight differences into a global update using FedCE coefficients.

        For each site, a "minus" gradient (consensus minus the site's own
        contribution) is formed; competence weights are derived from the
        historical cosine similarity between a site's gradient and its minus
        gradient, combined with the submitted minus-validation metrics. The
        new coefficients are returned in the DXO meta under "fedce_coef".

        Args:
            data: per-site payloads as produced by :meth:`get_model_params`.
            fl_ctx: FL context providing the current round number.

        Returns:
            DXO with the coefficient-weighted global update.
        """
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        site_list = data.keys()
        fedce_minus_vals = []
        self.fedce_cos_sim[current_round] = {}
        for site in site_list:
            if current_round == 0:
                # round 0, initialize uniform fedce_coef
                self.fedce_coef[site] = 1 / len(site_list)
            # get minus_val from submissions
            fedce_minus_vals.append(data[site]["fedce_minus_val"])
        # generate consensus gradient with current FedCE coefficients
        consensus_grad = []
        global_weights = self.model.state_dict()
        for idx, name in enumerate(global_weights):
            if name in self.fedce_cos_param_list:
                temp = torch.zeros_like(global_weights[name])
                for site in site_list:
                    temp += self.fedce_coef[site] * torch.as_tensor(data[site]["model"][name])
                consensus_grad.append(temp.data.view(-1))
        # flatten for cosine similarity computation
        consensus_grads_vec = torch.cat(consensus_grad).to("cpu")
        # generate minus gradients and compute cosine similarity
        for site in site_list:
            site_grad = []
            for name in self.fedce_cos_param_list:
                site_grad.append(torch.as_tensor(data[site]["model"][name]).data.view(-1))
            site_grads_vec = torch.cat(site_grad).to("cpu")
            # minus gradient
            minus_grads_vec = consensus_grads_vec - self.fedce_coef[site] * site_grads_vec
            # compute cosine similarity
            fedce_cos_sim_site = (
                torch.cosine_similarity(site_grads_vec, minus_grads_vec, dim=0).detach().cpu().numpy().item()
            )
            # append to record dict
            self.fedce_cos_sim[current_round][site] = fedce_cos_sim_site
        # compute cos_weights and minus_vals based on the record for each site
        fedce_cos_weights = []
        for site in site_list:
            # cosine similarity averaged over all rounds so far
            cos_accu_avg = np.mean([self.fedce_cos_sim[i][site] for i in range(current_round + 1)])
            fedce_cos_weights.append(1.0 - cos_accu_avg)
        # normalize (the `/=` on a list rebinds to a numpy array), then clip away zeros
        fedce_cos_weights /= np.sum(fedce_cos_weights)
        fedce_cos_weights = np.clip(fedce_cos_weights, a_min=1e-3, a_max=None)
        fedce_minus_vals /= np.sum(fedce_minus_vals)
        fedce_minus_vals = np.clip(fedce_minus_vals, a_min=1e-3, a_max=None)
        # two aggregation strategies
        if self.fedce_mode == "times":
            new_fedce_coef = [c_w * mv_w for c_w, mv_w in zip(fedce_cos_weights, fedce_minus_vals)]
        elif self.fedce_mode == "plus":
            new_fedce_coef = [c_w + mv_w for c_w, mv_w in zip(fedce_cos_weights, fedce_minus_vals)]
        else:
            raise NotImplementedError
        # normalize again
        new_fedce_coef /= np.sum(new_fedce_coef)
        new_fedce_coef = np.clip(new_fedce_coef, a_min=1e-3, a_max=None)
        # update fedce_coef
        fedce_coef = {}
        idx = 0
        for site in site_list:
            fedce_coef[site] = new_fedce_coef[idx]
            idx += 1
        # compute global model update with the new fedce weights
        global_updates = {}
        for idx, name in enumerate(global_weights):
            temp = torch.zeros_like(global_weights[name], dtype=torch.float32)
            for site in site_list:
                weight = fedce_coef[site]
                temp += weight * data[site]["model"][name]
            global_updates[name] = temp.detach().cpu().numpy()
        meta = {"fedce_coef": fedce_coef}
        dxo = DXO(data_kind=self.expected_data_kind, data=global_updates, meta=meta)
        return dxo
| NVFlare-main | research/fed-ce/jobs/fedce_prostate/app/custom/aggregators/fedce_assembler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def custom_client_datalist_json_path(datalist_json_path: str, client_id: str) -> str:
    """Build the per-client datalist json path.
    Args:
        datalist_json_path: root path containing all jsons
        client_id: e.g., site-2
    Returns:
        Path of the client's datalist, ``<datalist_json_path>/<client_id>.json``.
    """
    return os.path.join(datalist_json_path, f"{client_id}.json")
| NVFlare-main | research/fed-ce/jobs/fedce_prostate/app/custom/utils/custom_client_datalist_json_path.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import torch
from learners.supervised_monai_prostate_learner import SupervisedMonaiProstateLearner
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants, ValidateType
class SupervisedMonaiProstateFedCELearner(SupervisedMonaiProstateLearner):
    def __init__(
        self,
        train_config_filename,
        aggregation_epochs: int = 1,
        train_task_name: str = AppConstants.TASK_TRAIN,
    ):
        """FedCE trainer for the prostate segmentation task.
        It inherits from the MONAI-based SupervisedMonaiProstateLearner and adds
        the client-side FedCE bookkeeping: validation of the "minus" model and
        a snapshot of the local model from the previous round.
        Args:
            train_config_filename: path of the training config file.
            aggregation_epochs: the number of local training epochs for a round. Defaults to 1.
            train_task_name: name of the task to train the model.
        Returns:
            a Shareable with the updated local model after running `execute()`
        """
        SupervisedMonaiProstateLearner.__init__(
            self,
            train_config_filename=train_config_filename,
            aggregation_epochs=aggregation_epochs,
            train_task_name=train_task_name,
        )
        # Per-round FedCE records (keyed by round index).
        self.fedce_cos_sim = {}
        self.fedce_minus_val = {}
        # Copy of the local model at the end of the previous round.
        self.model_last_round = None
    def train_config(self, fl_ctx: FLContext):
        """Run the superclass setup, then snapshot the freshly built model."""
        # initialize superclass
        SupervisedMonaiProstateLearner.train_config(self, fl_ctx)
        # initialize last round model record
        self.model_last_round = copy.deepcopy(self.model)
    def get_minus_model(self, global_model, last_round_model, fedce_weight):
        """Reconstruct the "minus" model: the aggregate of all OTHER clients.
        Solves global = w * last_round + (1 - w) * minus for minus, i.e.
        minus = (global - w * last_round) / (1 - w), applied per tensor.
        NOTE(review): divides by (1 - fedce_weight); assumes the server keeps
        each coefficient strictly below 1 -- confirm server-side clipping.
        """
        minus_model = copy.deepcopy(global_model)
        for key in minus_model.state_dict().keys():
            temp = (global_model.state_dict()[key] - fedce_weight * last_round_model.state_dict()[key]) / (
                1 - fedce_weight
            )
            minus_model.state_dict()[key].data.copy_(temp)
        return minus_model
    def reshape_global_weights(self, local_var_dict: dict, global_weights: dict):
        """Reshape received global weights to the local tensor shapes and load
        them into ``local_var_dict``; returns the updated dict. Raises if no
        weight could be loaded."""
        model_keys = global_weights.keys()
        n_loaded = 0
        # tensors might need to be reshaped to support HE for secure aggregation.
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = global_weights[var_name]
                try:
                    # reshape global weights to compute difference later on
                    global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
                    # update the local dict
                    local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
                    n_loaded += 1
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed with error: {str(e)}")
        if n_loaded == 0:
            raise ValueError(f"No global weights loaded! Received weight dict is {global_weights}")
        return local_var_dict
    def compute_model_diff(self, initial_model: dict, end_model: dict, fl_ctx: FLContext):
        """Compute per-tensor numpy deltas (end - initial) for shared weights.
        NOTE(review): on NaN this returns a Shareable (make_reply) instead of
        the diff dict; system_panic should end the run, but the inconsistent
        return type is worth confirming against the caller.
        """
        model_diff = {}
        for name in initial_model:
            if name not in end_model:
                continue
            model_diff[name] = np.subtract(end_model[name].cpu().numpy(), initial_model[name])
            if np.any(np.isnan(model_diff[name])):
                self.system_panic(f"{name} weights became NaN...", fl_ctx)
                return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        return model_diff
    def train(
        self,
        shareable: Shareable,
        fl_ctx: FLContext,
        abort_signal: Signal,
    ) -> Shareable:
        """Training task pipeline for FedCE
        Get global model weights and FedCE coefficient (potentially with HE)
        Validate the FedCE "minus" model (from the second round on)
        Local training from the received global model
        Return updated weights (model_diff) together with the historical mean
        of the minus-model validation metric
        """
        # get round information
        current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
        client_id = fl_ctx.get_identity_name()
        self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
        self.log_info(fl_ctx, f"Client identity: {client_id}")
        # get global model and FedCE coefficients from received dxo
        dxo = from_shareable(shareable)
        # global model weights
        global_weights = dxo.data
        local_var_dict = self.model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, global_weights)
        # load global model weights to local model
        self.model.load_state_dict(local_var_dict)
        # as part of FedCE training, minus model validation is needed
        # before local training from global model
        # from second round
        if current_round > 0:
            # get FedCE coefficient
            fedce_coef = dxo.meta["fedce_coef"][client_id]
            # get fedce_minus model
            fedce_minus_model = self.get_minus_model(
                self.model,
                self.model_last_round,
                fedce_coef,
            )
            # validate minus model
            minus_metric = self.local_valid(
                fedce_minus_model,
                self.valid_loader,
                abort_signal,
                tb_id="val_metric_minus_model",
                current_round=current_round,
            )
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_metric_minus_model: {minus_metric:.4f}")
            # add to the record dict
            self.fedce_minus_val[current_round] = minus_metric
        else:
            # round 0: no previous model yet, so no minus model to validate
            fedce_coef = 0.0
            self.fedce_minus_val[0] = 0.0
        self.writer.add_scalar("FedCE_Coef", fedce_coef, current_round)
        # local training from global weights
        epoch_len = len(self.train_loader)
        self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
        self.local_train(
            fl_ctx=fl_ctx,
            train_loader=self.train_loader,
            abort_signal=abort_signal,
            current_round=current_round,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # compute delta models, initial models has the primary key set
        local_weights = self.model.state_dict()
        model_diff_global = self.compute_model_diff(global_weights, local_weights, fl_ctx)
        # update model_last_round
        self.model_last_round.load_state_dict(local_weights)
        # build the shareable
        dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff_global)
        # use the historical mean of minus_val for FedCE
        minus_val = 1.0 - np.mean([self.fedce_minus_val[i] for i in range(current_round + 1)])
        dxo.set_meta_prop("fedce_minus_val", minus_val)
        self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
        return dxo.to_shareable()
    def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Validation task pipeline for FedCE
        Validate the received global model on local data
        Return validation score for server-end best model selection and record
        """
        # get round information
        current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        # validation on models from server
        # renamed to "models_from_server" to avoid confusion with "global_model"
        model_owner = "models_from_server"
        # update local model weights with received dxo
        dxo = from_shareable(shareable)
        # load global model weights
        global_weights = dxo.data
        local_var_dict = self.model.state_dict()
        local_var_dict = self.reshape_global_weights(local_var_dict, global_weights)
        self.model.load_state_dict(local_var_dict)
        # before_train_validate only, can extend to other validate types
        validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
        if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # perform valid before local train
            global_metric = self.local_valid(
                self.model,
                self.valid_loader,
                abort_signal,
                tb_id="val_metric_global_model",
                current_round=current_round,
            )
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_metric_global_model ({model_owner}): {global_metric:.4f}")
            # validation metrics will be averaged with weights at server end for best model record
            metric_dxo = DXO(
                data_kind=DataKind.METRICS,
                data={MetaKey.INITIAL_METRICS: global_metric},
            )
            metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
            return metric_dxo.to_shareable()
        else:
            return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | research/fed-ce/jobs/fedce_prostate/app/custom/learners/supervised_monai_prostate_fedce_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ValidateType
class SupervisedLearner(Learner):
    def __init__(
        self,
        aggregation_epochs: int = 1,
        train_task_name: str = AppConstants.TASK_TRAIN,
    ):
        """Simple Supervised Trainer.
        This provides the basic functionality of a local learner: perform before-train validation on
        global model at the beginning of each round, perform local training, and send the updated weights.
        No model will be saved locally, tensorboard record for local loss and global model validation score.
        Enabled FedAvg
        Args:
            aggregation_epochs: the number of training epochs for a round. Defaults to 1.
            train_task_name: name of the task to train the model.
        Returns:
            a Shareable with the updated local model after running `execute()`
        """
        super().__init__()
        # trainer init happens at the very beginning, only the basic info regarding the trainer is set here
        # the actual run has not started at this point
        self.aggregation_epochs = aggregation_epochs
        self.train_task_name = train_task_name
        # best metric seen so far (reserved for subclasses tracking a best model)
        self.best_metric = 0.0
        self.client_id = None
        self.writer = None
    def initialize(self, parts: dict, fl_ctx: FLContext):
        """Initialize run-specific state: client identity, tensorboard writer,
        and the task-specific training configuration."""
        # when a run starts, this is where the actual settings get initialized for trainer
        # set the paths according to fl_ctx
        engine = fl_ctx.get_engine()
        ws = engine.get_workspace()
        app_dir = ws.get_app_dir(fl_ctx.get_job_id())
        # get and print the args
        fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
        self.client_id = fl_ctx.get_identity_name()
        self.log_info(
            fl_ctx,
            f"Client {self.client_id} initialized with args: \n {fl_args}",
        )
        # set local tensorboard writer for local validation score of global model
        self.writer = SummaryWriter(app_dir)
        # set the training-related contexts, this is task-specific
        self.train_config(fl_ctx)
    @abstractmethod
    def train_config(self, fl_ctx: FLContext):
        """Training configurations customized to individual tasks
        This can be specified / loaded in any ways
        as long as they are made available for further training and validation
        some potential items include but not limited to:
        self.lr
        self.model
        self.device
        self.optimizer
        self.criterion
        self.transform_train
        self.transform_valid
        self.transform_post
        self.train_loader
        self.valid_loader
        self.inferer
        self.valid_metric
        """
        raise NotImplementedError
    @abstractmethod
    def finalize(self, fl_ctx: FLContext):
        # collect threads, close files here
        pass
    def local_train(
        self,
        fl_ctx,
        train_loader,
        abort_signal: Signal,
        current_round,
    ):
        """Typical training logic
        Total local epochs: self.aggregation_epochs
        Load data pairs from train_loader: image / label
        Compute outputs with self.model
        Compute loss with self.criterion
        Update model
        """
        for epoch in range(self.aggregation_epochs):
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.model.train()
            epoch_len = len(train_loader)
            # global epoch index across rounds, used for tensorboard x-axis
            epoch_global = current_round * self.aggregation_epochs + epoch
            self.log_info(
                fl_ctx,
                f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})",
            )
            for i, batch_data in enumerate(train_loader):
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                inputs = batch_data["image"].to(self.device)
                labels = batch_data["label"].to(self.device)
                # forward + backward + optimize
                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                current_step = epoch_len * epoch_global + i
                self.writer.add_scalar("train_loss", loss.item(), current_step)
    def local_valid(
        self,
        model,
        valid_loader,
        abort_signal: Signal,
        tb_id=None,
        current_round=None,
    ):
        """Typical validation logic
        Load data pairs from train_loader: image / label
        Compute outputs with self.model
        Perform post transform (binarization, etc.)
        Compute evaluation metric with self.valid_metric
        Add score to tensorboard record with specified id
        """
        model.eval()
        with torch.no_grad():
            metric = 0
            for i, batch_data in enumerate(valid_loader):
                if abort_signal.triggered:
                    return make_reply(ReturnCode.TASK_ABORTED)
                val_images = batch_data["image"].to(self.device)
                val_labels = batch_data["label"].to(self.device)
                # Inference
                val_outputs = self.inferer(val_images, model)
                val_outputs = self.transform_post(val_outputs)
                # Compute metric
                metric_score = self.valid_metric(y_pred=val_outputs, y=val_labels)
                metric += metric_score.item()
            # compute mean dice over whole validation set
            metric /= len(valid_loader)
            # tensorboard record id, add to record if provided
            if tb_id:
                self.writer.add_scalar(tb_id, metric, current_round)
        return metric
    def train(
        self,
        shareable: Shareable,
        fl_ctx: FLContext,
        abort_signal: Signal,
    ) -> Shareable:
        """Typical training task pipeline with potential HE functionality
        Get global model weights (potentially with HE)
        Local training
        Return updated weights (model_diff)
        """
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # get round information
        current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
        self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
        self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
        # update local model weights with received weights
        dxo = from_shareable(shareable)
        global_weights = dxo.data
        # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
        local_var_dict = self.model.state_dict()
        model_keys = global_weights.keys()
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = global_weights[var_name]
                try:
                    # reshape global weights to compute difference later on
                    global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
                    # update the local dict
                    local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed with error: {str(e)}")
        self.model.load_state_dict(local_var_dict)
        # local steps
        epoch_len = len(self.train_loader)
        self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
        # local train
        self.local_train(
            fl_ctx=fl_ctx,
            train_loader=self.train_loader,
            abort_signal=abort_signal,
            # BUG FIX: local_train() requires current_round (no default value);
            # the original call omitted it and would raise TypeError at runtime.
            current_round=current_round,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # compute delta model, global model has the primary key set
        local_weights = self.model.state_dict()
        model_diff = {}
        for name in global_weights:
            if name not in local_weights:
                continue
            model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
            if np.any(np.isnan(model_diff[name])):
                self.system_panic(f"{name} weights became NaN...", fl_ctx)
                return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        # build the shareable
        dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
        dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
        self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
        return dxo.to_shareable()
    def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Typical validation task pipeline with potential HE functionality
        Get global model weights (potentially with HE)
        Validation on local data
        Return validation score
        """
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # get round information
        current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        # validation on global model
        model_owner = "global_model"
        # update local model weights with received weights
        dxo = from_shareable(shareable)
        global_weights = dxo.data
        # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
        local_var_dict = self.model.state_dict()
        model_keys = global_weights.keys()
        n_loaded = 0
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = torch.as_tensor(global_weights[var_name], device=self.device)
                try:
                    # update the local dict
                    local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
                    n_loaded += 1
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed with error: {str(e)}")
        self.model.load_state_dict(local_var_dict)
        if n_loaded == 0:
            raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
        # before_train_validate only, can extend to other validate types
        validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
        if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # perform valid before local train
            global_metric = self.local_valid(
                self.model,
                self.valid_loader,
                abort_signal,
                tb_id="val_metric_global_model",
                current_round=current_round,
            )
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_metric_global_model ({model_owner}): {global_metric:.4f}")
            # validation metrics will be averaged with weights at server end for best model record
            metric_dxo = DXO(
                data_kind=DataKind.METRICS,
                data={MetaKey.INITIAL_METRICS: global_metric},
                meta={},
            )
            metric_dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, len(self.valid_loader))
            return metric_dxo.to_shareable()
        else:
            return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | research/fed-ce/jobs/fedce_prostate/app/custom/learners/supervised_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import torch
import torch.optim as optim
from learners.supervised_learner import SupervisedLearner
from monai.data import CacheDataset, DataLoader, Dataset, load_decathlon_datalist
from monai.inferers import SimpleInferer
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.transforms import (
Activations,
AsDiscrete,
AsDiscreted,
Compose,
EnsureChannelFirstd,
EnsureType,
EnsureTyped,
LoadImaged,
Resized,
ScaleIntensityRanged,
)
from networks.unet import UNet
from utils.custom_client_datalist_json_path import custom_client_datalist_json_path
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
class SupervisedMonaiProstateLearner(SupervisedLearner):
    def __init__(
        self,
        train_config_filename,
        aggregation_epochs: int = 1,
        train_task_name: str = AppConstants.TASK_TRAIN,
    ):
        """MONAI Learner for prostate segmentation task.
        It inherits from SupervisedLearner.
        Args:
            train_config_filename: path of the training config file, loaded in train_config()
            aggregation_epochs: the number of training epochs for a round.
            train_task_name: name of the task to train the model.
        Returns:
            a Shareable with the updated local model after running `execute()`
        """
        super().__init__(
            aggregation_epochs=aggregation_epochs,
            train_task_name=train_task_name,
        )
        self.train_config_filename = train_config_filename
        # The members below are populated by train_config() at run start.
        self.config_info = None
        self.lr = None
        self.model = None
        self.device = None
        self.optimizer = None
        self.criterion = None
        self.transform = None
        self.transform_post = None
        self.train_loader = None
        self.valid_loader = None
        self.inferer = None
        self.valid_metric = None
    def train_config(self, fl_ctx: FLContext):
        """MONAI training configuration
        Here, we use a json to specify the needed parameters
        """
        # Load training configurations json
        engine = fl_ctx.get_engine()
        ws = engine.get_workspace()
        app_config_dir = ws.get_app_config_dir(fl_ctx.get_job_id())
        train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)
        if not os.path.isfile(train_config_file_path):
            # NOTE(review): this only logs the error; the open() below will still
            # raise FileNotFoundError -- confirm that is the intended behavior.
            self.log_error(
                fl_ctx,
                f"Training configuration file does not exist at {train_config_file_path}",
            )
        with open(train_config_file_path) as file:
            self.config_info = json.load(file)
        # Get the config_info
        self.lr = self.config_info["learning_rate"]
        batch_size = self.config_info["batch_size"]
        cache_rate = self.config_info["cache_dataset"]
        dataset_base_dir = self.config_info["dataset_base_dir"]
        datalist_json_path = self.config_info["datalist_json_path"]
        # Get datalist json (per-client path: <root>/<client_id>.json)
        datalist_json_path = custom_client_datalist_json_path(datalist_json_path, self.client_id)
        # Set datalist
        train_list = load_decathlon_datalist(
            data_list_file_path=datalist_json_path,
            is_segmentation=True,
            data_list_key="training",
            base_dir=dataset_base_dir,
        )
        valid_list = load_decathlon_datalist(
            data_list_file_path=datalist_json_path,
            is_segmentation=True,
            data_list_key="validation",
            base_dir=dataset_base_dir,
        )
        self.log_info(
            fl_ctx,
            f"Training Size: {len(train_list)}, Validation Size: {len(valid_list)}",
        )
        # Set the training-related context
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = UNet(
            in_channels=1,
            out_channels=1,
        ).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.criterion = DiceLoss(sigmoid=True)
        # NOTE(review): labels are intensity-scaled together with images (0-255 ->
        # 0-1) and then binarized at 0.5 below -- assumes masks are stored as
        # 0/255 images; confirm against the dataset format.
        self.transform = Compose(
            [
                LoadImaged(keys=["image", "label"]),
                EnsureChannelFirstd(keys=["image", "label"]),
                ScaleIntensityRanged(keys=["image", "label"], a_min=0, a_max=255, b_min=0.0, b_max=1.0),
                Resized(
                    keys=["image", "label"],
                    spatial_size=(256, 256),
                    mode=("bilinear"),
                    align_corners=True,
                ),
                AsDiscreted(keys=["label"], threshold=0.5),
                EnsureTyped(keys=["image", "label"]),
            ]
        )
        # Post transform for validation: sigmoid then binarize predictions.
        self.transform_post = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
        # Set dataset
        if cache_rate > 0.0:
            train_dataset = CacheDataset(
                data=train_list,
                transform=self.transform,
                cache_rate=cache_rate,
                num_workers=0,
            )
            valid_dataset = CacheDataset(
                data=valid_list,
                transform=self.transform,
                cache_rate=cache_rate,
                num_workers=0,
            )
        else:
            train_dataset = Dataset(
                data=train_list,
                transform=self.transform,
            )
            valid_dataset = Dataset(
                data=valid_list,
                transform=self.transform,
            )
        self.train_loader = DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=0,
        )
        self.valid_loader = DataLoader(
            valid_dataset,
            batch_size=1,
            shuffle=False,
            num_workers=0,
        )
        # Set inferer and evaluation metric
        self.inferer = SimpleInferer()
        self.valid_metric = DiceMetric(include_background=False, reduction="mean", get_not_nans=False)
| NVFlare-main | research/fed-ce/jobs/fedce_prostate/app/custom/learners/supervised_monai_prostate_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
import torch.nn as nn
class UNet(nn.Module):
    """Plain 2D U-Net with four resolution levels.
    Submodule names (``encoder1..4``, ``pool1..4``, ``bottleneck``,
    ``upconv4..1``, ``decoder4..1``, ``conv``) and their creation order match
    the reference implementation, so state_dict checkpoints stay compatible.
    """
    def __init__(self, in_channels=3, out_channels=1, init_features=32):
        super().__init__()
        width = init_features
        # Encoder path: channel width doubles at every level.
        ch_in = in_channels
        for level in range(1, 5):
            ch_out = width * 2 ** (level - 1)
            setattr(self, f"encoder{level}", UNet._block(ch_in, ch_out, name=f"enc{level}"))
            setattr(self, f"pool{level}", nn.MaxPool2d(kernel_size=2, stride=2))
            ch_in = ch_out
        self.bottleneck = UNet._block(width * 8, width * 16, name="bottleneck")
        # Decoder path: transposed-conv upsampling, then a double-conv block
        # whose input is the upsampled features concatenated with the skip.
        for level in range(4, 0, -1):
            ch = width * 2 ** (level - 1)
            setattr(self, f"upconv{level}", nn.ConvTranspose2d(ch * 2, ch, kernel_size=2, stride=2))
            setattr(self, f"decoder{level}", UNet._block(ch * 2, ch, name=f"dec{level}"))
        # Final 1x1 conv maps to the requested number of output channels.
        self.conv = nn.Conv2d(in_channels=width, out_channels=out_channels, kernel_size=1)
    def forward(self, x):
        """Encoder/decoder pass with skip connections; returns raw logits."""
        skips = []
        out = x
        for level in range(1, 5):
            out = getattr(self, f"encoder{level}")(out)
            skips.append(out)
            out = getattr(self, f"pool{level}")(out)
        out = self.bottleneck(out)
        for level in range(4, 0, -1):
            out = getattr(self, f"upconv{level}")(out)
            out = torch.cat((out, skips[level - 1]), dim=1)
            out = getattr(self, f"decoder{level}")(out)
        return self.conv(out)
    @staticmethod
    def _block(in_channels, features, name):
        """Two (conv3x3 -> batchnorm -> ReLU) stages at a fixed width."""
        stages = []
        ch = in_channels
        for idx in (1, 2):
            stages.append(
                (
                    f"{name}_conv{idx}",
                    nn.Conv2d(
                        in_channels=ch,
                        out_channels=features,
                        kernel_size=3,
                        padding=1,
                        bias=False,
                    ),
                )
            )
            stages.append((f"{name}_bn{idx}", nn.BatchNorm2d(num_features=features)))
            stages.append((f"{name}_relu{idx}", nn.ReLU(inplace=True)))
            ch = features
        return nn.Sequential(OrderedDict(stages))
| NVFlare-main | research/fed-ce/jobs/fedce_prostate/app/custom/networks/unet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from argparse import ArgumentParser
from pathlib import Path
import nibabel as nib
import numpy as np
from tqdm.contrib import tzip
# Short tags used when renaming converted image/label files.
TASK_FILE_ID = {"liver": "LVR", "spleen": "SPL", "pancreas": "PAN", "kidney": "KITS"}
# Per-task remapping from each dataset's local label IDs into one shared,
# non-overlapping global label space (0 maps to 0 for every task).
TASK_LABEL_MAP = {
    "liver": {0: 0, 1: 1, 2: 2},
    "spleen": {0: 0, 1: 3},
    "pancreas": {0: 0, 1: 4, 2: 5},
    "kidney": {0: 0, 1: 6, 2: 7},
}
# Datalist json shipped alongside each converted dataset.
DEFAULT_DATA_LIST = {
    "liver": "data/Liver/datalist.json",
    "spleen": "data/Spleen/datalist.json",
    "pancreas": "data/Pancreas/datalist.json",
    "kidney": "data/KiTS19/datalist.json",
}
def map_values(data: np.ndarray, task: str) -> np.ndarray:
    """Remap *task*-local label IDs to the shared multi-task label space.
    Raises KeyError if *data* contains a value missing from the task's map.
    """
    lookup = TASK_LABEL_MAP[task]
    remap = np.vectorize(lookup.__getitem__)
    return remap(data.astype(np.uint8)).astype(np.uint8)
def convert_msd_dataset(src: str, dst: str, task: str) -> None:
    """Convert an MSD-style dataset (imagesTr/labelsTr) into the flat layout.
    Images are copied under the ``IM_<TAG>_<case>`` naming scheme; labels are
    remapped into the shared label space and saved as ``LB_<TAG>_<case>``.
    """
    src_root = Path(src)
    if not src_root.is_dir():
        raise ValueError(f"source path {src} must be a directory.")
    image_paths = [str(p) for p in src_root.glob("imagesTr/*.gz")]
    assert len(image_paths) > 0
    label_paths = [p.replace("imagesTr", "labelsTr") for p in image_paths]
    dst_root = Path(dst)
    dst_root.mkdir(parents=True, exist_ok=True)
    prefix = TASK_FILE_ID[task]
    for image_file, label_file in tzip(image_paths, label_paths):
        case_suffix = image_file.split("_")[-1]
        # Images need no processing; copy them under the new name.
        shutil.copy(image_file, str(dst_root / f"IM_{prefix}_{case_suffix}"))
        # Labels are remapped into the global label space before saving.
        seg = nib.load(label_file)
        seg_data = map_values(np.asanyarray(seg.dataobj), task=task)
        nib.save(nib.Nifti1Image(seg_data, seg.affine), str(dst_root / f"LB_{prefix}_{case_suffix}"))
    # Ship the matching datalist unless src and dst already share the file.
    try:
        shutil.copy(DEFAULT_DATA_LIST[task], str(dst_root / "datalist.json"))
    except shutil.SameFileError:
        pass
def convert_kits_dataset(src: str, dst: str, task: str) -> None:
    """Convert a KiTS19-style dataset (per-case folders) into the flat layout.
    Each case folder holds ``imaging.nii.gz`` and ``segmentation.nii.gz``;
    images are copied as ``IM_KITS_<id>`` and labels remapped to ``LB_KITS_<id>``.
    """
    src_root = Path(src)
    if not src_root.is_dir():
        raise ValueError(f"source path {src} must be a directory.")
    label_paths = [str(p) for p in src_root.glob("*/segmentation.nii.gz")]
    assert len(label_paths) > 0
    image_paths = [p.replace("segmentation.nii", "imaging.nii") for p in label_paths]
    dst_root = Path(dst)
    dst_root.mkdir(parents=True, exist_ok=True)
    for image_file, label_file in tzip(image_paths, label_paths):
        # Case id comes from the folder name, e.g. "case_00123" -> "123".
        case_id = Path(image_file).parent.name.replace("case_00", "")
        # Images need no processing; copy them under the new name.
        shutil.copy(image_file, str(dst_root / f"IM_KITS_{case_id}.nii.gz"))
        # Labels are remapped into the global label space before saving.
        seg = nib.load(label_file)
        seg_data = map_values(np.asanyarray(seg.dataobj), task=task)
        nib.save(nib.Nifti1Image(seg_data, seg.affine), str(dst_root / f"LB_KITS_{case_id}.nii.gz"))
    # Ship the matching datalist unless src and dst already share the file.
    try:
        shutil.copy(DEFAULT_DATA_LIST[task], str(dst_root / "datalist.json"))
    except shutil.SameFileError:
        pass
def main(args) -> None:
    """Dispatch the source dataset to the converter matching its layout."""
    # MSD-style datasets (imagesTr/labelsTr folders) share one converter;
    # any other task falls through to the KiTS19 case-folder layout.
    if args.task in ("liver", "pancreas", "spleen"):
        converter = convert_msd_dataset
    else:
        converter = convert_kits_dataset
    converter(args.src, args.dst, args.task)
if __name__ == "__main__":
    # Command-line entry point: pick a task and convert src -> dst.
    parser = ArgumentParser()
    parser.add_argument(
        "--task",
        "-t",
        type=str,
        choices=["kidney", "liver", "pancreas", "spleen"],
        help="Choose which dataset to process.",
    )
    parser.add_argument("--src", "-s", type=str, help="Path to the dataset root directory.")
    parser.add_argument("--dst", "-d", type=str, help="Path to the output dataset directory.")
    args = parser.parse_args()
    main(args)
| NVFlare-main | research/condist-fl/prepare_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from argparse import ArgumentParser
from pathlib import Path
import torch
from src.utils.get_model import get_model
def run_convert(args):
    """Convert a saved training checkpoint into a traced TorchScript module.

    Reads the model definition from a server or client config, restores the
    weights from ``args.weights``, traces the model on GPU with a dummy
    input of the training patch size, and saves the result to ``args.output``.
    """
    # Fail early when either input file is missing.
    if not Path(args.config).exists():
        raise ValueError(f"Config file {args.config} does not exists.")
    if not Path(args.weights).exists():
        raise ValueError(f"Checkpoint file {args.weights} does not exists.")

    with open(args.config) as f:
        raw_config = json.load(f)

    if "app_" + args.app == "app_server":
        # Server configs store the model as a component whose "path" is a
        # fully-qualified class path; split it into module path + class name.
        model_config = [c for c in raw_config["components"] if c["id"] == "model"][0]
        parts = model_config["path"].split(".")
        model_config["name"] = parts[-1]
        model_config["path"] = ".".join(parts[:-1])
    else:
        # Client task configs keep the model definition under "model".
        model_config = raw_config["model"]

    checkpoint = torch.load(args.weights)
    model = get_model(model_config)
    model.load_state_dict(checkpoint["model"])
    model = model.cuda().eval()

    # Trace with a dummy input shaped like a training patch.
    example = torch.rand([1, 1, 224, 224, 64]).cuda()
    torch.jit.save(torch.jit.trace(model, example), args.output)
if __name__ == "__main__":
    # Command-line interface for the checkpoint conversion script.
    cli = ArgumentParser()
    cli.add_argument("--config", "-c", type=str, help="Path to the config file.")
    cli.add_argument("--weights", "-w", type=str, help="Path to the saved model checkpoint.")
    cli.add_argument(
        "--app",
        "-a",
        type=str,
        choices=["server", "kidney", "liver", "pancreas", "spleen"],
        help="Select app to convert checkpoint.",
    )
    cli.add_argument("--output", "-o", type=str, help="Output result JSON.")
    run_convert(cli.parse_args())
| NVFlare-main | research/condist-fl/convert_to_torchscript.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from argparse import ArgumentParser
import numpy as np
import torch
from monai.inferers import SlidingWindowInferer
from monai.transforms import AsDiscrete, EnsureChannelFirst, LoadImage, SaveImage, Spacing
class ImageDataset(object):
    """Iterable collection of image file paths from a MONAI-style data list.

    Reads ``data_list`` (a JSON file), takes the entries under
    ``data_list_key`` (defaulting to an empty list when the key is absent),
    and resolves each entry's "image" path relative to ``data_root``.
    """

    def __init__(self, data_root: str, data_list: str, data_list_key: str):
        with open(data_list) as f:
            data = json.load(f).get(data_list_key, [])
        self.data = [os.path.join(data_root, d["image"]) for d in data]
        # NOTE: removed the dead `self.index = 0` attribute — it was never
        # read or updated (iteration delegates to the underlying list).

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)
class DataProcessor(object):
    """Pre/post-processing pipeline around sliding-window inference.

    Preprocessing loads a volume, moves it to GPU, resamples to a fixed
    spacing, clips intensities to [i_min, i_max], and normalizes with
    (x - mean) / std.  Postprocessing applies an argmax to the logits and
    writes the segmentation into ``output_dir``.
    """

    def __init__(self, i_min: float, i_max: float, mean: float, std: float, output_dir: str) -> None:
        self.i_min = i_min
        self.i_max = i_max
        self.mean = mean
        self.std = std

        # MONAI transforms, built once and reused for every image.
        self.reader = LoadImage(image_only=True)
        self.channel = EnsureChannelFirst()
        self.spacing = Spacing(pixdim=[1.44, 1.44, 2.87])
        self.post = AsDiscrete(argmax=True)
        self.writer = SaveImage(
            output_dir=output_dir, output_postfix="seg", output_dtype=np.uint8, separate_folder=False, resample=True
        )

    def preprocess(self, input_file_name: str) -> torch.Tensor:
        """Load ``input_file_name`` and return the normalized GPU tensor."""
        volume = self.reader(input_file_name)
        volume = self.spacing(self.channel(volume).cuda())
        # In-place ops avoid allocating intermediate GPU tensors.
        volume = volume.clip_(self.i_min, self.i_max)
        volume.add_(-self.mean)
        volume.div_(self.std)
        return volume

    def postprocess(self, seg: torch.Tensor) -> None:
        """Discretize the logits and write the segmentation to disk."""
        self.writer(self.post(seg))
def main(args):
    """Run sliding-window inference with a TorchScript model over a dataset."""
    dataset = ImageDataset(args.data_root, args.data_list, args.data_list_key)
    processor = DataProcessor(i_min=-54.0, i_max=258.0, mean=100.0, std=50.0, output_dir=args.output)
    inferer = SlidingWindowInferer(roi_size=[224, 224, 64], mode="gaussian", sw_batch_size=1, overlap=0.50)

    model = torch.jit.load(args.model)
    model = model.eval().cuda()

    # Mixed precision + no_grad keeps GPU memory usage low during inference.
    with torch.cuda.amp.autocast():
        with torch.no_grad():
            for path in iter(dataset):
                volume = processor.preprocess(input_file_name=path)
                volume = volume.view(1, *volume.shape)  # add batch dimension
                logits = inferer(volume, model)
                processor.postprocess(torch.squeeze(logits, dim=0))
if __name__ == "__main__":
    # Command-line interface for the inference script.
    cli = ArgumentParser()
    cli.add_argument("--data_root", "-r", type=str, help="Path to data root directory.")
    cli.add_argument("--data_list", "-l", type=str, help="Path to data list file.")
    cli.add_argument("--data_list_key", "-k", type=str, help="Target data split key in data list.")
    cli.add_argument("--model", "-m", type=str, help="Path to model torchscript file.")
    cli.add_argument("--output", "-o", type=str, help="Output directory.")
    main(cli.parse_args())
| NVFlare-main | research/condist-fl/run_infer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from argparse import ArgumentParser
from pathlib import Path
import numpy as np
import torch
from src.data import DataManager
from src.utils.get_model import get_model
from src.validator import Validator
def load_ckpt(app: str, model: torch.nn.Module, ckpt_path: str) -> torch.nn.Module:
    """Load the "model" state dict from a checkpoint file into ``model``.

    ``app`` identifies the owning app for the caller's bookkeeping and is
    not used by the loading itself.  The model is modified in place and
    also returned for convenience.
    """
    checkpoint = torch.load(ckpt_path)
    model.load_state_dict(checkpoint["model"])
    return model
def run_validation(args):
    """Cross-validate the global best model and every client's best model.

    Expects ``args.workspace`` to be an NVFlare simulator workspace.  For
    each client's test split, runs validation with the server's global best
    model and with every client's local best model, then writes all
    per-organ metrics (plus a recomputed overall mean) to ``args.output``
    as JSON.
    """
    ws = Path(args.workspace)
    if not ws.exists():
        raise ValueError(f"Workspace path {ws} does not exists.")

    prefix = ws / "simulate_job"
    server = "app_server"
    # Every app_* directory except the server is a client app.
    clients = [p.name for p in prefix.glob("app_*") if "server" not in p.name]

    # Collect all checkpoints to evaluate
    checkpoints = {"app_server": str(prefix / "app_server/best_FL_global_model.pt")}
    checkpoints.update({c: str(prefix / c / "models/best_model.pt") for c in clients})

    # Collect configs from clients
    config = {
        c: {"data": str(prefix / c / "config/config_data.json"), "task": str(prefix / c / "config/config_task.json")}
        for c in clients
    }

    # metrics[evaluated_app][metric_name], accumulated over all client datasets.
    metrics = {app: {} for app in [server] + clients}
    for client in clients:
        with open(config[client]["data"]) as f:
            data_config = json.load(f)
        with open(config[client]["task"]) as f:
            task_config = json.load(f)

        print(f"Loading test cases from {client}'s dataset.")
        dm = DataManager(str(prefix), data_config)
        dm.setup("test")

        # Create model & validator from task config
        model = get_model(task_config["model"])
        validator = Validator(task_config)

        # Run server validation.  NOTE: the same ``model`` object is reused
        # and re-loaded with each checkpoint's weights in turn below.
        model = load_ckpt("app_server", model, checkpoints["app_server"])
        model = model.eval().cuda()

        print("Start validation using global best model.")
        raw_metrics = validator.run(model, dm.get_data_loader("test"))
        # Drop the per-dataset mean; the overall mean is recomputed at the
        # end from the per-organ metrics gathered across all datasets.
        raw_metrics.pop("val_meandice")
        metrics["app_server"].update(raw_metrics)

        # Run client validation
        for c in clients:
            model = load_ckpt(c, model, checkpoints[c])
            model = model.eval().cuda()

            print(f"Start validation using {c}'s best model.")
            raw_metrics = validator.run(model, dm.get_data_loader("test"))
            raw_metrics.pop("val_meandice")
            metrics[c].update(raw_metrics)

    # Calculate correct val_meandice
    for site in metrics:
        metrics[site]["val_meandice"] = np.mean([val for _, val in metrics[site].items()])

    with open(args.output, "w") as f:
        json.dump(metrics, f, indent=4, sort_keys=True)
if __name__ == "__main__":
    # Command-line interface for the cross-validation script.
    cli = ArgumentParser()
    cli.add_argument("--workspace", "-w", type=str, help="Workspace path.")
    cli.add_argument("--output", "-o", type=str, help="Output result JSON.")
    run_validation(cli.parse_args())
| NVFlare-main | research/condist-fl/run_validate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Optional
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey, ReservedKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.widgets.widget import Widget
class GlobalMetricLogger(Widget):
    """Server-side widget that logs the weighted-average client validation metric.

    During each round, clients report their pre-aggregation validation metric
    (under ``val_metric_name`` in the DXO meta).  This widget accumulates
    those values, weighted per client by ``aggregation_weights``, and writes
    the weighted mean to TensorBoard right before aggregation.
    """

    def __init__(
        self,
        log_dir: str = "logs",
        log_name: str = "key_metric",
        val_metric_name: str = MetaKey.INITIAL_METRICS,
        aggregation_weights: Optional[Dict] = None,
    ):
        """
        Args:
            log_dir: TensorBoard log directory, relative to the app root.
            log_name: scalar tag used when writing the metric summary.
            val_metric_name: DXO meta key under which clients report the metric.
            aggregation_weights: optional per-client weights; clients not
                listed default to a weight of 1.0.
        """
        super().__init__()
        self.log_dir = log_dir
        self.log_name = log_name
        self.val_metric_name = val_metric_name
        # Fix: default to an empty dict so _before_accept can always call
        # .get() — the original stored None and crashed when the argument
        # was omitted.
        self.aggregation_weights = aggregation_weights if aggregation_weights is not None else {}
        self.writer = None

        self.logger.info(f"metric logger weights control: {aggregation_weights}")

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        # Route framework events to the matching lifecycle step.
        if event_type == EventType.START_RUN:
            self._startup(fl_ctx)
        elif event_type == AppEventType.ROUND_STARTED:
            self._reset_metrics()
        elif event_type == AppEventType.BEFORE_CONTRIBUTION_ACCEPT:
            self._before_accept(fl_ctx)
        elif event_type == AppEventType.BEFORE_AGGREGATION:
            self._before_aggregate(fl_ctx)
        elif event_type == EventType.END_RUN:
            self._shutdown(fl_ctx)

    def _reset_metrics(self):
        # Weighted running sum and total weight for the current round.
        self.val_metric_sum = 0
        self.val_metric_weights = 0

    def _startup(self, fl_ctx: FLContext):
        """Create the TensorBoard writer under the app root."""
        app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
        log_path = os.path.join(app_root, self.log_dir)
        if not os.path.exists(log_path):
            os.makedirs(log_path, exist_ok=True)

        self._reset_metrics()
        self.writer = SummaryWriter(log_dir=log_path)

    def _before_accept(self, fl_ctx: FLContext):
        """Accumulate a client's reported metric before its result is accepted."""
        peer_ctx = fl_ctx.get_peer_context()
        shareable = peer_ctx.get_prop(FLContextKey.SHAREABLE)

        try:
            dxo = from_shareable(shareable)
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate.
            self.log_exception(fl_ctx, "shareable data is not a valid DXO")
            return

        if dxo.data_kind not in (DataKind.WEIGHT_DIFF, DataKind.WEIGHTS, DataKind.COLLECTION):
            self.log_debug(fl_ctx, "DXO kind is not valid for logging")
            return

        if dxo.data is None:
            self.log_debug(fl_ctx, "No data in DXO")
            return

        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        contribution_round = shareable.get_header(AppConstants.CONTRIBUTION_ROUND)
        client_name = shareable.get_peer_prop(ReservedKey.IDENTITY_NAME, default="?")

        if current_round == 0:
            # Round 0 metrics come from the untrained initial model.
            self.log_debug(fl_ctx, "Skip the first round.")
            return

        if contribution_round != current_round:
            # Stale contribution from an earlier round; don't mix rounds.
            self.log_warning(
                fl_ctx, f"Discard round {contribution_round} metrics from {client_name} at round {current_round}"
            )
            return

        val_metric = dxo.get_meta_prop(self.val_metric_name)
        if val_metric is None:
            self.log_debug(fl_ctx, f"Metric {self.val_metric_name} does not exists.")
            return
        else:
            self.log_info(fl_ctx, f"Received validation metric {val_metric} from {client_name}.")

        client_weight = self.aggregation_weights.get(client_name, 1.0)
        self.val_metric_sum += val_metric * client_weight
        self.val_metric_weights += client_weight

    def _before_aggregate(self, fl_ctx: FLContext):
        """Write the weighted mean metric for this round and reset accumulators."""
        if self.val_metric_weights == 0:
            self.log_debug(fl_ctx, "nothing accumulated")
            return

        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        self.writer.add_scalar(self.log_name, self.val_metric_sum / self.val_metric_weights, current_round)
        self.log_info(fl_ctx, f"Write metric summary for round {current_round}.")

        self._reset_metrics()

    def _shutdown(self, fl_ctx: FLContext):
        self.writer.close()
| NVFlare-main | research/condist-fl/jobs/condist/server/custom/metric_logger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from typing import Any, Dict
import numpy as np
import torch
from nvflare.apis.dxo import DataKind, from_bytes
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.formatter import Formatter
from nvflare.app_common.app_constant import AppConstants
def array_to_list(data: Any) -> Any:
    """Convert a torch tensor or numpy array to a nested Python list.

    Any other value is returned unchanged.
    """
    if isinstance(data, (torch.Tensor, np.ndarray)):
        return data.tolist()
    return data


def simplify_metrics(metrics: Dict[str, Any]) -> Dict[str, Any]:
    """Return a copy of ``metrics`` with tensor/array values converted to lists.

    Fix: iterate ``metrics.items()`` — the original unpacked the dict
    itself, which yields only keys and raised at runtime.
    """
    return {k: array_to_list(v) for k, v in metrics.items()}
class SimpleFormatter(Formatter):
    """Formats cross-site validation results gathered on the server.

    Collects, per data client and per evaluated model, the metric DXOs saved
    to disk during cross validation and renders them as the repr of a nested
    dict.  Results also accumulate in ``self.results`` across calls.
    """

    def __init__(self) -> None:
        super().__init__()
        # All results seen so far, keyed by data client then model name.
        self.results = {}

    def format(self, fl_ctx: FLContext) -> str:
        """Build a printable summary of the validation results in ``fl_ctx``."""
        # Get validation result
        validation_shareables_dict = fl_ctx.get_prop(AppConstants.VALIDATION_RESULT, {})
        # Fix: the original wrote into an undefined name ("res") while
        # returning "result", so it raised NameError (swallowed by the
        # broad except) and always returned an empty dict.
        result = {}

        try:
            # Extract results from all clients
            for data_client in validation_shareables_dict.keys():
                validation_dict = validation_shareables_dict[data_client]
                if validation_dict:
                    result[data_client] = {}
                    for model_name in validation_dict.keys():
                        dxo_path = validation_dict[model_name]

                        # Load the shareable
                        with open(dxo_path, "rb") as f:
                            metric_dxo = from_bytes(f.read())

                        # Get metrics from shareable
                        if metric_dxo and metric_dxo.data_kind == DataKind.METRICS:
                            metrics = simplify_metrics(metric_dxo.data)
                            result[data_client][model_name] = metrics
            # add any results
            self.results.update(result)
        except Exception:
            traceback.print_exc()

        return repr(result)
| NVFlare-main | research/condist-fl/jobs/condist/server/custom/simple_formatter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pathlib import Path
from typing import Union
from ruamel.yaml import YAML
from nvflare.apis.dxo import DataKind, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.widgets.widget import Widget
class ReportGenerator(Widget):
    """Collects cross-site validation results and writes a report at run end.

    Listens for VALIDATION_RESULT_RECEIVED events, records each
    (data client, model owner, metrics) triple, and dumps them all to a YAML
    or JSON file under the run's cross-validation directory when the run ends.
    """

    ALLOWED_FILE_EXTENSIONS = [".yaml", ".yml", ".json"]

    def __init__(
        self,
        results_dir: Union[str, Path] = AppConstants.CROSS_VAL_DIR,
        report_path: Union[str, Path] = "cross_val_results.yaml",
    ):
        """
        Args:
            results_dir: output directory, relative to the run directory.
            report_path: report file name; the extension selects YAML vs JSON.

        Raises:
            ValueError: if ``report_path`` has an unsupported extension.
        """
        super(ReportGenerator, self).__init__()
        self.results_dir = Path(results_dir)
        self.report_path = Path(report_path)

        if self.report_path.suffix not in ReportGenerator.ALLOWED_FILE_EXTENSIONS:
            # Fix: removed the duplicated word ("must be be") in the message.
            raise ValueError(f"Report file extension must be .yaml, .yml, or .json, got {self.report_path.suffix}")

        self.val_results = []

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        if event_type == EventType.START_RUN:
            self.val_results.clear()
        elif event_type == AppEventType.VALIDATION_RESULT_RECEIVED:
            model_owner = fl_ctx.get_prop(AppConstants.MODEL_OWNER, None)
            data_client = fl_ctx.get_prop(AppConstants.DATA_CLIENT, None)
            val_results = fl_ctx.get_prop(AppConstants.VALIDATION_RESULT, None)

            if not model_owner:
                self.log_error(fl_ctx, "Unknown model owner, validation result will not be saved", fire_event=False)
            if not data_client:
                self.log_error(fl_ctx, "Unknown data client, validation result will not be saved", fire_event=False)

            if val_results:
                try:
                    dxo = from_shareable(val_results)
                    dxo.validate()

                    if dxo.data_kind == DataKind.METRICS:
                        self.val_results.append(
                            {"data_client": data_client, "model_owner": model_owner, "metrics": dxo.data}
                        )
                    else:
                        self.log_error(
                            fl_ctx, f"Expected dxo of kind METRICS but got {dxo.data_kind}", fire_event=False
                        )
                except Exception:
                    # Narrowed from a bare except so control-flow exceptions
                    # (SystemExit, KeyboardInterrupt) are not swallowed.
                    self.log_exception(fl_ctx, "Exception in handling validation result", fire_event=False)
        elif event_type == EventType.END_RUN:
            ws = fl_ctx.get_engine().get_workspace()
            run_dir = Path(ws.get_run_dir(fl_ctx.get_job_id()))
            output_dir = run_dir / self.results_dir
            if not output_dir.exists():
                output_dir.mkdir(parents=True)

            results = {"val_results": self.val_results}

            output_file_path = output_dir / self.report_path
            if self.report_path.suffix == ".json":
                with open(output_file_path, "w") as f:
                    json.dump(results, f)
            else:  # ".yaml" or ".yml"
                yaml = YAML()
                with open(output_file_path, "w") as f:
                    yaml.dump(results, f)
| NVFlare-main | research/condist-fl/jobs/condist/server/custom/report_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
from typing import List
import torch
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_common.app_constant import DefaultCheckpointFileName
from nvflare.app_common.pt.pt_fed_utils import PTModelPersistenceFormatManager
class SimpleModelLocator(ModelLocator):
    """Locates the server's last/best global model checkpoints for cross validation.

    Exposes two logical model names ("server" and "server_best") and loads
    the corresponding checkpoint from the run's server app directory,
    converting the weights to numpy for transport in a WEIGHTS DXO.
    """

    SERVER_MODEL_NAME = "server"
    SERVER_BEST_MODEL_NAME = "server_best"

    def __init__(
        self,
        model_dir="app_server",
        model_name=DefaultCheckpointFileName.GLOBAL_MODEL,
        best_model_name=DefaultCheckpointFileName.BEST_GLOBAL_MODEL,
    ):
        """
        Args:
            model_dir: server app directory, relative to the run directory.
            model_name: file name of the last global model checkpoint.
            best_model_name: file name of the best global model checkpoint.
        """
        super().__init__()
        self.model_dir = model_dir
        self.model_file_name = model_name
        self.best_model_file_name = best_model_name

    def get_model_names(self, fl_ctx: FLContext) -> List[str]:
        """Return the logical names of the models this locator can provide."""
        return [SimpleModelLocator.SERVER_MODEL_NAME, SimpleModelLocator.SERVER_BEST_MODEL_NAME]

    def locate_model(self, model_name, fl_ctx: FLContext) -> DXO:
        """Load the requested checkpoint and wrap its weights in a DXO.

        Returns None when ``model_name`` is unknown or loading fails.
        """
        dxo = None
        engine = fl_ctx.get_engine()
        app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)

        if model_name in self.get_model_names(fl_ctx):
            # Get run information
            run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
            run_dir = engine.get_workspace().get_run_dir(run_number)
            model_path = os.path.join(run_dir, self.model_dir)

            # Generate model path
            if model_name == SimpleModelLocator.SERVER_BEST_MODEL_NAME:
                model_load_path = os.path.join(model_path, self.best_model_file_name)
            else:
                model_load_path = os.path.join(model_path, self.model_file_name)

            # Load checkpoint
            model_data = None
            try:
                checkpoint = torch.load(model_load_path, map_location="cpu")
                model_data = checkpoint["model"]
                # Convert tensors to numpy for serialization.
                for var_name in model_data:
                    w = model_data[var_name]
                    if isinstance(w, torch.Tensor):
                        model_data[var_name] = w.numpy()
            except Exception:
                # Narrowed from a bare except; log and fall through
                # (model_data stays None, so dxo stays None).
                self.log_error(fl_ctx, traceback.format_exc())

            if model_data is not None:
                mgr = PTModelPersistenceFormatManager(model_data)
                dxo = DXO(data_kind=DataKind.WEIGHTS, data=mgr.var_dict, meta=mgr.meta)

        return dxo
| NVFlare-main | research/condist-fl/jobs/condist/server/custom/model_locator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
import torch
from monai.inferers import SlidingWindowInferer
from monai.metrics import DiceMetric
from monai.transforms import AsDiscreted
from torch.utils.data import DataLoader
from tqdm import tqdm
def get_fg_classes(fg_idx, classes):
    """Map each foreground class index in ``fg_idx`` to its name in ``classes``."""
    return {classes[idx]: idx for idx in fg_idx}
class Validator(object):
    """Sliding-window Dice validation for multi-organ segmentation models.

    Built from a task config: uses a Gaussian-weighted SlidingWindowInferer,
    one-hot discretization of predictions and labels, and a per-class
    DiceMetric reduced over the batch dimension.
    """

    def __init__(self, task_config: Dict):
        roi_size = task_config["inferer"]["roi_size"]
        sw_batch_size = task_config["inferer"]["sw_batch_size"]

        self.num_classes = len(task_config["classes"])
        # Foreground class name -> class index, taken from the ConDist config.
        self.fg_classes = get_fg_classes(task_config["condist_config"]["foreground"], task_config["classes"])

        self.inferer = SlidingWindowInferer(
            roi_size=roi_size, sw_batch_size=sw_batch_size, mode="gaussian", overlap=0.5
        )
        # Argmax the logits, then one-hot both preds and labels for DiceMetric.
        self.post = AsDiscreted(
            keys=["preds", "label"], argmax=[True, False], to_onehot=[self.num_classes, self.num_classes], dim=1
        )
        self.metric = DiceMetric(reduction="mean_batch")

    def validate_step(self, model: torch.nn.Module, batch: Dict[str, Any]) -> None:
        """Run inference on one batch and accumulate Dice into ``self.metric``."""
        batch["image"] = batch["image"].to("cuda:0")
        batch["label"] = batch["label"].to("cuda:0")

        # Run inference
        batch["preds"] = self.inferer(batch["image"], model)

        # Post processing
        batch = self.post(batch)

        # calculate metrics (DiceMetric accumulates state across calls)
        self.metric(batch["preds"], batch["label"])

    def validate_loop(self, model, data_loader) -> Dict[str, Any]:
        """Validate over the whole loader; return per-organ and mean Dice.

        Returns a dict with "val_meandice_<organ>" entries for each
        foreground class plus "val_meandice" (mean over foreground classes
        only), with tensors converted to plain lists.
        """
        # Run inference over whole validation set
        with torch.no_grad():
            with torch.cuda.amp.autocast():
                for batch in tqdm(data_loader, desc="Validation DataLoader", dynamic_ncols=True):
                    self.validate_step(model, batch)

        # Collect metrics (aggregate then reset the stateful DiceMetric)
        raw_metrics = self.metric.aggregate()
        self.metric.reset()

        mean = 0.0
        metrics = {}
        for organ, idx in self.fg_classes.items():
            mean += raw_metrics[idx]
            metrics["val_meandice_" + organ] = raw_metrics[idx]
        metrics["val_meandice"] = mean / len(self.fg_classes)

        # Convert tensors to lists so the result is serializable.
        for k, v in metrics.items():
            if isinstance(v, torch.Tensor):
                metrics[k] = v.tolist()
        return metrics

    def run(self, model: torch.nn.Module, data_loader: DataLoader) -> Dict[str, Any]:
        """Switch the model to eval mode and run the validation loop."""
        model.eval()
        return self.validate_loop(model, data_loader)
| NVFlare-main | research/condist-fl/src/validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import traceback
from pathlib import Path
from typing import Dict, Literal, Optional

import numpy as np
import torch
from prettytable import PrettyTable
from torch.utils.tensorboard import SummaryWriter

from data import DataManager
from trainer import ConDistTrainer
from utils.get_model import get_model
from utils.model_weights import extract_weights, load_weights
from validator import Validator

from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReturnCode, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ModelName, ValidateType
class ConDistLearner(Learner):
def __init__(
self,
task_config: str,
data_config: str,
aggregation_steps: int,
method: Literal["ConDist"] = "ConDist",
seed: Optional[int] = None,
max_retry: int = 1,
train_task_name: str = AppConstants.TASK_TRAIN,
submit_model_task_name: str = AppConstants.TASK_SUBMIT_MODEL,
):
super().__init__()
self.task_config = task_config
self.data_config = data_config
self.aggregation_steps = aggregation_steps
self._method = method
self._seed = seed
self._max_retry = max_retry
self.train_task_name = train_task_name
self.submit_model_task_name = submit_model_task_name
def initialize(self, parts: Dict, fl_ctx: FLContext) -> None:
self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
# Load configurations
prefix = Path(self.app_root)
with open(prefix / self.task_config) as f:
task_config = json.load(f)
with open(prefix / self.data_config) as f:
data_config = json.load(f)
# Initialize variables
self.key_metric = "val_meandice"
self.best_metric = -np.inf
self.best_model_path = "models/best_model.pt"
self.last_model_path = "models/last.pt"
# Create data manager
self.dm = DataManager(self.app_root, data_config)
# Create model
self.model = get_model(task_config["model"])
# Configure trainer & validator
if self._method == "ConDist":
self.trainer = ConDistTrainer(task_config)
self.validator = Validator(task_config)
# Create logger
self.tb_logger = SummaryWriter(log_dir=prefix / "logs")
def train(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# Log training info
num_rounds = data.get_header(AppConstants.NUM_ROUNDS)
current_round = data.get_header(AppConstants.CURRENT_ROUND)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{num_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# Make a copy of model weight for weight diff calculation
dxo = from_shareable(data)
global_weights = dxo.data
# Create dataset & data loader (if necessary)
if self.dm.get_data_loader("train") is None:
self.dm.setup("train")
if self.dm.get_data_loader("validate") is None:
self.dm.setup("validate")
# Run training
for i in range(self._max_retry + 1):
try:
self.trainer.run(
self.model,
self.dm.get_data_loader("train"),
num_steps=self.aggregation_steps,
logger=self.tb_logger,
)
break
except Exception as e:
if i < self._max_retry:
self.log_warning(fl_ctx, f"Someting wrong in training, retrying ({i+1}/{self._max_retry}).")
# Restore trainer states to the beginning of the round
if os.path.exists(self.last_model_path):
self.trainer.load_checkpoint(self.last_model_path, self.model)
load_weights(self.model, global_weights)
self.model = self.model.to("cuda:0")
# Reset dataset & dataloader
self.dm._data_loader["train"] = None
self.dm._dataset["train"] = None
self.dm.setup("train")
else:
raise RuntimeError(traceback.format_exc())
# Run validation
for i in range(self._max_retry + 1):
try:
metrics = self.validator.run(self.model, self.dm.get_data_loader("validate"))
break
except Exception as e:
if i < self._max_retry:
self.log_warning(fl_ctx, f"Someting wrong in training, retrying ({i+1}/{self._max_retry}).")
# Reset dataset & dataloader
self.dm._data_loader["validate"] = None
self.dm._dataset["validate"] = None
self.dm.setup("validate")
else:
raise RuntimeError(traceback.format_exc())
# Log validation results
table = PrettyTable()
table.field_names = ["Metric", "Value"]
for m, v in metrics.items():
table.add_row([m, v])
self.tb_logger.add_scalar(m, v, current_round)
self.log_info(fl_ctx, str(table))
# Save checkpoint if necessary
if self.best_metric < metrics[self.key_metric]:
self.best_metric = metrics[self.key_metric]
self.trainer.save_checkpoint(self.best_model_path, self.model)
self.trainer.save_checkpoint(self.last_model_path, self.model)
# Calculate weight diff
local_weights = extract_weights(self.model)
weight_diff = {}
for var_name in local_weights:
weight_diff[var_name] = local_weights[var_name] - global_weights[var_name]
if np.any(np.isnan(weight_diff[var_name])):
self.system_panic(f"{var_name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# Create DXO and return
dxo = DXO(
data_kind=DataKind.WEIGHT_DIFF,
data=weight_diff,
meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self.aggregation_steps},
)
return dxo.to_shareable()
def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable:
if model_name == ModelName.BEST_MODEL:
model_data = None
try:
model_data = torch.load(self.best_model_path, map_location="cpu")
self.log_info(fl_ctx, f"Load best model from {self.best_model_path}")
except Exception as e:
self.log_error(fl_ctx, f"Unable to load best model: {e}")
if model_data:
data = {}
for var_name in model_data["model"]:
data[var_name] = model_data[var_name].numpy()
dxo = DXO(data_kind=DataKind.WEIGHTS, data=data)
return dxo.to_shareable()
else:
self.log_error(fl_ctx, f"best local model not available at {self.best_model_path}")
return make_reply(ReturnCode.EXECUTION_RESULT_ERROR)
else:
self.log_error(fl_ctx, f"Unknown model_type {model_name}")
return make_reply(ReturnCode.BAD_TASK_DATA)
def validate(self, data: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
# 1. Extract data from shareable
model_owner = data.get_header(AppConstants.MODEL_OWNER, "global_model")
validate_type = data.get_header(AppConstants.VALIDATE_TYPE)
# 2. Prepare dataset
phase = None
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
phase = "validate"
elif validate_type == ValidateType.MODEL_VALIDATE:
phase = "test"
if self.dm.get_data_loader(phase) is None:
self.dm.setup(phase)
data_loader = self.dm.get_data_loader(phase)
# 3. Update model weight
try:
dxo = from_shareable(data)
except:
self.log_error(fl_ctx, "Error when extracting DXO from shareable")
return make_reply(ReturnCode.BAD_TASK_DATA)
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS")
return make_reply(ReturnCode.BAD_TASK_DATA)
load_weights(self.model, dxo.data)
# 4. Run validation
self.model = self.model.to("cuda:0")
for i in range(self._max_retry + 1):
try:
data_loader = self.dm.get_data_loader(phase)
raw_metrics = self.validator.run(self.model, data_loader)
break
except Exception as e:
if i < self._max_retry:
self.log_warning(fl_ctx, f"Error encountered in validation, retrying ({i+1}/{self._max_retry}).")
# Cleanup previous dataset & dataloader
data_loader = None
self.dm._data_loader[phase] = None
self.dm._dataset[phase] = None
# Recreate dataset & dataloader
self.dm.setup(phase)
# Assume both the model & validator are correct
else:
raise RuntimeError(traceback.format_exc())
self.log_info(
fl_ctx,
f"Validation metrics of {model_owner}'s model on" f" {fl_ctx.get_identity_name()}'s data: {raw_metrics}",
)
# For validation before training, only key metric is needed
if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
metrics = {MetaKey.INITIAL_METRICS: raw_metrics[self.key_metric]}
# Save as best model
if self.best_metric < raw_metrics[self.key_metric]:
self.best_metric = raw_metrics[self.key_metric]
self.trainer.save_checkpoint(self.best_model_path, self.model)
else:
metrics = raw_metrics
# 5. Return results
dxo = DXO(data_kind=DataKind.METRICS, data=metrics)
return dxo.to_shareable()
    def finalize(self, fl_ctx: FLContext):
        """Release resources held by this learner.

        Presumably invoked once by the NVFlare runtime at shutdown — confirm
        against the executor. Tears down the data manager's datasets/loaders,
        then closes the TensorBoard writer.

        Args:
            fl_ctx: FL context supplied by the runtime (unused here).
        """
        self.dm.teardown()
        self.tb_logger.close()
| NVFlare-main | research/condist-fl/src/condist_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | research/condist-fl/src/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from monai.losses import DiceCELoss, MaskedDiceLoss
from monai.networks import one_hot
from monai.utils import LossReduction
from torch import Tensor
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
class ConDistTransform(object):
    """Map raw segmentation logits onto conditional background distributions.

    Used by ConDist-FL: student and teacher logits are softened with a
    temperature softmax, then the per-class probabilities are collapsed into
    per-background-group probabilities conditioned on "not local foreground".
    A mask excluding (predicted or labeled) foreground voxels is also produced.
    """

    def __init__(
        self,
        num_classes: int,
        foreground: Sequence[int],
        background: Sequence[Union[int, Sequence[int]]],
        temperature: float = 2.0,
    ):
        self.num_classes = num_classes
        self.foreground = foreground
        self.background = background

        # BUG FIX: the old check (`temperature < 0.0`) admitted 0.0, which
        # divides by zero in softmax(); require strictly positive as the
        # message states (also fixes the "postive" typo).
        if temperature <= 0.0:
            raise ValueError("Softmax temperature must be a positive number!")
        self.temperature = temperature

    def softmax(self, data: Tensor):
        """Temperature-scaled softmax over the channel dimension."""
        return torch.softmax(data / self.temperature, dim=1)

    def reduce_channels(self, data: Tensor, eps: float = 1e-5):
        """Collapse class probabilities into background-group probabilities
        conditioned on the voxel not belonging to the local foreground."""
        batch, channels, *shape = data.shape
        if channels != self.num_classes:
            raise ValueError(f"Expect input with {self.num_classes} channels, get {channels}")

        fg_shape = [batch] + [1] + shape
        bg_shape = [batch] + [len(self.background)] + shape

        # Probability of the union of all local foreground classes.
        fg = torch.zeros(fg_shape, dtype=torch.float32, device=data.device)
        for c in self.foreground:
            fg += data[:, c, ::].view(*fg_shape)

        # Raw probability mass of each background group (a group is either a
        # single class index or a list of class indices to merge).
        bg = torch.zeros(bg_shape, dtype=torch.float32, device=data.device)
        for i, g in enumerate(self.background):
            if isinstance(g, int):
                bg[:, i, ::] = data[:, g, ::]
            else:
                for c in g:
                    bg[:, i, ::] += data[:, c, ::]

        # Conditional probability of each group given "not foreground".
        return bg / (1.0 - fg + eps)

    def generate_mask(self, targets: Tensor, ground_truth: Tensor):
        """Return a float mask: 1 on background voxels, 0 where either the
        teacher prediction or the ground truth marks local foreground."""
        targets = torch.argmax(targets, dim=1, keepdim=True)

        # The mask covers the background but excludes false positive areas
        condition = torch.zeros_like(targets, device=targets.device)
        for c in self.foreground:
            condition = torch.where(torch.logical_or(targets == c, ground_truth == c), 1, condition)
        mask = 1 - condition
        # BUG FIX: torch.Tensor has no `astype` (that is the NumPy API); the
        # original raised AttributeError at runtime. Cast with `.to` instead.
        return mask.to(torch.float32)

    def __call__(self, preds: Tensor, targets: Tensor, ground_truth: Tensor) -> Tuple[Tensor]:
        """Return (student probs, teacher probs, mask) in background-group space."""
        mask = self.generate_mask(targets, ground_truth)

        preds = self.softmax(preds)
        preds = self.reduce_channels(preds)

        targets = self.softmax(targets)
        targets = self.reduce_channels(targets)

        return preds, targets, mask
class MarginalTransform(object):
    """Merge all non-foreground channels into a single background channel.

    Channels not listed in ``foreground`` are summed into one background
    channel placed first, followed by the foreground channels in their listed
    order. Applied to predictions and labels for marginal-loss training.
    """

    def __init__(self, foreground: Sequence[int], softmax: bool = False):
        self.foreground = foreground
        self.softmax = softmax  # apply a channel softmax to preds before merging

    def reduce_background_channels(self, tensor: Tensor) -> Tensor:
        """Return ``tensor`` with background channels summed into channel 0."""
        n_chs = tensor.shape[1]
        slices = torch.split(tensor, 1, dim=1)

        fg = [slices[i] for i in self.foreground]
        bg = sum([slices[i] for i in range(n_chs) if i not in self.foreground])

        output = torch.cat([bg] + fg, dim=1)
        return output

    def __call__(self, preds: Tensor, target: Tensor) -> Tuple[Tensor]:
        """Return (preds, target) in marginalized channel space.

        Raises:
            ValueError: if ``target`` has neither 1 nor ``preds.shape[1]`` channels.
        """
        n_pred_ch = preds.shape[1]
        if n_pred_ch == 1:
            # Marginal loss is not intended for single channel output
            return preds, target

        if self.softmax:
            preds = torch.softmax(preds, 1)

        if target.shape[1] == 1:
            target = one_hot(target, num_classes=n_pred_ch)
        # BUG FIX: compare the channel count, not the whole shape, against
        # n_pred_ch. The old `target.shape != n_pred_ch` compared a Size to an
        # int and was always true, so valid one-hot targets were rejected.
        elif target.shape[1] != n_pred_ch:
            raise ValueError(f"Number of channels of label must be 1 or {n_pred_ch}.")

        preds = self.reduce_background_channels(preds)
        target = self.reduce_background_channels(target)
        return preds, target
class ConDistDiceLoss(_Loss):
    """Masked Dice loss on conditional background distributions (ConDist-FL).

    Student and teacher logits are first mapped by ``ConDistTransform`` onto
    background-group conditional probabilities; a ``MaskedDiceLoss`` then
    compares them on the voxels selected by the transform's mask.
    """

    def __init__(
        self,
        num_classes: int,
        foreground: Sequence[int],
        background: Sequence[Union[int, Sequence[int]]],
        temperature: float = 2.0,
        include_background: bool = True,
        other_act: Optional[Callable] = None,
        squared_pred: bool = False,
        jaccard: bool = False,
        reduction: Union[LossReduction, str] = LossReduction.MEAN,
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
    ) -> None:
        super().__init__()
        # The transform already softens/normalizes inputs, so the wrapped
        # Dice loss runs with all of its own activations disabled.
        self.transform = ConDistTransform(num_classes, foreground, background, temperature=temperature)
        dice_opts = dict(
            include_background=include_background,
            to_onehot_y=False,
            sigmoid=False,
            softmax=False,
            other_act=other_act,
            squared_pred=squared_pred,
            jaccard=jaccard,
            reduction=reduction,
            smooth_nr=smooth_nr,
            smooth_dr=smooth_dr,
            batch=batch,
        )
        self.dice = MaskedDiceLoss(**dice_opts)

    def forward(self, preds: Tensor, targets: Tensor, ground_truth: Tensor):
        """Compute the masked Dice loss between student and teacher outputs."""
        n_chs = preds.shape[1]
        # Collapse a one-hot ground truth back to label indices.
        if ground_truth.shape[1] > 1 and ground_truth.shape[1] == n_chs:
            ground_truth = torch.argmax(ground_truth, dim=1, keepdim=True)
        preds, targets, mask = self.transform(preds, targets, ground_truth)
        return self.dice(preds, targets, mask=mask)
class MarginalDiceCELoss(_Loss):
    """Dice + cross-entropy loss on marginalized channels.

    Non-foreground channels are merged into one background channel by
    ``MarginalTransform`` before a standard ``DiceCELoss`` is applied; all
    remaining options are forwarded to ``DiceCELoss`` unchanged.
    """

    def __init__(
        self,
        foreground: Sequence[int],
        include_background: bool = True,
        softmax: bool = False,
        other_act: Optional[Callable] = None,
        squared_pred: bool = False,
        jaccard: bool = False,
        reduction: str = "mean",
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
        ce_weight: Optional[Tensor] = None,
        lambda_dice: float = 1.0,
        lambda_ce: float = 1.0,
    ):
        super().__init__()
        # Softmax (if requested) is performed inside the transform, so it is
        # disabled in the wrapped DiceCELoss below.
        self.transform = MarginalTransform(foreground, softmax=softmax)
        loss_opts = dict(
            include_background=include_background,
            to_onehot_y=False,
            sigmoid=False,
            softmax=False,
            other_act=other_act,
            squared_pred=squared_pred,
            jaccard=jaccard,
            reduction=reduction,
            smooth_nr=smooth_nr,
            smooth_dr=smooth_dr,
            batch=batch,
            ce_weight=ce_weight,
            lambda_dice=lambda_dice,
            lambda_ce=lambda_ce,
        )
        self.dice_ce = DiceCELoss(**loss_opts)

    def forward(self, preds: Tensor, targets: Tensor):
        """Marginalize both tensors, then evaluate Dice + CE."""
        preds, targets = self.transform(preds, targets)
        return self.dice_ce(preds, targets)
class MoonContrasiveLoss(torch.nn.Module):
    """MOON model-contrastive loss.

    Pulls the current representation ``z`` toward the global model's
    representation ``z_glob`` and away from the previous local model's
    representation ``z_prev``, using temperature-scaled cosine similarities.
    """

    def __init__(self, tau: float = 1.0):
        super().__init__()
        if tau <= 0.0:
            raise ValueError("tau must be positive")
        self.tau = tau  # softmax temperature for the similarity scores

    def forward(self, z: Tensor, z_prev: Tensor, z_glob: Tensor):
        """Return the mean contrastive loss over the batch."""
        # Exponentiated, temperature-scaled cosine similarities to each anchor.
        score_prev = torch.exp(F.cosine_similarity(z, z_prev, dim=1) / self.tau)
        score_glob = torch.exp(F.cosine_similarity(z, z_glob, dim=1) / self.tau)
        # Two-way softmax with the global representation as the positive class.
        per_sample = -torch.log(score_glob / (score_glob + score_prev))
        return per_sample.mean()
| NVFlare-main | research/condist-fl/src/losses.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
def load_weights(model: torch.nn.Module, weights: Dict[str, np.ndarray]) -> torch.nn.Module:
    """Load matching entries of ``weights`` into ``model``.

    Each weight is reshaped to the shape of the corresponding state-dict
    entry; names absent from ``weights`` keep their current values.

    Args:
        model: the model to update in place.
        weights: mapping from state-dict names to numpy arrays.

    Returns:
        The same ``model`` instance, with weights loaded.

    Raises:
        ValueError: if a weight cannot be reshaped/converted to a tensor.
    """
    state = model.state_dict()
    for name in state:
        if name not in weights:
            continue
        w = weights[name]
        try:
            state[name] = torch.as_tensor(np.reshape(w, state[name].shape))
        except Exception as e:
            raise ValueError(f"Convert weight from {name} failed with error {str(e)}")
    model.load_state_dict(state)
    return model
def extract_weights(model: torch.nn.Module) -> Dict[str, np.ndarray]:
    """Export ``model``'s state dict as plain CPU numpy arrays.

    Args:
        model: the model to read from.

    Returns:
        Mapping from state-dict names to numpy arrays.

    Raises:
        ValueError: if an entry cannot be moved to CPU / converted to numpy.
    """
    exported: Dict[str, np.ndarray] = {}
    for name, tensor in model.state_dict().items():
        try:
            exported[name] = tensor.cpu().numpy()
        except Exception as e:
            raise ValueError(f"Convert weight from {name} failed with error: {str(e)}")
    return exported
| NVFlare-main | research/condist-fl/src/utils/model_weights.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib import import_module
from typing import Dict
def get_model(config: Dict):
    """Instantiate a model from a ``{"path", "name", "args"}`` config.

    ``path`` is the module to import, ``name`` the attribute (class/factory)
    inside it, and ``args`` the keyword arguments passed to it.

    Raises:
        ValueError: if ``name`` is not found in the imported module.
    """
    path, name = config["path"], config["name"]
    module = import_module(path)
    if not hasattr(module, name):
        raise ValueError(f"Unable to find {name} from module {path}")
    factory = getattr(module, name)
    return factory(**config["args"])
| NVFlare-main | research/condist-fl/src/utils/get_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Literal
from monai.transforms import (
Compose,
EnsureChannelFirstd,
EnsureTyped,
LoadImaged,
Orientationd,
RandAdjustContrastd,
RandCropByPosNegLabeld,
RandGaussianNoised,
RandGaussianSmoothd,
RandRotated,
RandZoomd,
Spacingd,
SpatialPadd,
)
from .augmentations import (
RandAdjustBrightnessAndContrastd,
RandFlipAxes3Dd,
RandInverseIntensityGammad,
SimulateLowResolutiond,
)
from .normalize import NormalizeIntensityRanged
def get_train_transforms(num_samples: int = 1):
    """Build the MONAI training pipeline for "image"/"label" dictionaries.

    Loads and reorients each case, resamples to a fixed spacing, normalizes
    intensities, crops ``num_samples`` patches of 224x224x64 per case, and
    applies random spatial/intensity augmentations.

    Args:
        num_samples: number of random crops produced per input case.

    Returns:
        A ``Compose`` transform operating on dicts with "image" and "label" keys.
    """
    transforms = Compose(
        [
            # Deterministic loading / geometry normalization.
            LoadImaged(keys=["image", "label"], image_only=True),
            EnsureChannelFirstd(keys=["image", "label"]),
            Orientationd(keys=["image", "label"], as_closest_canonical=True),
            Spacingd(
                keys=["image", "label"], pixdim=[1.44423774, 1.44423774, 2.87368553], mode=["bilinear", "nearest"]
            ),
            # Random spatial augmentations: rotation up to 0.5236 rad (~30 deg)
            # per axis, isotropic zoom in [0.7, 1.4].
            RandRotated(
                keys=["image", "label"],
                range_x=0.5236,
                range_y=0.5236,
                range_z=0.5236,
                prob=0.2,
                mode=["bilinear", "nearest"],
                keep_size=False,
            ),
            RandZoomd(
                keys=["image", "label"],
                prob=0.2,
                min_zoom=0.7,
                max_zoom=1.4,
                mode=["trilinear", "nearest"],
                keep_size=False,
            ),
            # Clip intensities to [-54, 258] then apply (x - 100) / 50.
            NormalizeIntensityRanged(keys=["image"], a_min=-54.0, a_max=258.0, subtrahend=100.0, divisor=50.0),
            # Pad so every volume can host a 224x224x64 crop, then sample
            # patches biased toward labeled (positive) voxels.
            SpatialPadd(keys=["image", "label"], spatial_size=[224, 224, 64]),
            RandCropByPosNegLabeld(
                keys=["image", "label"],
                label_key="label",
                spatial_size=[224, 224, 64],
                pos=2.0,
                neg=1.0,
                num_samples=num_samples,
                image_key="image",
            ),
            # Random intensity augmentations (image only).
            RandGaussianNoised(keys=["image"], prob=0.15, mean=0.0, std=0.1),
            RandGaussianSmoothd(keys=["image"], sigma_x=[0.5, 1.5], sigma_y=[0.5, 1.5], sigma_z=[0.5, 1.5], prob=0.15),
            RandAdjustBrightnessAndContrastd(
                keys=["image"], probs=[0.15, 0.15], brightness_range=[0.7, 1.3], contrast_range=[0.65, 1.5]
            ),
            SimulateLowResolutiond(keys=["image"], prob=0.25, zoom_range=[0.5, 1.0]),
            RandAdjustContrastd(keys=["image"], prob=0.15, gamma=[0.8, 1.2]),
            RandInverseIntensityGammad(keys=["image"], prob=0.15, gamma=[0.8, 1.2]),
            # Random flips applied consistently to image and label.
            RandFlipAxes3Dd(keys=["image", "label"], prob_x=0.50, prob_y=0.50, prob_z=0.50),
            EnsureTyped(keys=["image", "label"]),
        ]
    )
    return transforms
def get_validate_transforms():
    """Build the deterministic validation pipeline.

    Same loading, reorientation, resampling, and intensity normalization as
    training, but without augmentation or cropping.

    Returns:
        A ``Compose`` transform operating on dicts with "image" and "label" keys.
    """
    transforms = Compose(
        [
            LoadImaged(keys=["image", "label"], image_only=True),
            EnsureChannelFirstd(keys=["image", "label"]),
            Orientationd(keys=["image", "label"], as_closest_canonical=True),
            Spacingd(
                keys=["image", "label"], pixdim=[1.44423774, 1.44423774, 2.87368553], mode=["bilinear", "nearest"]
            ),
            # Same intensity windowing/scaling as the training pipeline.
            NormalizeIntensityRanged(keys=["image"], a_min=-54.0, a_max=258.0, subtrahend=100.0, divisor=50.0),
            EnsureTyped(keys=["image", "label"]),
        ]
    )
    return transforms
def get_infer_transforms():
    """Build the inference pipeline.

    NOTE(review): currently identical to the validation pipeline; kept as a
    separate function, presumably so inference can diverge later — confirm.

    Returns:
        A ``Compose`` transform operating on dicts with "image" and "label" keys.
    """
    transforms = Compose(
        [
            LoadImaged(keys=["image", "label"], image_only=True),
            EnsureChannelFirstd(keys=["image", "label"]),
            Orientationd(keys=["image", "label"], as_closest_canonical=True),
            Spacingd(
                keys=["image", "label"], pixdim=[1.44423774, 1.44423774, 2.87368553], mode=["bilinear", "nearest"]
            ),
            NormalizeIntensityRanged(keys=["image"], a_min=-54.0, a_max=258.0, subtrahend=100.0, divisor=50.0),
            EnsureTyped(keys=["image", "label"]),
        ]
    )
    return transforms
def get_transforms(mode: Literal["train", "validate", "infer"], num_samples: int = 1):
    """Return the transform pipeline for the requested mode.

    Args:
        mode: one of "train", "validate", or "infer".
        num_samples: crops per case; only used by the training pipeline.

    Raises:
        ValueError: if ``mode`` is not a supported value.
    """
    builders = {
        "train": lambda: get_train_transforms(num_samples=num_samples),
        "validate": get_validate_transforms,
        "infer": get_infer_transforms,
    }
    if mode not in builders:
        raise ValueError(f"Unsupported transform mode {mode}.")
    return builders[mode]()
| NVFlare-main | research/condist-fl/src/data/transforms.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Hashable, Mapping, Optional
import numpy as np
from monai.config import DtypeLike, KeysCollection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.meta_obj import get_track_meta
from monai.transforms import MapTransform, Transform
from monai.transforms.utils_pytorch_numpy_unification import clip
from monai.utils.enums import TransformBackends
from monai.utils.type_conversion import convert_data_type, convert_to_tensor
class NormalizeIntensityRange(Transform):
    """Clip intensities to ``[a_min, a_max]``, then shift and scale them.

    The output is ``(clip(img, a_min, a_max) - subtrahend) / divisor`` cast
    to ``dtype``. Works on torch tensors and numpy arrays alike.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, a_min: float, a_max: float, subtrahend: float, divisor: float, dtype: DtypeLike = np.float32):
        if a_min > a_max:
            raise ValueError("a_min must be lesser than a_max.")

        self.a_min = a_min
        self.a_max = a_max
        self.subtrahend = subtrahend
        self.divisor = divisor
        self.dtype = dtype

    def __call__(
        self,
        img: NdarrayOrTensor,
        subtrahend: Optional[float] = None,
        divisor: Optional[float] = None,
        dtype: Optional[DtypeLike] = None,
    ) -> NdarrayOrTensor:
        """Normalize ``img``; per-call overrides fall back to constructor values."""
        sub = self.subtrahend if subtrahend is None else subtrahend
        div = self.divisor if divisor is None else divisor
        out_dtype = self.dtype if dtype is None else dtype

        img = convert_to_tensor(img, track_meta=get_track_meta())
        normalized = (clip(img, self.a_min, self.a_max) - sub) / div

        ret: NdarrayOrTensor = convert_data_type(normalized, dtype=out_dtype)[0]
        return ret
class NormalizeIntensityRanged(MapTransform):
    """Dictionary wrapper of ``NormalizeIntensityRange``.

    Applies the same clip/shift/scale normalization to every configured key.
    """

    backend = NormalizeIntensityRange.backend

    def __init__(
        self,
        keys: KeysCollection,
        a_min: float,
        a_max: float,
        subtrahend: float,
        divisor: float,
        dtype: Optional[DtypeLike] = np.float32,
        allow_missing_keys: bool = False,
    ):
        super().__init__(keys, allow_missing_keys)
        # One shared array-level transform for all keys.
        self.t = NormalizeIntensityRange(a_min, a_max, subtrahend, divisor, dtype=dtype)

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
        out = dict(data)
        out.update((key, self.t(out[key])) for key in self.keys)
        return out
| NVFlare-main | research/condist-fl/src/data/normalize.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monai.data import DataLoader
from torch.utils.data import Dataset as _Dataset
def create_data_loader(dataset: _Dataset, batch_size: int = 1, num_workers: int = 0, shuffle: bool = False):
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle)
| NVFlare-main | research/condist-fl/src/data/data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["DataManager"]
from .data_manager import DataManager
| NVFlare-main | research/condist-fl/src/data/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Dict, Literal
from monai.data import CacheDataset, Dataset
from monai.data.decathlon_datalist import load_decathlon_datalist
from .transforms import get_transforms
def create_dataset(app_root: str, config: Dict, split: str, mode: Literal["train", "validate", "infer"]):
    """Build a (optionally cached) MONAI dataset for one data-list split.

    Args:
        app_root: kept for interface compatibility; not used here.
        config: expects "data_root", "data_list", optional "num_samples" and a
            "dataset" section with caching options.
        split: decathlon data-list key (e.g. "training", "validation").
        mode: transform pipeline to attach ("train", "validate", or "infer").
    """
    ds_config = config.get("dataset", {})

    # Resolve the items of the requested split relative to the data root.
    data = load_decathlon_datalist(
        config["data_list"], is_segmentation=True, data_list_key=split, base_dir=config["data_root"]
    )
    transforms = get_transforms(mode=mode, num_samples=config.get("num_samples", 1))

    if not ds_config.get("use_cache_dataset", False):
        return Dataset(data, transforms)

    # Cache deterministic transform outputs in memory to speed up epochs.
    return CacheDataset(
        data,
        transforms,
        cache_num=ds_config.get("cache_num", sys.maxsize),
        cache_rate=ds_config.get("cache_rate", 1.0),
        num_workers=ds_config.get("num_workers", 1),
    )
| NVFlare-main | research/condist-fl/src/data/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from .data_loader import create_data_loader
from .dataset import create_dataset
class DataManager(object):
    """Owns the datasets and data loaders for the train/validate/test stages.

    Datasets/loaders are built lazily via :meth:`setup` and cached until
    :meth:`teardown`; accessors return ``None`` for stages not yet set up.
    """

    def __init__(self, app_root: str, config: Dict):
        self.app_root = app_root
        self.config = config

        self._dataset = {}
        self._data_loader = {}

    def _build_dataset(self, stage: str):
        # Map the runtime stage onto (transform mode, data-list split).
        if stage == "train":
            mode = "train"
            split = "training"
        elif stage == "validate":
            mode = "validate"
            split = "validation"
        elif stage == "test":
            # Testing reuses the validation transforms on the held-out split.
            mode = "validate"
            split = "testing"
        else:
            raise ValueError(f"Unknown stage {stage} for dataset")
        return create_dataset(self.app_root, self.config, split, mode)

    def _build_data_loader(self, stage: str):
        ds = self._dataset.get(stage)
        if stage == "train":
            dl = create_data_loader(
                ds,
                batch_size=self.config["data_loader"].get("batch_size", 1),
                num_workers=self.config["data_loader"].get("num_workers", 0),
                shuffle=True,
            )
        else:
            # Evaluation loaders always run with batch size 1 and no shuffling.
            dl = create_data_loader(ds, batch_size=1, num_workers=self.config["data_loader"].get("num_workers", 0))
        return dl

    def setup(self, stage: Optional[str] = None):
        """Build dataset + loader for ``stage``, or for all stages when None."""
        if stage is None:
            for s in ["train", "validate", "test"]:
                self._dataset[s] = self._build_dataset(s)
                # BUG FIX: was `_build_data_loader(s)` (missing `self.`), which
                # raised NameError whenever setup() was called without a stage.
                self._data_loader[s] = self._build_data_loader(s)
        elif stage in ["train", "validate", "test"]:
            self._dataset[stage] = self._build_dataset(stage)
            self._data_loader[stage] = self._build_data_loader(stage)

    def get_dataset(self, stage: str):
        """Return the cached dataset for ``stage`` or None if not set up."""
        return self._dataset.get(stage, None)

    def get_data_loader(self, stage: str):
        """Return the cached data loader for ``stage`` or None if not set up."""
        return self._data_loader.get(stage, None)

    def teardown(self):
        """Drop all cached datasets and data loaders."""
        self._dataset = {}
        self._data_loader = {}
| NVFlare-main | research/condist-fl/src/data/data_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Hashable, List, Mapping, Optional, Sequence, Union
import numpy as np
from monai.config import DtypeLike, KeysCollection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.meta_obj import get_track_meta
from monai.transforms import MapTransform, RandomizableTransform
from monai.transforms.utils_pytorch_numpy_unification import clip, max, min
from monai.utils.enums import TransformBackends
from monai.utils.misc import ensure_tuple_rep
from monai.utils.type_conversion import convert_data_type, convert_to_tensor
class RandAdjustBrightnessAndContrast(RandomizableTransform):
    """Randomly scale intensities to adjust brightness and/or contrast.

    Brightness and contrast are drawn independently with their own
    probabilities. Brightness scales both the intensities and the clipping
    window; contrast scales intensities but clips back to the (possibly
    brightness-scaled) original window, so only the spread changes.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(
        self,
        probs: Union[float, List[float]] = [0.15, 0.15],
        brightness_range: Optional[List[float]] = None,
        contrast_range: Optional[List[float]] = None,
        dtype: DtypeLike = np.float32,
    ):
        """
        Args:
            probs: probabilities for (brightness, contrast); a scalar is
                broadcast to both. NOTE(review): mutable default list; appears
                never mutated here, but a tuple default would be safer.
            brightness_range: [low, high] scale factors, or None to disable.
            contrast_range: [low, high] scale factors, or None to disable.
            dtype: output dtype.
        """
        probs = ensure_tuple_rep(probs, 2)

        # A disabled adjustment (range None) forces its probability to 0.
        if brightness_range is None:
            p = 0.0
        else:
            p = probs[0]
            if len(brightness_range) == 2:
                self.brightness = sorted(brightness_range)
            else:
                raise ValueError("Brightness range must be None or a list with length 2.")

        if contrast_range is None:
            q = 0.0
        else:
            q = probs[1]
            if len(contrast_range) == 2:
                self.contrast = sorted(contrast_range)
            else:
                raise ValueError("Contrast range must be None or a list with length 2.")

        # Probability that at least one of the two adjustments fires.
        prob = (p + q) - p * q
        RandomizableTransform.__init__(self, prob)

        self.prob_b = p
        self.prob_c = q

        # Per-draw scale factors; None means "not drawn this round".
        self._brightness = None
        self._contrast = None

        self.dtype = dtype

    def clear(self):
        """Reset the per-draw state before a new randomization."""
        self._brightness = None
        self._contrast = None
        self._do_transform = False

    def randomize(self, data: Any = None) -> None:
        """Draw brightness/contrast factors independently for this round."""
        self.clear()
        p, q = self.R.rand(2)
        if p < self.prob_b:
            self._brightness = self.R.uniform(low=self.brightness[0], high=self.brightness[1])
            self._do_transform = True
        if q < self.prob_c:
            self._contrast = self.R.uniform(low=self.contrast[0], high=self.contrast[1])
            self._do_transform = True

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """Apply the drawn adjustment to ``img`` (no-op if nothing fired)."""
        if randomize:
            self.randomize()

        if not self._do_transform:
            return img

        img = convert_to_tensor(img, track_meta=get_track_meta())
        # `min`/`max` here are MONAI's array/tensor reductions imported at
        # module level (they shadow the builtins).
        min_intensity = min(img)
        max_intensity = max(img)

        scale = 1.0
        if self._brightness:
            # Brightness rescales the clip window along with the data.
            scale *= self._brightness
            min_intensity *= self._brightness
            max_intensity *= self._brightness
        if self._contrast:
            scale *= self._contrast

        # In-place scale of the converted tensor, then clip to the window.
        img *= scale
        img = clip(img, min_intensity, max_intensity)

        ret: NdarrayOrTensor = convert_data_type(img, dtype=self.dtype)[0]
        return ret
class RandAdjustBrightnessAndContrastd(MapTransform, RandomizableTransform):
    """Dictionary wrapper of ``RandAdjustBrightnessAndContrast``.

    One random draw is shared by all keys so every entry receives the same
    brightness/contrast adjustment.
    """

    backend = RandAdjustBrightnessAndContrast.backend

    def __init__(
        self,
        keys: KeysCollection,
        probs: Union[float, List[float]] = [0.15, 0.15],
        brightness_range: Optional[List[float]] = None,
        contrast_range: Optional[List[float]] = None,
        dtype: DtypeLike = np.float32,
    ):
        MapTransform.__init__(self, keys)
        # The wrapper always "fires"; the wrapped transform owns the real probability.
        RandomizableTransform.__init__(self, 1.0)
        self.t = RandAdjustBrightnessAndContrast(probs, brightness_range, contrast_range, dtype)

    def randomize(self) -> None:
        self.t.randomize()

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
        out = dict(data)
        self.randomize()
        if self.t._do_transform:
            for key in self.keys:
                out[key] = self.t(out[key], randomize=False)
        else:
            # No-op draw: still normalize every entry to a (meta-)tensor.
            for key in self.keys:
                out[key] = convert_to_tensor(out[key], track_meta=get_track_meta())
        return out
class RandInverseIntensityGamma(RandomizableTransform):
    """Randomly apply a gamma correction on the *inverted* intensity range.

    Intensities are mapped to [0, 1], inverted (1 - x), raised to a random
    gamma, inverted back, and rescaled to the original range.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, prob: float = 0.15, gamma: Union[Sequence[float], float] = (0.7, 1.5)):
        """
        Args:
            prob: probability of applying the transform.
            gamma: either a scalar upper bound (lower bound fixed at 0.5) or a
                pair giving the sampling range.
        """
        RandomizableTransform.__init__(self, prob)

        if isinstance(gamma, (int, float)):
            if gamma <= 0.5:
                raise ValueError("If gamma is single number, gamma must >= 0.5.")
            self.gamma = (0.5, gamma)
        elif len(gamma) != 2:
            raise ValueError("Gamma must a pair of numbers.")
        else:
            # NOTE(review): `min`/`max` here are MONAI's reductions imported at
            # module level (they shadow the builtins); on a 2-sequence of
            # floats they appear to behave like the builtins — confirm.
            self.gamma = (min(gamma), max(gamma))

        # Gamma drawn for the current round; None until randomize() fires.
        self.gamma_value: Optional[float] = None

    def randomize(self, data: Optional[Any] = None) -> None:
        """Decide whether to fire and, if so, draw this round's gamma."""
        super().randomize(None)

        if not self._do_transform:
            return None

        self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1])

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """Apply the inverse-gamma adjustment to ``img`` (no-op if not fired)."""
        img = convert_to_tensor(img, track_meta=get_track_meta())

        if randomize:
            self.randomize()

        if not self._do_transform:
            return img

        if self.gamma_value is None:
            raise RuntimeError("`gamma_value` is None, call randomize first.")

        # Guard against division by zero on constant images.
        eps = 1e-7
        min_intensity = min(img)
        max_intensity = max(img)

        # Normalize to [0, 1], invert, gamma-correct, invert back, denormalize.
        y = 1.0 - (img - min_intensity) / (max_intensity - min_intensity + eps)
        y = y**self.gamma_value
        y = (1.0 - y) * (max_intensity - min_intensity) + min_intensity
        return y
class RandInverseIntensityGammad(MapTransform, RandomizableTransform):
    """Dictionary wrapper of ``RandInverseIntensityGamma``.

    One random draw is shared by all keys so every entry receives the same
    gamma adjustment.
    """

    backend = RandInverseIntensityGamma.backend

    def __init__(self, keys: KeysCollection, prob: float = 0.15, gamma: Union[Sequence[float], float] = (0.7, 1.5)):
        MapTransform.__init__(self, keys)
        # The wrapper always "fires"; the wrapped transform owns the real probability.
        RandomizableTransform.__init__(self, 1.0)
        self.t = RandInverseIntensityGamma(prob, gamma)

    def randomize(self, data: Optional[Any] = None) -> None:
        self.t.randomize()

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
        out = dict(data)
        self.randomize()
        if self.t._do_transform:
            for key in self.keys:
                out[key] = self.t(out[key], randomize=False)
        else:
            # No-op draw: still normalize every entry to a (meta-)tensor.
            for key in self.keys:
                out[key] = convert_to_tensor(out[key], track_meta=get_track_meta())
        return out
| NVFlare-main | research/condist-fl/src/data/augmentations/intensity.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"RandAdjustBrightnessAndContrast",
"RandAdjustBrightnessAndContrastd",
"RandInverseIntensityGamma",
"RandInverseIntensityGammad",
"RandFlipAxes3D",
"RandFlipAxes3Dd",
"SimulateLowResolution",
"SimulateLowResolutiond",
]
from .intensity import (
RandAdjustBrightnessAndContrast,
RandAdjustBrightnessAndContrastd,
RandInverseIntensityGamma,
RandInverseIntensityGammad,
)
from .spatial import RandFlipAxes3D, RandFlipAxes3Dd, SimulateLowResolution, SimulateLowResolutiond
| NVFlare-main | research/condist-fl/src/data/augmentations/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Hashable, List, Mapping
import numpy as np
import torch
import torch.nn.functional as F
from monai.config import DtypeLike, KeysCollection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.meta_obj import get_track_meta
from monai.transforms import MapTransform, RandomizableTransform
from monai.utils.enums import TransformBackends
from monai.utils.type_conversion import convert_data_type, convert_to_tensor
class RandFlipAxes3D(RandomizableTransform):
    """Randomly flip a volume along each of its last three axes independently."""

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, prob_x: float = 0.5, prob_y: float = 0.5, prob_z: float = 0.5, dtype: DtypeLike = np.float32):
        def checked(value, name):
            # Reject probabilities outside [0, 1] with a descriptive error.
            if 0.0 <= value <= 1.0:
                return value
            raise ValueError(f"Probability {name} must between 0 and 1.")

        p = checked(prob_x, "prob_x")
        q = checked(prob_y, "prob_y")
        r = checked(prob_z, "prob_z")

        # Probability that at least one axis flips; stored by the base class,
        # but the actual decision is recomputed per draw in randomize().
        RandomizableTransform.__init__(self, 1.0 - (1.0 - p) * (1.0 - q) * (1.0 - r))

        self.p = p
        self.q = q
        self.r = r

        self._flip_x = False
        self._flip_y = False
        self._flip_z = False

        self.dtype = dtype

    def randomize(self) -> None:
        """Draw an independent flip decision for each of the three axes."""
        draws = self.R.rand(3)
        self._flip_x = draws[0] < self.p
        self._flip_y = draws[1] < self.q
        self._flip_z = draws[2] < self.r
        self._do_transform = bool(self._flip_x or self._flip_y or self._flip_z)

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """Flip ``img`` along the axes selected by the last randomization."""
        if randomize:
            self.randomize()
        if not self._do_transform:
            return img

        img = convert_to_tensor(img, track_meta=get_track_meta())
        ndim = len(img.shape)
        # The three spatial axes are the trailing dimensions of the tensor.
        flips = (self._flip_x, self._flip_y, self._flip_z)
        axes = [ndim - 3 + i for i, wanted in enumerate(flips) if wanted]
        if axes:
            img = torch.flip(img, axes)

        ret: NdarrayOrTensor = convert_data_type(img, dtype=self.dtype)[0]
        return ret
class RandFlipAxes3Dd(RandomizableTransform, MapTransform):
    """Dictionary wrapper of ``RandFlipAxes3D``.

    One random draw is shared by all keys so image and label stay aligned.
    """

    def __init__(
        self,
        keys: KeysCollection,
        prob_x: float = 0.5,
        prob_y: float = 0.5,
        prob_z: float = 0.5,
        dtype: DtypeLike = np.float32,
    ):
        MapTransform.__init__(self, keys)
        # The wrapper always "fires"; the wrapped transform owns the real probability.
        RandomizableTransform.__init__(self, 1.0)
        self.t = RandFlipAxes3D(prob_x, prob_y, prob_z, dtype)

    def randomize(self) -> None:
        self.t.randomize()

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
        out = dict(data)
        self.randomize()
        if self.t._do_transform:
            for key in self.keys:
                out[key] = self.t(out[key], randomize=False)
        else:
            # No-op draw: still normalize every entry to a (meta-)tensor.
            for key in self.keys:
                out[key] = convert_to_tensor(out[key], track_meta=get_track_meta())
        return out
class SimulateLowResolution(RandomizableTransform):
    """Randomly simulate a low-resolution scan (nnU-Net style augmentation).

    With probability ``prob`` the image is downsampled by a factor drawn
    uniformly from ``zoom_range`` (nearest-exact) and then upsampled back to
    the original size (trilinear), blurring fine detail.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, prob: float = 0.125, zoom_range: List[float] = [0.5, 1.0], dtype: DtypeLike = np.float32):
        RandomizableTransform.__init__(self, prob)
        # Copy so the shared mutable default (or the caller's list) cannot be
        # aliased/mutated through the instance attribute.
        self.zoom_range = list(zoom_range)
        self._zoom_scale = 1.0
        self.dtype = dtype

    def randomize(self) -> None:
        # Base class draws _do_transform with probability `prob`.
        super().randomize(None)
        if not self._do_transform:
            return
        self._zoom_scale = self.R.uniform(self.zoom_range[0], self.zoom_range[1])

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        if randomize:
            self.randomize()
        if not self._do_transform:
            return img

        img = convert_to_tensor(img, track_meta=get_track_meta())
        # assumes img is channel-first 3D, i.e. (C, D, H, W) — TODO confirm
        img = img.unsqueeze(0)  # Add batch dimension for F.interpolate

        # Downsample to the zoomed shape, then restore the original shape.
        target_shape = [int(round(s * self._zoom_scale)) for s in img.shape[2:]]
        tmp = F.interpolate(img, size=target_shape, mode="nearest-exact")
        img = F.interpolate(tmp, size=img.shape[2:], mode="trilinear")

        img = img.squeeze(0)  # Remove batch dimension
        ret: NdarrayOrTensor = convert_data_type(img, dtype=self.dtype)[0]
        return ret
class SimulateLowResolutiond(RandomizableTransform, MapTransform):
    """Dictionary-based wrapper around :class:`SimulateLowResolution`.

    All keys share one random decision and zoom factor per call.
    """

    backend = SimulateLowResolution.backend

    def __init__(
        self,
        keys: KeysCollection,
        prob: float = 0.125,
        zoom_range: List[float] = [0.5, 1.0],
        dtype: DtypeLike = np.float32,
    ):
        MapTransform.__init__(self, keys)
        RandomizableTransform.__init__(self, 1.0)
        # Pass a copy so the shared mutable default argument cannot be aliased
        # by the wrapped transform.
        self.t = SimulateLowResolution(prob, list(zoom_range), dtype)

    def randomize(self) -> None:
        # Delegate so every key sees the same decision and zoom scale.
        self.t.randomize()

    def __call__(self, data: Mapping[Hashable, NdarrayOrTensor]) -> Dict[Hashable, NdarrayOrTensor]:
        d = dict(data)
        self.randomize()
        if not self.t._do_transform:
            # Not transforming this round: still normalize values to tensors.
            for key in self.keys:
                d[key] = convert_to_tensor(d[key], track_meta=get_track_meta())
            return d
        for key in self.keys:
            d[key] = self.t(d[key], randomize=False)
        return d
| NVFlare-main | research/condist-fl/src/data/augmentations/spatial.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of the trainer package: the plain local trainer and the
# ConDist (distillation) trainer.
__all__ = ["Trainer", "ConDistTrainer"]
from .condist import ConDistTrainer
from .trainer import Trainer
| NVFlare-main | research/condist-fl/src/trainer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from pathlib import Path, PurePath
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
from losses import MarginalDiceCELoss
from monai.losses import DeepSupervisionLoss
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
class Trainer(object):
    """Local trainer that runs a fixed number of optimization steps per FL round.

    Training uses a marginal Dice+CE loss with deep supervision, optional
    mixed precision, SGD with Nesterov momentum, and a cosine-annealing LR
    schedule whose state is carried across rounds.
    """

    def __init__(self, task_config: Dict):
        """Read hyperparameters from ``task_config`` and build the loss functions."""
        self.init_lr = task_config["training"].get("lr", 1e-2)
        self.max_steps = task_config["training"]["max_steps"]
        self.max_rounds = task_config["training"]["max_rounds"]

        self.use_half_precision = task_config["training"].get("use_half_precision", False)
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_half_precision)

        # Only the foreground class list is needed for the marginal loss.
        foreground = task_config["condist_config"]["foreground"]

        self.model_config = task_config["model"]

        self.marginal_loss_fn = MarginalDiceCELoss(foreground, softmax=True, smooth_nr=0.0, batch=True)
        self.ds_loss_fn = DeepSupervisionLoss(self.marginal_loss_fn, weights=[0.5333, 0.2667, 0.1333, 0.0667])

        self.current_step = 0
        self.current_round = 0

        # Optimizer/scheduler objects are rebuilt every round; their states
        # persist between rounds so training resumes seamlessly.
        self.opt = None
        self.opt_state = None
        self.sch = None
        self.sch_state = None

    def update_condist_weight(self):
        # NOTE(review): copy-paste from ConDistTrainer — `self.weight_range`
        # is never set in this class, so calling this raises AttributeError.
        # Confirm whether it can be removed.
        left = min(self.weight_range)
        right = max(self.weight_range)
        intv = (right - left) / (self.max_rounds - 1)
        self.weight = left + intv * self.current_round

    def configure_optimizer(self):
        """(Re)create optimizer and LR scheduler, restoring any saved states."""
        self.opt = SGD(self.model.parameters(), lr=self.init_lr, momentum=0.99, nesterov=True, weight_decay=1e-5)
        if self.opt_state is not None:
            self.opt.load_state_dict(self.opt_state)

        self.sch = CosineAnnealingLR(self.opt, T_max=self.max_steps, eta_min=1e-7)
        if self.sch_state is not None:
            self.sch.load_state_dict(self.sch_state)

    def training_step(self, model: nn.Module, batch: Dict, device: str = "cuda:0"):
        """Forward one batch and return the deep-supervision loss."""
        image = batch["image"].to(device)
        label = batch["label"].to(device)

        preds = model(image)
        if preds.dim() == 6:
            # Deep-supervision output: split (B, S, C, ...) into S predictions.
            preds = [preds[:, i, ::] for i in range(preds.shape[1])]
        loss = self.ds_loss_fn(preds, label)

        # Log training information
        if self.logger is not None:
            step = self.current_step
            self.logger.add_scalar("loss", loss, step)
            self.logger.add_scalar("lr", self.sch.get_last_lr()[-1], step)
        return loss

    def get_batch(self, data_loader: DataLoader, num_steps: int):
        """Yield exactly ``num_steps`` batches, restarting the loader when exhausted."""
        it = iter(data_loader)
        for i in range(num_steps):
            try:
                batch = next(it)
            except StopIteration:
                it = iter(data_loader)
                batch = next(it)
            yield batch

    def training_loop(self, data_loader: DataLoader, num_steps: int, device: str = "cuda:0"):
        """Run ``num_steps`` AMP training steps with grad clipping and LR stepping."""
        target_step = self.current_step + num_steps
        with tqdm(total=num_steps, dynamic_ncols=True) as pbar:
            # Configure progress bar
            pbar.set_description(f"Round {self.current_round}")

            for batch in self.get_batch(data_loader, num_steps):
                # Forward (fix: propagate `device` instead of silently falling
                # back to training_step's "cuda:0" default).
                with torch.cuda.amp.autocast(enabled=self.use_half_precision):
                    loss = self.training_step(self.model, batch, device)

                # Backward
                self.opt.zero_grad()
                self.scaler.scale(loss).backward()

                # Gradient clipping (requires unscaled gradients under AMP)
                self.scaler.unscale_(self.opt)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0, norm_type=2.0)

                # Apply gradient
                self.scaler.step(self.opt)
                self.sch.step()
                self.scaler.update()

                # Update progress bar
                pbar.set_postfix(loss=f"{loss.item():.2f}")
                pbar.update(1)

                self.current_step += 1
                if self.current_step >= target_step:
                    break
                if self.abort is not None and self.abort.triggered:
                    break

    def setup(self, model: nn.Module, logger: SummaryWriter, abort_signal: Any):
        """Attach model, logger and abort signal, then build the optimizer."""
        self.model = model
        self.logger = logger
        self.abort = abort_signal  # may be None; training_loop checks for that
        self.configure_optimizer()

    def cleanup(self):
        """Persist optimizer/scheduler state and drop references to free GPU memory."""
        # Save opt & sch states
        self.opt_state = deepcopy(self.opt.state_dict())
        self.sch_state = deepcopy(self.sch.state_dict())

        # Cleanup opt, sch & models (fix: the attribute set in setup() is
        # `self.abort`, not `self.abort_signal`).
        self.sch = None
        self.opt = None
        self.model = None
        self.logger = None
        self.abort = None

        # Cleanup GPU cache
        torch.cuda.empty_cache()

    def save_checkpoint(self, path: str, model: nn.Module) -> None:
        """Write model weights, optimizer/scheduler states and counters to ``path``."""
        path = PurePath(path)
        Path(path.parent).mkdir(parents=True, exist_ok=True)
        ckpt = {
            "round": self.current_round,
            "global_steps": self.current_step,
            "model": model.state_dict(),
            "optimizer": self.opt_state,
            "scheduler": self.sch_state,
        }
        torch.save(ckpt, str(path))

    def load_checkpoint(self, path: str, model: nn.Module) -> nn.Module:
        """Restore counters and optimizer/scheduler states; load weights into ``model``."""
        ckpt = torch.load(path)
        self.current_step = ckpt.get("global_steps", 0)
        self.current_round = ckpt.get("round", 0)
        self.opt_state = ckpt.get("optimizer", None)
        self.sch_state = ckpt.get("scheduler", None)
        model.load_state_dict(ckpt["model"])
        return model

    def run(
        self,
        model: nn.Module,
        data_loader: DataLoader,
        num_steps: int,
        device: str = "cuda:0",
        logger: Optional[SummaryWriter] = None,
        abort_signal: Optional[Any] = None,
    ):
        """Train ``model`` for ``num_steps`` steps and advance the round counter."""
        self.setup(model, logger, abort_signal)

        # Run training (fix: propagate `device` to the loop)
        self.model.train()
        self.training_loop(data_loader, num_steps, device)
        self.current_round += 1

        self.cleanup()
| NVFlare-main | research/condist-fl/src/trainer/trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from pathlib import Path, PurePath
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
from losses import ConDistDiceLoss, MarginalDiceCELoss
from monai.losses import DeepSupervisionLoss
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils.get_model import get_model
class ConDistTrainer(object):
    """Local trainer combining supervised deep supervision with ConDist distillation.

    In addition to the marginal Dice+CE deep-supervision loss, each step
    distills from a frozen copy of the incoming global model via
    ``ConDistDiceLoss``; the distillation weight is interpolated linearly
    over federated rounds within ``weight_schedule_range``.
    """

    def __init__(self, task_config: Dict):
        """Read hyperparameters from ``task_config`` and build all loss functions."""
        self.init_lr = task_config["training"].get("lr", 1e-2)
        self.max_steps = task_config["training"]["max_steps"]
        self.max_rounds = task_config["training"]["max_rounds"]

        self.use_half_precision = task_config["training"].get("use_half_precision", False)
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_half_precision)

        num_classes = len(task_config["classes"])
        foreground = task_config["condist_config"]["foreground"]
        background = task_config["condist_config"]["background"]
        temperature = task_config["condist_config"].get("temperature", 2.0)

        self.model_config = task_config["model"]
        self.weight_range = task_config["condist_config"]["weight_schedule_range"]

        self.condist_loss_fn = ConDistDiceLoss(
            num_classes, foreground, background, temperature=temperature, smooth_nr=0.0, batch=True
        )
        self.marginal_loss_fn = MarginalDiceCELoss(foreground, softmax=True, smooth_nr=0.0, batch=True)
        self.ds_loss_fn = DeepSupervisionLoss(self.marginal_loss_fn, weights=[0.5333, 0.2667, 0.1333, 0.0667])

        self.current_step = 0
        self.current_round = 0

        # Optimizer/scheduler objects are rebuilt every round; their states
        # persist between rounds so training resumes seamlessly.
        self.opt = None
        self.opt_state = None
        self.sch = None
        self.sch_state = None

    def update_condist_weight(self):
        """Linearly interpolate the distillation weight across rounds."""
        left = min(self.weight_range)
        right = max(self.weight_range)
        intv = (right - left) / (self.max_rounds - 1)
        self.weight = left + intv * self.current_round

    def configure_optimizer(self):
        """(Re)create optimizer and LR scheduler, restoring any saved states."""
        self.opt = SGD(self.model.parameters(), lr=self.init_lr, momentum=0.99, nesterov=True, weight_decay=1e-5)
        if self.opt_state is not None:
            self.opt.load_state_dict(self.opt_state)

        self.sch = CosineAnnealingLR(self.opt, T_max=self.max_steps, eta_min=1e-7)
        if self.sch_state is not None:
            self.sch.load_state_dict(self.sch_state)

    def training_step(self, model: nn.Module, batch: Dict, device: str = "cuda:0"):
        """Forward one batch; return supervised loss + weighted ConDist loss."""
        image = batch["image"].to(device)
        label = batch["label"].to(device)

        preds = model(image)
        if preds.dim() == 6:
            # Deep-supervision output: split (B, S, C, ...) into S predictions.
            preds = [preds[:, i, ::] for i in range(preds.shape[1])]
        ds_loss = self.ds_loss_fn(preds, label)

        # Teacher targets come from the frozen global model (no gradients).
        with torch.no_grad():
            targets = self.global_model(image)
            if targets.dim() == 6:
                targets = targets[:, 0, ::]

        # Distill only on the highest-resolution prediction.
        condist_loss = self.condist_loss_fn(preds[0], targets, label)
        loss = ds_loss + self.weight * condist_loss

        # Log training information
        if self.logger is not None:
            step = self.current_step
            self.logger.add_scalar("loss", loss, step)
            self.logger.add_scalar("loss_sup", ds_loss, step)
            self.logger.add_scalar("loss_condist", condist_loss, step)
            self.logger.add_scalar("lr", self.sch.get_last_lr()[-1], step)
            self.logger.add_scalar("condist_weight", self.weight, step)
        return loss

    def get_batch(self, data_loader: DataLoader, num_steps: int):
        """Yield exactly ``num_steps`` batches, restarting the loader when exhausted."""
        it = iter(data_loader)
        for i in range(num_steps):
            try:
                batch = next(it)
            except StopIteration:
                it = iter(data_loader)
                batch = next(it)
            yield batch

    def training_loop(self, data_loader: DataLoader, num_steps: int, device: str = "cuda:0"):
        """Run ``num_steps`` AMP training steps with grad clipping and LR stepping."""
        self.global_model = self.global_model.to(device)

        target_step = self.current_step + num_steps
        with tqdm(total=num_steps, dynamic_ncols=True) as pbar:
            # Configure progress bar
            pbar.set_description(f"Round {self.current_round}")

            for batch in self.get_batch(data_loader, num_steps):
                # Forward (fix: propagate `device` instead of silently falling
                # back to training_step's "cuda:0" default).
                with torch.cuda.amp.autocast(enabled=self.use_half_precision):
                    loss = self.training_step(self.model, batch, device)

                # Backward
                self.opt.zero_grad()
                self.scaler.scale(loss).backward()

                # Gradient clipping (requires unscaled gradients under AMP)
                self.scaler.unscale_(self.opt)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0, norm_type=2.0)

                # Apply gradient
                self.scaler.step(self.opt)
                self.sch.step()
                self.scaler.update()

                # Update progress bar
                pbar.set_postfix(loss=f"{loss.item():.2f}")
                pbar.update(1)

                self.current_step += 1
                if self.current_step >= target_step:
                    break
                if self.abort is not None and self.abort.triggered:
                    break

    def setup(self, model: nn.Module, logger: SummaryWriter, abort_signal: Any):
        """Attach model/logger/abort signal, freeze a teacher copy, build the optimizer."""
        self.model = model

        # Build a frozen copy of the incoming global weights as the teacher.
        self.global_model = get_model(self.model_config)
        self.global_model.load_state_dict(deepcopy(model.state_dict()))
        self.global_model.eval()

        self.logger = logger
        self.abort = abort_signal  # may be None; training_loop checks for that

        self.configure_optimizer()
        self.update_condist_weight()

    def cleanup(self):
        """Persist optimizer/scheduler state and drop references to free GPU memory."""
        # Save opt & sch states
        self.opt_state = deepcopy(self.opt.state_dict())
        self.sch_state = deepcopy(self.sch.state_dict())

        # Cleanup opt, sch & models (fix: the attribute set in setup() is
        # `self.abort`, not `self.abort_signal`).
        self.sch = None
        self.opt = None
        self.model = None
        self.global_model = None
        self.logger = None
        self.abort = None

        # Cleanup GPU cache
        torch.cuda.empty_cache()

    def save_checkpoint(self, path: str, model: nn.Module) -> None:
        """Write model weights, optimizer/scheduler states and counters to ``path``."""
        path = PurePath(path)
        Path(path.parent).mkdir(parents=True, exist_ok=True)
        ckpt = {
            "round": self.current_round,
            "global_steps": self.current_step,
            "model": model.state_dict(),
            "optimizer": self.opt_state,
            "scheduler": self.sch_state,
        }
        torch.save(ckpt, str(path))

    def load_checkpoint(self, path: str, model: nn.Module) -> nn.Module:
        """Restore counters and optimizer/scheduler states; load weights into ``model``."""
        ckpt = torch.load(path)
        self.current_step = ckpt.get("global_steps", 0)
        self.current_round = ckpt.get("round", 0)
        self.opt_state = ckpt.get("optimizer", None)
        self.sch_state = ckpt.get("scheduler", None)
        model.load_state_dict(ckpt["model"])
        return model

    def run(
        self,
        model: nn.Module,
        data_loader: DataLoader,
        num_steps: int,
        device: str = "cuda:0",
        logger: Optional[SummaryWriter] = None,
        abort_signal: Optional[Any] = None,
    ):
        """Train ``model`` for ``num_steps`` steps and advance the round counter."""
        self.setup(model, logger, abort_signal)

        # Run training (fix: propagate `device` to the loop)
        self.model.train()
        self.training_loop(data_loader, num_steps, device)
        self.current_round += 1

        self.cleanup()
| NVFlare-main | research/condist-fl/src/trainer/condist.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import uuid
def read_json(filename):
    """Load and return the JSON content of ``filename``.

    Raises:
        FileNotFoundError: if ``filename`` is not an existing regular file.
    """
    # Use an explicit exception instead of `assert` (asserts are stripped
    # under `python -O`) and actually interpolate the offending path — the
    # old f-string had no placeholder, so the message never named the file.
    if not os.path.isfile(filename):
        raise FileNotFoundError(f"{filename} does not exist!")
    with open(filename, "r") as f:
        return json.load(f)
def write_json(data, filename):
    """Serialize ``data`` to ``filename`` as pretty-printed (4-space) JSON."""
    with open(filename, "w") as out_file:
        out_file.write(json.dumps(data, indent=4))
def main():
    """Rewrite a job's client/server/meta configs for a given Dirichlet alpha."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--job", type=str, default="cifar10_fedavg", help="Path to job config.")
    parser.add_argument("--poc", action="store_true", help="Whether admin uses POC mode.")
    parser.add_argument(
        "--train_split_root", type=str, default="/tmp/cifar10_splits", help="Location where to save data splits."
    )
    parser.add_argument(
        "--alpha",
        type=float,
        default=0.0,
        help="Value controls the degree of heterogeneity. "
        "Lower values of alpha means higher heterogeneity."
        "Values of <= 0. means no data sampling. "
        "Assumes central training.",
    )
    args = parser.parse_args()

    # alpha <= 0 means no data sampling: nothing to rewrite.
    if args.alpha <= 0.0:
        print("Assuming centralized training.")
        return

    # Locate the three config files that need updating.
    job_name = os.path.basename(args.job)
    config_dir = os.path.join(args.job, job_name, "config")
    client_config_filename = os.path.join(config_dir, "config_fed_client.json")
    server_config_filename = os.path.join(config_dir, "config_fed_server.json")
    meta_config_filename = os.path.join(args.job, "meta.json")

    client_config = read_json(client_config_filename)
    server_config = read_json(server_config_filename)
    meta_config = read_json(meta_config_filename)

    print(f"Set alpha to {args.alpha}")
    token = str(uuid.uuid4())
    job_name = f"{job_name}_alpha{args.alpha}"
    server_config["alpha"] = args.alpha
    meta_config["name"] = job_name

    # Unique split directory per (job, alpha, run) so runs never collide.
    split_dir = os.path.join(args.train_split_root, f"{job_name}_{token}")
    print(f"Set train split root to {split_dir}")
    server_config["TRAIN_SPLIT_ROOT"] = split_dir
    client_config["TRAIN_SPLIT_ROOT"] = split_dir

    write_json(client_config, client_config_filename)
    write_json(server_config, server_config_filename)
    write_json(meta_config, meta_config_filename)
    print(f"Updated {meta_config_filename} to alpha={args.alpha}")


if __name__ == "__main__":
    main()
| NVFlare-main | research/auto-fed-rl/set_alpha.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from nvflare.apis.dxo import DataKind, MetaKey, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.learnable import Learnable
from nvflare.app_common.abstract.model import make_model_learnable
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.shareablegenerators.full_model_shareable_generator import FullModelShareableGenerator
from nvflare.security.logging import secure_format_exception
from .autofedrl_constants import AutoFedRLConstants
class AutoFedRLFedOptModelShareableGenerator(FullModelShareableGenerator):
    """FedOpt-style server-side model updater used by Auto-FedRL."""

    def __init__(
        self,
        optimizer_args: dict = None,
        lr_scheduler_args: dict = None,
        source_model="model",
        device=None,
    ):
        """Implement the FedOpt algorithm.

        The algorithm is proposed in Reddi, Sashank, et al.
        "Adaptive federated optimization." arXiv preprint arXiv:2003.00295 (2020).
        This SharableGenerator will update the global model using the specified
        PyTorch optimizer and learning rate scheduler.

        Args:
            optimizer_args: dictionary of optimizer arguments, e.g.
                {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}} (default).
            lr_scheduler_args: dictionary of server-side learning rate scheduler arguments, e.g.
                {'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}} (default: None).
            source_model: either a valid torch model object or a component ID of a torch model object
            device: specify the device to run server-side optimization, e.g. "cpu" or "cuda:0"
                (will default to cuda if available and no device is specified).

        Raises:
            TypeError: when any of input arguments does not have correct type
        """
        super().__init__()
        if not optimizer_args:
            # Fix: `self.logger` is a logging.Logger instance, not a callable;
            # calling it raised TypeError whenever this default path was hit.
            self.logger.info("No optimizer_args provided. Using FedOpt with SGD and lr 1.0")
            optimizer_args = {"name": "SGD", "args": {"lr": 1.0}}

        if not isinstance(optimizer_args, dict):
            raise TypeError(
                "optimizer_args must be a dict of format, e.g. {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}}."
            )
        if lr_scheduler_args is not None:
            if not isinstance(lr_scheduler_args, dict):
                raise TypeError(
                    "optimizer_args must be a dict of format, e.g. "
                    "{'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}}."
                )

        self.source_model = source_model
        self.optimizer_args = optimizer_args
        self.lr_scheduler_args = lr_scheduler_args
        self.model = None
        self.optimizer = None
        self.lr_scheduler = None
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device(device)
        # Component names kept only for logging.
        self.optimizer_name = None
        self.lr_scheduler_name = None

    def _get_component_name(self, component_args):
        """Return the component's "path" entry if present, otherwise "name"."""
        if component_args is not None:
            name = component_args.get("path", None)
            if name is None:
                name = component_args.get("name", None)
            return name
        else:
            return None

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        """On START_RUN, resolve the source model and build optimizer/scheduler."""
        if event_type == EventType.START_RUN:
            # initialize the optimizer with current global model params
            engine = fl_ctx.get_engine()

            if isinstance(self.source_model, str):
                self.model = engine.get_component(self.source_model)
            else:
                self.model = self.source_model

            if self.model is None:
                self.system_panic(
                    "Model is not available",
                    fl_ctx,
                )
                return
            elif not isinstance(self.model, torch.nn.Module):
                self.system_panic(
                    f"Expected model to be a torch.nn.Module but got {type(self.model)}",
                    fl_ctx,
                )
                return
            # (removed a leftover debug print of the full model here)

            self.model.to(self.device)

            # set up optimizer
            try:
                # use provided or default optimizer arguments and add the model parameters
                if "args" not in self.optimizer_args:
                    self.optimizer_args["args"] = {}
                self.optimizer_args["args"]["params"] = self.model.parameters()
                self.optimizer = engine.build_component(self.optimizer_args)
                # get optimizer name for log
                self.optimizer_name = self._get_component_name(self.optimizer_args)
            except Exception as e:
                self.system_panic(
                    f"Exception while parsing `optimizer_args`({self.optimizer_args}): {secure_format_exception(e)}",
                    fl_ctx,
                )
                return

            # set up lr scheduler
            if self.lr_scheduler_args is not None:
                try:
                    self.lr_scheduler_name = self._get_component_name(self.lr_scheduler_args)
                    # use provided or default lr scheduler argument and add the optimizer
                    if "args" not in self.lr_scheduler_args:
                        self.lr_scheduler_args["args"] = {}
                    self.lr_scheduler_args["args"]["optimizer"] = self.optimizer
                    self.lr_scheduler = engine.build_component(self.lr_scheduler_args)
                except Exception as e:
                    self.system_panic(
                        f"Exception while parsing `lr_scheduler_args`({self.lr_scheduler_args}): {secure_format_exception(e)}",
                        fl_ctx,
                    )
                    return

    def server_update(self, model_diff, server_lr=None):
        """Updates the global model using the specified optimizer.

        Args:
            model_diff: the aggregated model differences from clients.
            server_lr: server learning rate from search space

        Returns:
            The updated PyTorch model state dictionary.
        """
        self.model.train()
        if server_lr is not None:
            # Override the configured LR with the searched server LR.
            for param_group in self.optimizer.param_groups:
                param_group["lr"] = server_lr
        self.optimizer.zero_grad()

        # Apply the update to the model. We must multiply weights_delta by -1.0 to
        # view it as a gradient that should be applied to the server_optimizer.
        for name, param in self.model.named_parameters():
            param.grad = torch.tensor(-1.0 * model_diff[name]).to(self.device)
        self.optimizer.step()
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()

        return self.model.state_dict()

    def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> Learnable:
        """Convert Shareable to Learnable while doing a FedOpt update step.

        Supporting data_kind == DataKind.WEIGHT_DIFF

        Args:
            shareable (Shareable): Shareable to be converted
            fl_ctx (FLContext): FL context

        Returns:
            Model: Updated global ModelLearnable.
        """
        # check types
        dxo = from_shareable(shareable)
        if dxo.data_kind != DataKind.WEIGHT_DIFF:
            self.system_panic(
                "FedOpt is only implemented for " "data_kind == DataKind.WEIGHT_DIFF",
                fl_ctx,
            )
            return Learnable()

        processed_algorithm = dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)
        if processed_algorithm is not None:
            self.system_panic(
                f"FedOpt is not implemented for shareable processed by {processed_algorithm}",
                fl_ctx,
            )
            return Learnable()

        model_diff = dxo.data

        # The searched server LR (if any) was stashed in the FL context.
        server_lr = fl_ctx.get_prop(AutoFedRLConstants.HYPERPARAMTER_COLLECTION, {}).get("slr")
        if server_lr is not None:
            self.log_info(fl_ctx, f"Received and override current server learning rate as: {server_lr}")

        start = time.time()
        weights = self.server_update(model_diff, server_lr)
        secs = time.time() - start

        # convert to numpy dict of weights
        start = time.time()
        for key in weights:
            weights[key] = weights[key].detach().cpu().numpy()
        secs_detach = time.time() - start

        self.log_info(
            fl_ctx,
            f"FedOpt ({self.optimizer_name}, {self.device}) server model update "
            f"round {fl_ctx.get_prop(AppConstants.CURRENT_ROUND)}, "
            f"{self.lr_scheduler_name if self.lr_scheduler_name else ''} "
            f"lr: {self.optimizer.param_groups[-1]['lr']}, "
            f"update: {secs} secs., detach: {secs_detach} secs.",
        )
        # TODO: write server-side lr to tensorboard

        return make_model_learnable(weights, dxo.get_meta_props())
| NVFlare-main | research/auto-fed-rl/src/autofedrl/autofedrl_fedopt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.nn.parameter import Parameter
from nvflare.apis.dxo import DataKind, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.app_constant import AppConstants
from .autofedrl_constants import AutoFedRLConstants
class PTAutoFedRLSearchSpace(FLComponent):
def __init__(
self,
optimizer_args: dict = None,
lr_scheduler_args: dict = None,
device=None,
search_lr=False,
lr_range=None,
search_ne=False,
ne_range=None,
search_aw=False,
aw_range=None,
search_slr=False,
slr_range=None,
cutoff_interval=5,
n_clients=8,
initial_precision=85.0,
search_type="cs",
):
"""Implement the Auto-FedRL algorithm (https://arxiv.org/abs/2203.06338).
The algorithm is proposed in Reddi, Sashank,
et al. "Adaptive federated optimization." arXiv preprint arXiv:2003.00295 (2020).
This SharableGenerator will update the global model using the specified
PyTorch optimizer and learning rate scheduler.
Args:
optimizer_args: dictionary of optimizer arguments, e.g.
{'path': 'torch.optim.SGD', 'args': {'lr': 1.0}} (default).
lr_scheduler_args: dictionary of server-side learning rate scheduler arguments, e.g.
{'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}} (default: None).
device: specify the device to run server-side optimization, e.g. "cpu" or "cuda:0"
(will default to cuda if available and no device is specified).
Raises:
TypeError: when any of input arguments does not have correct type
"""
super().__init__()
if not optimizer_args:
self.logger("No optimizer_args provided. Using FedOpt with SGD and lr 0.01")
optimizer_args = {"name": "Adam", "args": {"lr": 0.01, "betas": (0.7, 0.7)}}
if not isinstance(optimizer_args, dict):
raise TypeError(
"optimizer_args must be a dict of format, e.g. {'path': 'torch.optim.SGD', 'args': {'lr': 1.0}}."
)
if lr_scheduler_args is not None:
if not isinstance(lr_scheduler_args, dict):
raise TypeError(
"optimizer_args must be a dict of format, e.g. "
"{'path': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'args': {'T_max': 100}}."
)
if search_type not in ["cs", "drl"]:
raise NotImplementedError("Currently, we only implemented continuous search space")
self.optimizer_args = optimizer_args
self.lr_scheduler_args = lr_scheduler_args
self.optimizer = None
self.lr_scheduler = None
self.search_lr = search_lr
self.lr_range = lr_range
self.search_ne = search_ne
self.ne_range = ne_range
self.search_aw = search_aw
self.aw_range = aw_range
self.search_slr = search_slr
self.slr_range = slr_range
self.n_clients = n_clients
self.initial_precision = initial_precision
self.search_type = search_type
self.cutoff_interval = cutoff_interval
# Set default search ranges
if self.lr_range is None:
self.lr_range = [0.0005, 0.05]
if self.ne_range is None:
self.ne_range = [2, 40]
if self.aw_range is None:
self.aw_range = [0.1, 1.0]
if self.slr_range is None:
self.slr_range = [0.5, 1.5]
# TODO: add checks for valid parameter ranges
if device is None:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = torch.device(device)
self.optimizer_name = None
self.lr_scheduler_name = None
def _get_component_name(self, component_args):
if component_args is not None:
name = component_args.get("path", None)
if name is None:
name = component_args.get("name", None)
return name
else:
return None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
# Initialize the optimizer with current global model params
engine = fl_ctx.get_engine()
# Define RL search space
hyperparams_points = []
if self.search_lr:
hyperparams_points += [self.lr_range]
if self.search_ne:
hyperparams_points += [self.ne_range]
if self.search_aw:
hyperparams_points += [self.aw_range for _ in range(self.n_clients)]
if self.search_slr:
hyperparams_points += [self.slr_range]
if self.search_type == "cs":
self.hp_dist = LearnableGaussianContinuousSearch(
hyperparams_points, self.initial_precision, self.device
)
self.hp_dist.to(self.device)
self.log_info(fl_ctx, "Initialized Continuous Search Space")
elif self.search_type == "drl":
# TODO: Deep RL agent requires torch==1.4.0
self.hp_dist = LearnableGaussianContinuousSearchDRL(
hyperparams_points, self.initial_precision, self.device
)
self.hp_dist.to(self.device)
self.log_info(fl_ctx, "Initialized DRL Continuous Search space")
else:
raise NotImplementedError
# Set up optimizer
try:
# Use provided or default optimizer arguments and add the model parameters
if "args" not in self.optimizer_args:
self.optimizer_args["args"] = {}
self.optimizer_args["args"]["params"] = self.hp_dist.parameters()
self.optimizer = engine.build_component(self.optimizer_args)
# Get optimizer name for log
self.optimizer_name = self._get_component_name(self.optimizer_args)
except Exception as e:
self.system_panic(
f"Exception while parsing `optimizer_args`: " f"{self.optimizer_args} with Exception {e}",
fl_ctx,
)
return
# Initialize
self.logprob_history = []
self.val_losses = [-np.inf]
self.log_info(fl_ctx, "Initialized validation loss fpr Search space")
def sample_hyperparamters(self, fl_ctx: FLContext) -> None:
    """Sample one hyperparameter configuration from the learnable search distribution.

    Draws a sample (and its log-probability) from ``self.hp_dist``, unpacks it into
    the hyperparameters enabled at START_RUN (client lr, local epochs, per-client
    aggregation weights, server lr), clamps values into valid ranges, records the
    log-probability for the later policy-gradient update, and publishes the result
    under ``AutoFedRLConstants.HYPERPARAMTER_COLLECTION`` in the FL context.

    Args:
        fl_ctx (FLContext): FL context; receives the sampled hyperparameter dict
            as a private, non-sticky property.
    """
    hparam, logprob = self.hp_dist.forward()
    hparam_list = list(hparam)
    # Unpack in the same order the search space was assembled in handle_event().
    if self.search_lr:
        lrate = hparam_list.pop(0)
    if self.search_ne:
        train_iters_per_round = hparam_list.pop(0)
        train_iters_per_round = int(train_iters_per_round + 0.5)
    if self.search_aw:
        aw = [hparam_list.pop(0) for _ in range(self.n_clients)]
        aw_tensor = torch.tensor([aw])
        # Softmax turns the raw samples into normalized aggregation weights.
        aw_tensor = F.softmax(aw_tensor, dim=1)
        weight = [aw_tensor[:, i].item() for i in range(self.n_clients)]
    if self.search_slr:
        slr = hparam_list.pop(0)
    # Add constraints to prevent negative/zero values.
    if self.search_lr:
        lrate = lrate if lrate > 0.0001 else 0.0001
    if self.search_ne:
        train_iters_per_round = int(train_iters_per_round + 0.5) if train_iters_per_round >= 1 else 1
    if self.search_slr:
        slr = slr if slr > 0.0001 else 0.0001
    # Keep the log-probability so update_search_space() can compute the search loss.
    self.logprob_history.append(logprob)
    self.log_info(fl_ctx, f"Hyperparameter Search at round {fl_ctx.get_prop(AppConstants.CURRENT_ROUND)}")
    if self.search_lr:
        self.log_info(fl_ctx, f"Learning rate: {lrate}")
    if self.search_ne:
        self.log_info(fl_ctx, f"Number of local epochs: {train_iters_per_round}")
    if self.search_aw:
        self.log_info(fl_ctx, f"Aggregation weights: {weight}")
    # Fix: this log was gated on self.search_lr, which raised a NameError when only
    # the server learning rate was searched, and skipped the log otherwise.
    if self.search_slr:
        self.log_info(fl_ctx, f"Server learning rate {slr}")
    # The distribution statistics always exist, so log them unconditionally
    # (previously gated on self.search_lr by an apparent copy-paste slip).
    self.log_info(fl_ctx, f"dist mean: {self.hp_dist.mean}")
    self.log_info(fl_ctx, f"precision component: {self.hp_dist.precision_component}")
    hps = {
        "lr": lrate if self.search_lr else None,
        "ne": train_iters_per_round if self.search_ne else None,
        "aw": weight if self.search_aw else None,
        "slr": slr if self.search_slr else None,
    }
    fl_ctx.set_prop(AutoFedRLConstants.HYPERPARAMTER_COLLECTION, hps, private=True, sticky=False)
def update_search_space(self, shareable, fl_ctx: FLContext) -> None:
    """Update the search distribution from the aggregated validation loss.

    Takes one gradient step on the distribution parameters using a
    policy-gradient-style loss built from relative validation-loss improvements
    over a sliding window of recent rounds, then truncates the retained
    log-probability graph to bound memory.

    Args:
        shareable (Shareable): aggregated client metrics; must carry a METRICS
            DXO with a "val_loss" entry.
        fl_ctx (FLContext): FL context.

    Raises:
        TypeError: if ``shareable`` is not a Shareable.
        ValueError: if the DXO data kind is not METRICS.
    """
    if not isinstance(shareable, Shareable):
        raise TypeError("shareable must be Shareable, but got {}.".format(type(shareable)))
    dxo = from_shareable(shareable)
    if dxo.data_kind == DataKind.METRICS:
        val_loss = dxo.data["val_loss"]
    else:
        raise ValueError("data_kind should be DataKind.METRICS, but got {}".format(dxo.data_kind))
    self.val_losses.append(torch.tensor(val_loss, dtype=torch.float32, device=self.device))
    start = time.time()
    current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
    # Get cutoff val losses for windowed updates
    cutoff_round = max(0, current_round - self.cutoff_interval)
    # Ignore initial loss (index 0 is the -inf placeholder set at START_RUN)
    val_losses_torch = torch.tensor(list(self.val_losses[1:]), dtype=torch.float32, device=self.device)
    val_losses_cut = val_losses_torch[cutoff_round:]
    current_loss = 0
    current_improvements = [0]
    # Compute hp search loss
    if len(val_losses_cut) > 1:
        # Relative improvement between consecutive rounds, negated so that a
        # decreasing loss yields a positive improvement.
        current_improvements = -((val_losses_cut[1:] / val_losses_cut[:-1]) - 1)
        current_mean_improvements = current_improvements.mean()
        updates_limit = len(current_improvements) + 1
        # NOTE(review): current_loss is overwritten each iteration, so only the
        # oldest window term (j == updates_limit - 1) survives; if a sum over the
        # window was intended, this should accumulate with `+=` — confirm against
        # the Auto-FedRL paper before changing.
        for j in range(1, updates_limit):
            current_loss = self.logprob_history[-j - 1] * (current_improvements[-j] - current_mean_improvements)
    # Update search space
    # Skips the step when no improvement terms exist yet (current_loss is still
    # the plain int 0 and has no autograd graph to backpropagate through).
    if not (type(current_loss) == int):  # TODO: Is this needed?
        self.optimizer.zero_grad()
        # Gradient *ascent* on the improvement-weighted log-probability.
        (-current_loss).backward(retain_graph=True)
        self.optimizer.step()
    # We need release the memory based on cutoff interval
    if len(self.logprob_history) > self.cutoff_interval:
        self.log_info(fl_ctx, (f"Release Memory......at round {current_round}"))
        # Detach entries older than the window so their autograd graphs are freed.
        release_list = self.logprob_history[: -self.cutoff_interval]
        keep_list = self.logprob_history[-self.cutoff_interval :]
        tmp_list = [logpro.detach() for logpro in release_list]
        self.logprob_history = tmp_list + keep_list
    secs = time.time() - start
    self.log_info(
        fl_ctx,
        f"Finished Auto-FedRL search space update ({self.optimizer_name}, {self.device}) "
        f"round {current_round}, HP loss {current_loss} "
        f"lr: {self.optimizer.param_groups[-1]['lr']}, "
        f"update: {secs} secs.",
    )
    self.log_debug(fl_ctx, f"val loss: {self.val_losses}) ")
    self.log_debug(fl_ctx, f"logprob_history: {self.logprob_history}) ")
class LearnableGaussianContinuousSearch(torch.nn.Module):
    """Learnable multivariate Gaussian over a normalized hyperparameter box.

    Each hyperparameter range is mapped to a centered interval; samples are
    drawn in normalized space and rescaled back to the original ranges.
    The mean and the precision factor are learnable parameters.
    """

    def __init__(self, hyperparams_points, initial_precision=None, device="cpu"):
        super(LearnableGaussianContinuousSearch, self).__init__()
        self.dim = len(hyperparams_points)
        self.hps = [np.array(pts) for pts in hyperparams_points]
        # Per-dimension center and span used to map normalized samples back.
        self.hps_center = torch.tensor([(pts[0] + pts[-1]) / 2 for pts in self.hps]).to(device)
        self.hps_scale = torch.tensor([pts[-1] - pts[0] for pts in self.hps]).to(device)
        self.mean = Parameter(torch.zeros(self.dim))
        init_prec = 5.0 if initial_precision is None else initial_precision
        self.precision_component = Parameter(torch.sqrt(torch.eye(self.dim) * init_prec))

    def forward(self):
        """Draw one sample; returns (rescaled sample, log-probability)."""
        # Keep the mean inside the normalized box before sampling.
        self.mean.data.copy_(torch.clamp(self.mean.data, -0.5, 0.5))
        self.dist = MultivariateNormal(loc=self.mean, precision_matrix=self.precision_component)
        drawn = self.dist.sample()
        logprob = self.dist.log_prob(drawn)
        return drawn * self.hps_scale + self.hps_center, logprob
class LearnableGaussianContinuousSearchDRL(torch.nn.Module):
    """Gaussian hyperparameter search whose parameters are driven by a policy net.

    Unlike LearnableGaussianContinuousSearch, the mean and precision factor are
    plain tensors updated additively by a PolicyNet on every forward() call; the
    learnable parameters live inside the policy network.
    """

    def __init__(self, hyperparams_points, initial_precision=None, device="cpu", rl_nettype="mlp"):
        super(LearnableGaussianContinuousSearchDRL, self).__init__()
        self.dim = len(hyperparams_points)
        if rl_nettype != "mlp":
            raise NotImplementedError
        self.PolicyNet = PolicyNet(self.dim).to(device)
        self.hps = [np.array(pts) for pts in hyperparams_points]
        # Per-dimension center and span for mapping normalized samples back.
        self.hps_center = torch.tensor([(pts[0] + pts[-1]) / 2 for pts in self.hps]).to(device)
        self.hps_scale = torch.tensor([pts[-1] - pts[0] for pts in self.hps]).to(device)
        # Small epsilon offsets keep the initial state strictly non-zero.
        self.mean = torch.zeros(self.dim) + 10e-8
        init_prec = 5.0 if initial_precision is None else initial_precision
        self.precision_component = torch.sqrt(torch.eye(self.dim) * init_prec) + 10e-8

    def forward(self):
        """Advance the distribution via the policy net, then draw one sample."""
        delta_mean, delta_prec = self.PolicyNet(self.mean, self.precision_component)
        self.mean = self.mean + delta_mean
        self.precision_component = self.precision_component + delta_prec
        self.mean.data.copy_(torch.clamp(self.mean.data, -1.0, 1.0))
        # P = F F^T guarantees a symmetric positive (semi)definite precision matrix.
        precision_matrix = torch.mm(self.precision_component, self.precision_component.t())
        dist = MultivariateNormal(loc=self.mean, precision_matrix=precision_matrix)
        drawn = dist.sample()
        logprob = dist.log_prob(drawn)
        return drawn * self.hps_scale + self.hps_center, logprob
class PolicyNet(torch.nn.Module):
    """MLP proposing additive updates to a search distribution's mean and precision factor.

    Input is the concatenation of the mean vector (input_dim) and the flattened
    precision factor (input_dim^2); output is split back into the two update parts.
    """

    def __init__(self, input_dim):
        super(PolicyNet, self).__init__()
        self.input_dim = input_dim
        flat_size = input_dim * input_dim + input_dim  # mean + flattened precision factor
        self.fc_layer = nn.Sequential(
            nn.Linear(flat_size, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, flat_size),
            nn.Tanh(),
        )

    def forward(self, mean, precision_component):
        """Return (mean_update, precision_component_update) for the given state."""
        state = torch.cat([mean, precision_component.reshape((-1,))])
        batched = torch.unsqueeze(state, 0)
        # Tanh output scaled down so each step is a small perturbation.
        scaled = torch.squeeze(self.fc_layer(batched)) / 100.0
        mean_update = scaled[: self.input_dim]
        precision_component_update = scaled[self.input_dim :].reshape((self.input_dim, self.input_dim))
        return mean_update, precision_component_update
| NVFlare-main | research/auto-fed-rl/src/autofedrl/pt_autofedrl.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .autofedrl_cifar10_learner import CIFAR10AutoFedRLearner
from .autofedrl_fedopt import AutoFedRLFedOptModelShareableGenerator
from .autofedrl_learner_executor import AutoFedRLLearnerExecutor
from .autofedrl_model_aggregator import AutoFedRLWeightedAggregator
from .autofedrl_scatter_and_gather import ScatterAndGatherAutoFedRL
from .pt_autofedrl import PTAutoFedRLSearchSpace
| NVFlare-main | research/auto-fed-rl/src/autofedrl/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.impl.controller import ClientTask, Task
from nvflare.apis.shareable import Shareable
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.aggregator import Aggregator
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.app_event_type import AppEventType
from nvflare.app_common.workflows.scatter_and_gather import ScatterAndGather
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.import_utils import optional_import
from nvflare.widgets.info_collector import GroupInfoCollector, InfoCollector
from .autofedrl_constants import AutoFedRLConstants
class ScatterAndGatherAutoFedRL(ScatterAndGather):
    # Extends the base ScatterAndGather round with an extra pre-training
    # validation task whose aggregated loss drives a learnable hyperparameter
    # search space (Auto-FedRL).
    def __init__(
        self,
        min_clients: int = 1,
        num_rounds: int = 5,
        start_round: int = 0,
        wait_time_after_min_received: int = 10,
        aggregator_id=AppConstants.DEFAULT_AGGREGATOR_ID,
        aggregator_search_val_id=AppConstants.DEFAULT_AGGREGATOR_ID,
        persistor_id=AppConstants.DEFAULT_PERSISTOR_ID,
        shareable_generator_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
        search_space_id=AppConstants.DEFAULT_SHAREABLE_GENERATOR_ID,
        train_task_name=AppConstants.TASK_TRAIN,
        train_timeout: int = 0,
        ignore_result_error: bool = False,
    ):
        """The controller for ScatterAndGather Workflow used to implement Auto-FedRL algorithm (https://arxiv.org/abs/2203.06338).

        The ScatterAndGather workflow defines FederatedAveraging on all clients.
        The model persistor (persistor_id) is used to load the initial global model which is sent to all clients.
        Each client sends it's updated weights after local training which is aggregated (aggregator_id). The
        shareable generator is used to convert the aggregated weights to shareable and shareable back to weight.
        The model_persistor also saves the model after training.

        Args:
            min_clients (int, optional): Min number of clients in training. Defaults to 1.
            num_rounds (int, optional): The total number of training rounds. Defaults to 5.
            start_round (int, optional): Start round for training. Defaults to 0.
            wait_time_after_min_received (int, optional): Time to wait before beginning aggregation after
                contributions received. Defaults to 10.
            aggregator_id (str, optional): ID of the aggregator component. Defaults to "aggregator".
            aggregator_search_val_id (str, optional): ID of the aggregator used to collect the per-client
                validation losses that drive the hyperparameter search.
            persistor_id (str, optional): ID of the persistor component. Defaults to "persistor".
            shareable_generator_id (str, optional): ID of the shareable generator. Defaults to "shareable_generator".
            search_space_id (str, optional): ID of the learnable hyperparameter search-space component
                (e.g. PTAutoFedRLSearchSpace).
            train_task_name (str, optional): Name of the train task. Defaults to "train".
            train_timeout (int, optional): Time to wait for clients to do local training.
            ignore_result_error (bool, optional): whether this controller can proceed if client result has errors.
                Defaults to False.

        Raises:
            TypeError: when any of input arguments does not have correct type
            ValueError: when any of input arguments is out of range
        """
        super().__init__(
            min_clients=min_clients,
            num_rounds=num_rounds,
            start_round=start_round,
            wait_time_after_min_received=wait_time_after_min_received,
            aggregator_id=aggregator_id,
            persistor_id=persistor_id,
            shareable_generator_id=shareable_generator_id,
            train_task_name=train_task_name,
            train_timeout=train_timeout,
            ignore_result_error=ignore_result_error,
        )
        # Check arguments
        if not isinstance(aggregator_search_val_id, str):
            raise TypeError(
                "aggregator_search_val_id must be a string but got {}".format(type(aggregator_search_val_id))
            )
        if not isinstance(search_space_id, str):
            raise TypeError("search_space_id must be a string but got {}".format(type(search_space_id)))
        self.aggregator_search_val_id = aggregator_search_val_id
        self.search_space_id = search_space_id
        # Components are resolved from the engine in start_controller().
        self.aggregator_search_val = None
        self.search_space = None
        # use FOBS serializing/deserializing PyTorch tensors (if torch is available)
        torch, torch_available = optional_import(module="torch")
        if torch_available:
            from nvflare.app_opt.pt.decomposers import TensorDecomposer

            fobs.register(TensorDecomposer)

    def start_controller(self, fl_ctx: FLContext) -> None:
        """Resolve the search-space and validation-loss aggregator components after base setup."""
        super().start_controller(fl_ctx=fl_ctx)
        self.log_info(fl_ctx, "Initializing ScatterAndGatherAutoFedRL workflow.")
        # used for collecting hp val loss from clients
        self.aggregator_search_val = self._engine.get_component(self.aggregator_search_val_id)
        if not isinstance(self.aggregator_search_val, Aggregator):
            # NOTE(review): this message reports type(self.aggregator); it likely
            # should report type(self.aggregator_search_val) — confirm and fix.
            self.system_panic(
                f"aggregator {self.aggregator_search_val_id} must be an Aggregator type object but got {type(self.aggregator)}",
                fl_ctx,
            )
            return
        # define the search space
        self.search_space = self._engine.get_component(self.search_space_id)
        if not isinstance(self.search_space, FLComponent):
            # NOTE(review): message wording says "Shareable generator" but this is
            # the hyperparameter search-space component.
            self.system_panic(
                f"Shareable generator {self.search_space_id} must be a ShareableGenerator type object, "
                f"but got {type(self.search_space)}",
                fl_ctx,
            )
            return

    def control_flow(self, abort_signal: Signal, fl_ctx: FLContext) -> None:
        """Run the training rounds.

        Each round: sample hyperparameters -> broadcast a validation task and
        aggregate its losses -> broadcast the train task with the sampled
        hyperparameters attached -> aggregate weights into the global model ->
        update the search space from the aggregated validation loss -> persist.
        """
        try:
            self.log_info(fl_ctx, "Beginning ScatterAndGather training phase.")
            self._phase = AppConstants.PHASE_TRAIN

            fl_ctx.set_prop(AppConstants.PHASE, self._phase, private=True, sticky=False)
            fl_ctx.set_prop(AppConstants.NUM_ROUNDS, self._num_rounds, private=True, sticky=False)
            self.fire_event(AppEventType.TRAINING_STARTED, fl_ctx)

            # _current_round may already be set when resuming from a snapshot.
            if self._current_round is None:
                self._current_round = self._start_round
            while self._current_round < self._start_round + self._num_rounds:
                if self._check_abort_signal(fl_ctx, abort_signal):
                    return

                self.log_info(fl_ctx, f"Round {self._current_round} started.")
                fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, self._global_weights, private=True, sticky=True)
                fl_ctx.set_prop(AppConstants.CURRENT_ROUND, self._current_round, private=True, sticky=False)
                self.fire_event(AppEventType.ROUND_STARTED, fl_ctx)

                # Search hyperparameters: draws a configuration and stores it in
                # the FL context under AutoFedRLConstants.HYPERPARAMTER_COLLECTION.
                self.search_space.sample_hyperparamters(fl_ctx)

                # Create val_task for hp search: clients score the current global
                # model on a local held-out split before training starts.
                data_shareable_val: Shareable = self.shareable_gen.learnable_to_shareable(self._global_weights, fl_ctx)
                data_shareable_val.set_header(AppConstants.CURRENT_ROUND, self._current_round)
                data_shareable_val.set_header(AppConstants.NUM_ROUNDS, self._num_rounds)
                data_shareable_val.add_cookie(AppConstants.CONTRIBUTION_ROUND, self._current_round)

                val_task = Task(
                    name=AutoFedRLConstants.MODEL_VALIDATE_FOR_SEARCH,
                    data=data_shareable_val,
                    props={},
                    timeout=self._train_timeout,
                    before_task_sent_cb=self._prepare_train_task_data,
                    result_received_cb=self._process_val_result,
                )

                self.broadcast_and_wait(
                    task=val_task,
                    min_responses=self._min_clients,
                    wait_time_after_min_received=self._wait_time_after_min_received,
                    fl_ctx=fl_ctx,
                    abort_signal=abort_signal,
                )

                # aggregate val loss for hp search
                self.fire_event(AppEventType.BEFORE_AGGREGATION, fl_ctx)
                aggr_val_result = self.aggregator_search_val.aggregate(fl_ctx)
                self.fire_event(AppEventType.AFTER_AGGREGATION, fl_ctx)

                # Create train_task
                data_shareable: Shareable = self.shareable_gen.learnable_to_shareable(self._global_weights, fl_ctx)
                data_shareable.set_header(AppConstants.CURRENT_ROUND, self._current_round)
                data_shareable.set_header(AppConstants.NUM_ROUNDS, self._num_rounds)
                data_shareable.add_cookie(AppConstants.CONTRIBUTION_ROUND, self._current_round)
                # Send sampled hp to clients
                hp = fl_ctx.get_prop(AutoFedRLConstants.HYPERPARAMTER_COLLECTION)
                data_shareable.set_header(AutoFedRLConstants.HYPERPARAMTER_COLLECTION, fobs.dumps(hp))
                # Assign aggregation weights before task starts, since aw is used
                # when the server accepts task results
                self.aggregator.update_aggregation_weights(fl_ctx)

                train_task = Task(
                    name=self.train_task_name,
                    data=data_shareable,
                    props={},
                    timeout=self._train_timeout,
                    before_task_sent_cb=self._prepare_train_task_data,
                    result_received_cb=self._process_train_result,
                )

                self.broadcast_and_wait(
                    task=train_task,
                    min_responses=self._min_clients,
                    wait_time_after_min_received=self._wait_time_after_min_received,
                    fl_ctx=fl_ctx,
                    abort_signal=abort_signal,
                )

                if self._check_abort_signal(fl_ctx, abort_signal):
                    return

                self.fire_event(AppEventType.BEFORE_AGGREGATION, fl_ctx)
                aggr_result = self.aggregator.aggregate(fl_ctx)
                fl_ctx.set_prop(AppConstants.AGGREGATION_RESULT, aggr_result, private=True, sticky=False)
                self.fire_event(AppEventType.AFTER_AGGREGATION, fl_ctx)

                if self._check_abort_signal(fl_ctx, abort_signal):
                    return

                self.fire_event(AppEventType.BEFORE_SHAREABLE_TO_LEARNABLE, fl_ctx)
                self._global_weights = self.shareable_gen.shareable_to_learnable(aggr_result, fl_ctx)
                fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, self._global_weights, private=True, sticky=True)
                fl_ctx.sync_sticky()
                self.fire_event(AppEventType.AFTER_SHAREABLE_TO_LEARNABLE, fl_ctx)

                # update search space based on aggregated val loss
                self.search_space.update_search_space(aggr_val_result, fl_ctx)

                if self._check_abort_signal(fl_ctx, abort_signal):
                    return

                self.fire_event(AppEventType.BEFORE_LEARNABLE_PERSIST, fl_ctx)
                self.persistor.save(self._global_weights, fl_ctx)
                self.fire_event(AppEventType.AFTER_LEARNABLE_PERSIST, fl_ctx)

                self.fire_event(AppEventType.ROUND_DONE, fl_ctx)
                self.log_info(fl_ctx, f"Round {self._current_round} finished.")
                self._current_round += 1

                # Call the self._engine to persist the snapshot of all the FLComponents
                self._engine.persist_components(fl_ctx, completed=False)

            self._phase = AppConstants.PHASE_FINISHED
            self.log_info(fl_ctx, "Finished ScatterAndGather Training.")
        except Exception as e:
            traceback.print_exc()
            error_msg = f"Exception in ScatterAndGather control_flow: {e}"
            self.log_exception(fl_ctx, error_msg)
            self.system_panic(str(e), fl_ctx)

    def stop_controller(self, fl_ctx: FLContext):
        """Mark the workflow finished; no other teardown is required here."""
        self._phase = AppConstants.PHASE_FINISHED

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        """Report phase/round statistics when the info collector asks for them."""
        super().handle_event(event_type, fl_ctx)
        if event_type == InfoCollector.EVENT_TYPE_GET_STATS:
            collector = fl_ctx.get_prop(InfoCollector.CTX_KEY_STATS_COLLECTOR, None)
            if collector:
                if not isinstance(collector, GroupInfoCollector):
                    raise TypeError("collector must be GroupInfoCollector but got {}".format(type(collector)))

                collector.add_info(
                    group_name=self._name,
                    info={"phase": self._phase, "current_round": self._current_round, "num_rounds": self._num_rounds},
                )

    def _prepare_train_task_data(self, client_task: ClientTask, fl_ctx: FLContext) -> None:
        # Expose the outgoing task data to event handlers before it is sent.
        fl_ctx.set_prop(AppConstants.TRAIN_SHAREABLE, client_task.task.data, private=True, sticky=False)
        self.fire_event(AppEventType.BEFORE_TRAIN_TASK, fl_ctx)

    def _process_train_result(self, client_task: ClientTask, fl_ctx: FLContext) -> None:
        # Feed the client's training result to the weight aggregator.
        result = client_task.result
        client_name = client_task.client.name

        self._accept_train_result(client_name=client_name, result=result, fl_ctx=fl_ctx)

        # Cleanup task result
        client_task.result = None

    def _process_val_result(self, client_task: ClientTask, fl_ctx: FLContext) -> None:
        # Feed the client's validation-loss result to the search-val aggregator.
        result = client_task.result
        client_name = client_task.client.name

        self._accept_val_result(client_name=client_name, result=result, fl_ctx=fl_ctx)

        # Cleanup task result
        client_task.result = None

    # NOTE(review): _accept_val_result and _accept_train_result are identical
    # except for the aggregator they feed; consider extracting a shared helper.
    def _accept_val_result(self, client_name: str, result: Shareable, fl_ctx: FLContext) -> bool:
        """Validate a client's validation result and hand it to the search-val aggregator.

        Returns True if the aggregator accepted the contribution.
        """
        rc = result.get_return_code()
        # Echo the contribution round from the cookie back into the headers so
        # the aggregator can match the result to the right round.
        contribution_round = result.get_cookie(AppConstants.CONTRIBUTION_ROUND)
        result.set_header(AppConstants.CONTRIBUTION_ROUND, contribution_round)

        # Raise errors if bad peer context or execution exception.
        if rc and rc != ReturnCode.OK:
            if self.ignore_result_error:
                self.log_error(fl_ctx, f"Ignore the client train result. Train result error code: {rc}")
                return False
            else:
                if rc in [ReturnCode.MISSING_PEER_CONTEXT, ReturnCode.BAD_PEER_CONTEXT]:
                    self.system_panic("Peer context is bad or missing. ScatterAndGather exiting.", fl_ctx=fl_ctx)
                    return False
                elif rc in [ReturnCode.EXECUTION_EXCEPTION, ReturnCode.TASK_UNKNOWN]:
                    self.system_panic(
                        "Execution Exception in client training. ScatterAndGather exiting.", fl_ctx=fl_ctx
                    )
                    return False
                elif rc in [
                    ReturnCode.EXECUTION_RESULT_ERROR,
                    ReturnCode.TASK_DATA_FILTER_ERROR,
                    ReturnCode.TASK_RESULT_FILTER_ERROR,
                ]:
                    self.system_panic("Execution result is not a shareable. ScatterAndGather exiting.", fl_ctx=fl_ctx)
                    return False

        fl_ctx.set_prop(AppConstants.CURRENT_ROUND, self._current_round, private=True, sticky=False)
        fl_ctx.set_prop(AppConstants.TRAINING_RESULT, result, private=True, sticky=False)
        fl_ctx.set_prop(AppConstants.CONTRIBUTION_ROUND, contribution_round, private=True, sticky=False)
        self.fire_event(AppEventType.BEFORE_CONTRIBUTION_ACCEPT, fl_ctx)

        accepted = self.aggregator_search_val.accept(result, fl_ctx)
        accepted_msg = "ACCEPTED" if accepted else "REJECTED"
        self.log_info(fl_ctx, f"Contribution from {client_name} {accepted_msg} by the aggregator.")

        fl_ctx.set_prop(AppConstants.AGGREGATION_ACCEPTED, accepted, private=True, sticky=False)
        self.fire_event(AppEventType.AFTER_CONTRIBUTION_ACCEPT, fl_ctx)

        return accepted

    def _accept_train_result(self, client_name: str, result: Shareable, fl_ctx: FLContext) -> bool:
        """Validate a client's training result and hand it to the weight aggregator.

        Returns True if the aggregator accepted the contribution.
        """
        rc = result.get_return_code()
        # Echo the contribution round from the cookie back into the headers so
        # the aggregator can match the result to the right round.
        contribution_round = result.get_cookie(AppConstants.CONTRIBUTION_ROUND)
        result.set_header(AppConstants.CONTRIBUTION_ROUND, contribution_round)

        # Raise errors if bad peer context or execution exception.
        if rc and rc != ReturnCode.OK:
            if self.ignore_result_error:
                self.log_error(fl_ctx, f"Ignore the client train result. Train result error code: {rc}")
                return False
            else:
                if rc in [ReturnCode.MISSING_PEER_CONTEXT, ReturnCode.BAD_PEER_CONTEXT]:
                    self.system_panic("Peer context is bad or missing. ScatterAndGather exiting.", fl_ctx=fl_ctx)
                    return False
                elif rc in [ReturnCode.EXECUTION_EXCEPTION, ReturnCode.TASK_UNKNOWN]:
                    self.system_panic(
                        "Execution Exception in client training. ScatterAndGather exiting.", fl_ctx=fl_ctx
                    )
                    return False
                elif rc in [
                    ReturnCode.EXECUTION_RESULT_ERROR,
                    ReturnCode.TASK_DATA_FILTER_ERROR,
                    ReturnCode.TASK_RESULT_FILTER_ERROR,
                ]:
                    self.system_panic("Execution result is not a shareable. ScatterAndGather exiting.", fl_ctx=fl_ctx)
                    return False

        fl_ctx.set_prop(AppConstants.CURRENT_ROUND, self._current_round, private=True, sticky=False)
        fl_ctx.set_prop(AppConstants.TRAINING_RESULT, result, private=True, sticky=False)
        fl_ctx.set_prop(AppConstants.CONTRIBUTION_ROUND, contribution_round, private=True, sticky=False)
        self.fire_event(AppEventType.BEFORE_CONTRIBUTION_ACCEPT, fl_ctx)

        accepted = self.aggregator.accept(result, fl_ctx)
        accepted_msg = "ACCEPTED" if accepted else "REJECTED"
        self.log_info(fl_ctx, f"Contribution from {client_name} {accepted_msg} by the aggregator.")

        fl_ctx.set_prop(AppConstants.AGGREGATION_ACCEPTED, accepted, private=True, sticky=False)
        self.fire_event(AppEventType.AFTER_CONTRIBUTION_ACCEPT, fl_ctx)

        return accepted
| NVFlare-main | research/auto-fed-rl/src/autofedrl/autofedrl_scatter_and_gather.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_common.executors.learner_executor import LearnerExecutor
from nvflare.security.logging import secure_format_exception
from .autofedrl_constants import AutoFedRLConstants
class AutoFedRLLearnerExecutor(LearnerExecutor):
    """Client-side executor for Auto-FedRL (https://arxiv.org/abs/2203.06338).

    Behaves exactly like LearnerExecutor, with one extra dispatch target: a
    "validate for hyperparameter search" task that scores the incoming global
    model so the server can update its search distribution.
    """

    def __init__(
        self,
        learner_id,
        train_task=AppConstants.TASK_TRAIN,
        submit_model_task=AppConstants.TASK_SUBMIT_MODEL,
        validate_task=AppConstants.TASK_VALIDATION,
        validate_for_search_task=AutoFedRLConstants.MODEL_VALIDATE_FOR_SEARCH,
    ):
        """Create the executor.

        Args:
            learner_id (str): id pointing to the learner object.
            train_task (str, optional): label to dispatch the train task.
            submit_model_task (str, optional): label to dispatch the submit-model task.
            validate_task (str, optional): label to dispatch the validation task.
            validate_for_search_task (str, optional): label to dispatch model validation
                used for the hyperparameter search.
        """
        super().__init__(
            learner_id=learner_id,
            train_task=train_task,
            submit_model_task=submit_model_task,
            validate_task=validate_task,
        )
        self.validate_for_search_task = validate_for_search_task

    def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Same as LearnerExecutor.execute() apart for additional support for an `validate_for_search_task`."""
        self.log_info(fl_ctx, f"Client trainer got task: {task_name}")
        try:
            # First-match routing table; order mirrors the original if/elif chain.
            routes = (
                (self.train_task, lambda: self.train(shareable, fl_ctx, abort_signal)),
                (self.submit_model_task, lambda: self.submit_model(shareable, fl_ctx)),
                (self.validate_task, lambda: self.validate(shareable, fl_ctx, abort_signal)),
                (self.validate_for_search_task, lambda: self.validate_for_search(shareable, fl_ctx, abort_signal)),
            )
            for label, handler in routes:
                if task_name == label:
                    return handler()
            self.log_error(fl_ctx, f"Could not handle task: {task_name}")
            return make_reply(ReturnCode.TASK_UNKNOWN)
        except Exception as e:
            # Task execution error, return EXECUTION_EXCEPTION Shareable
            self.log_exception(fl_ctx, f"learner execute exception: {secure_format_exception(e)}")
            return make_reply(ReturnCode.EXECUTION_EXCEPTION)

    def validate_for_search(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Ask the learner to validate the received model for the hyperparameter search."""
        self.log_debug(fl_ctx, f"validate for search abort_signal {abort_signal.triggered}")

        # Tag the request so the learner knows this is a search-time validation.
        shareable.set_header(AppConstants.VALIDATE_TYPE, AutoFedRLConstants.MODEL_VALIDATE_FOR_SEARCH)
        validate_result: Shareable = self.learner.validate(shareable, fl_ctx, abort_signal)

        if validate_result and isinstance(validate_result, Shareable):
            return validate_result
        return make_reply(ReturnCode.EMPTY_RESULT)
| NVFlare-main | research/auto-fed-rl/src/autofedrl/autofedrl_learner_executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from pt.learners.cifar10_learner import CIFAR10Learner
from pt.utils.cifar10_data_utils import CIFAR10_ROOT
from pt.utils.cifar10_dataset import CIFAR10_Idx
from torchvision import datasets
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants, ValidateType
from nvflare.app_opt.pt.decomposers import TensorDecomposer
from nvflare.fuel.utils import fobs
from .autofedrl_constants import AutoFedRLConstants
class CIFAR10AutoFedRLearner(CIFAR10Learner): # TODO: also support CIFAR10ScaffoldLearner
def __init__(
    self,
    train_idx_root: str = "./dataset",
    aggregation_epochs: int = 1,  # TODO: Is this still being used?
    lr: float = 1e-2,
    fedproxloss_mu: float = 0.0,
    central: bool = False,
    analytic_sender_id: str = "analytic_sender",
    batch_size: int = 64,
    num_workers: int = 0,
    entropy_coeff: float = 1.0,
    entropy_threshold: float = 2.0,
):
    """Simple CIFAR-10 trainer utilizing Auto-FedRL.

    Extends CIFAR10Learner with an entropy penalty on the model outputs and
    registers FOBS support for serializing PyTorch tensors exchanged with the
    server.

    Args:
        train_idx_root: directory with site training indices for CIFAR-10 data.
        aggregation_epochs: the number of training epochs for a round. Defaults to 1.
        lr: local learning rate. Float number. Defaults to 1e-2.
        fedproxloss_mu: weight for FedProx loss. Defaults to 0.0 (no FedProx).
        central: whether to simulate central training. Default False.
        analytic_sender_id: id of `AnalyticsSender` if configured as a client component.
            If configured, TensorBoard events will be fired. Defaults to "analytic_sender".
        batch_size: batch size for training and validation.
        num_workers: number of workers for data loaders.
        entropy_coeff: weight of the entropy penalty; 0 disables it.
        entropy_threshold: entropy value below which the penalty kicks in.

    Returns:
        a Shareable with the updated local model after running `train()`,
        or validation metrics after calling `validate()`,
        or the best local model when calling `get_model_for_validation()`
    """
    base_kwargs = dict(
        train_idx_root=train_idx_root,
        aggregation_epochs=aggregation_epochs,
        lr=lr,
        fedproxloss_mu=fedproxloss_mu,
        central=central,
        analytic_sender_id=analytic_sender_id,
        batch_size=batch_size,
        num_workers=num_workers,
    )
    CIFAR10Learner.__init__(self, **base_kwargs)
    self.entropy_coeff = entropy_coeff
    self.entropy_threshold = entropy_threshold
    # Round bookkeeping and best global accuracy observed so far.
    self.current_round = 0
    self.best_global_acc = 0
    # Use FOBS serializing/deserializing PyTorch tensors
    fobs.register(TensorDecomposer)
def initialize(self, parts: dict, fl_ctx: FLContext):
    """Run the base learner setup, then swap in SGD with Nesterov momentum.

    Enabling the Nesterov momentum can stabilize the training.
    """
    CIFAR10Learner.initialize(self, parts=parts, fl_ctx=fl_ctx)
    self.optimizer = optim.SGD(
        self.model.parameters(),
        lr=self.lr,
        momentum=0.9,
        weight_decay=0.0,
        nesterov=True,
    )
def _create_datasets(self, fl_ctx: FLContext):
    """To be called only after Cifar10DataSplitter downloaded the data and computed splits.

    Builds three loaders:
      * ``train_loader`` over this site's training split,
      * ``val_loader_search`` over the last ``batch_size * 10`` training images,
        held out to score sampled hyperparameters,
      * ``valid_loader`` over the standard CIFAR-10 test set.
    """
    if self.train_dataset is None or self.train_loader is None:
        if not self.central:
            # Set datalist, here the path and filename are hard-coded, can also be fed as an argument
            site_idx_file_name = os.path.join(self.train_idx_root, self.client_id + ".npy")
            self.log_info(fl_ctx, f"IndexList Path: {site_idx_file_name}")
            if os.path.exists(site_idx_file_name):
                self.log_info(fl_ctx, "Loading subset index")
                site_idx = np.load(site_idx_file_name).tolist()  # TODO: get from fl_ctx/shareable?
            else:
                self.system_panic(f"No subset index found! File {site_idx_file_name} does not exist!", fl_ctx)
                return
            self.log_info(fl_ctx, f"Client subset size: {len(site_idx)}")
        else:
            site_idx = None  # use whole training dataset if self.central=True

        # Fix: this line previously read `self.log_debug(fl_ctx, msg)(fl_ctx, ...)`
        # with an undefined `msg`, raising a NameError before any dataset was built.
        self.log_debug(fl_ctx, f"site_idx: {site_idx}")

        # Hold out the last `n_img_for_search` training images to evaluate sampled
        # hyperparameters without touching the test set.
        n_img_for_search = self.batch_size * 10
        # Robustness: in central mode site_idx is None and cannot be sliced; pass
        # None through so the full training set is used for both loaders.
        train_idx = site_idx[:] if site_idx is not None else None
        search_idx = site_idx[-n_img_for_search:] if site_idx is not None else None

        # Train set
        self.train_dataset = CIFAR10_Idx(
            root=CIFAR10_ROOT,
            data_idx=train_idx,
            train=True,
            download=False,
            transform=self.transform_train,
        )
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset, batch_size=self.batch_size, shuffle=True
        )
        # Val set for search
        self.val_dataset_for_search = CIFAR10_Idx(
            root=CIFAR10_ROOT,
            data_idx=search_idx,
            train=True,
            download=False,
            transform=self.transform_valid,
        )
        self.val_loader_search = torch.utils.data.DataLoader(
            self.val_dataset_for_search, batch_size=self.batch_size, shuffle=False
        )
        if site_idx is not None:
            self.log_info(
                fl_ctx,
                f"Split ({n_img_for_search}) images from {len(site_idx)} training images for Hyerparamters Search",
            )

    if self.valid_dataset is None or self.valid_loader is None:
        # Standard CIFAR-10 test set for validation.
        self.valid_dataset = datasets.CIFAR10(
            root=CIFAR10_ROOT,
            train=False,
            download=False,
            transform=self.transform_valid,
        )
        self.valid_loader = torch.utils.data.DataLoader(
            self.valid_dataset, batch_size=self.batch_size, shuffle=False
        )
    def local_train(self, fl_ctx, train_loader, model_global, abort_signal: Signal, val_freq: int = 0):
        """Train self.model locally for self.aggregation_epochs epochs.

        The loss is self.criterion, optionally augmented with a FedProx
        proximal term against the frozen global model and an entropy
        regularization cost. Returns early (None) when abort_signal fires.

        Args:
            fl_ctx: FL context, used for logging.
            train_loader: loader over this client's training subset.
            model_global: frozen copy of the global model (FedProx reference).
            abort_signal: cooperative abort signal, checked every batch.
            val_freq: run local validation every `val_freq` epochs (0 = never).
        """
        for epoch in range(self.aggregation_epochs):
            if abort_signal.triggered:
                return
            self.model.train()
            epoch_len = len(train_loader)
            # Global epoch counter, used for TensorBoard step indexing below.
            self.epoch_global = self.epoch_of_start_time + epoch
            self.log_info(fl_ctx, f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})")
            avg_loss = 0.0
            for i, (inputs, labels) in enumerate(train_loader):
                if abort_signal.triggered:
                    return
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                # Forward
                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)
                # FedProx loss term
                if self.fedproxloss_mu > 0:
                    fed_prox_loss = self.criterion_prox(self.model, model_global)
                    loss += fed_prox_loss
                # entropy_cost: penalize batches whose mean prediction entropy
                # drops below self.entropy_threshold.
                # NOTE(review): the exp/sum normalization treats `outputs` as
                # raw logits — confirm the model does not already apply softmax.
                if self.entropy_coeff > 0:
                    probs_output = torch.exp(outputs) / (torch.exp(outputs).sum(1).view(-1, 1))
                    entropy = -(probs_output * torch.log(probs_output)).sum(1).mean()
                    entropy_cost = self.entropy_coeff * F.relu(self.entropy_threshold - entropy)
                    loss += entropy_cost
                # Zero the parameter gradients
                self.optimizer.zero_grad()
                # Backward + Optimize
                loss.backward()
                self.optimizer.step()
                current_step = epoch_len * self.epoch_global + i
                avg_loss += loss.item()
            # Logged once per epoch; current_step is the step of the last batch.
            self.writer.add_scalar("train_loss", avg_loss / len(train_loader), current_step)
            if val_freq > 0 and epoch % val_freq == 0:
                acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model", fl_ctx=fl_ctx)
                if acc > self.best_acc:
                    self.best_acc = acc
                    self.save_model(is_best=True)
    def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Run one FL round of local training and return the weight difference.

        Steps: build datasets (lazily), apply server-provided hyperparameters
        (learning rate, number of local epochs), load the received global
        weights into self.model, train locally, validate, and return the
        (local - global) weight diff as a WEIGHT_DIFF DXO.

        Args:
            shareable: carries the global model weights plus round headers.
            fl_ctx: FL context for logging and identity.
            abort_signal: cooperative abort; returns TASK_ABORTED when set.

        Returns:
            Shareable with DataKind.WEIGHT_DIFF, or an abort/error reply.
        """
        self._create_datasets(fl_ctx)
        # Check abort signal
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # Get round information
        current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
        total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
        self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
        self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
        self.current_round = current_round
        # Get lr and ne (number of local epochs) chosen by the server-side HP search
        current_lr, current_ne = None, None
        hps = fobs.loads(shareable.get_header(AutoFedRLConstants.HYPERPARAMTER_COLLECTION))
        # NOTE(review): if hps is None, hps.get("aw") further below would raise
        # AttributeError — presumably the workflow always sends the collection;
        # confirm with the controller.
        if hps is not None:
            current_lr = hps.get("lr")
            # NOTE: self.lr is overwritten even when "lr" is absent (None).
            self.lr = current_lr
            current_ne = hps.get("ne")
        if current_lr is not None:
            # Override the optimizer's learning rate in-place for this round.
            for param_group in self.optimizer.param_groups:
                param_group["lr"] = current_lr
            self.log_info(fl_ctx, f"Received and override current learning rate as: {current_lr}")
        if current_ne is not None:
            self.aggregation_epochs = current_ne
            self.log_info(fl_ctx, f"Received and override current number of local epochs: {current_ne}")
        # Update local model weights with received weights
        dxo = from_shareable(shareable)
        global_weights = dxo.data
        # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
        local_var_dict = self.model.state_dict()
        model_keys = global_weights.keys()
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = global_weights[var_name]
                try:
                    # Reshape global weights to compute difference later on
                    global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
                    # Update the local dict
                    local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed!") from e
        self.model.load_state_dict(local_var_dict)
        # Local steps
        epoch_len = len(self.train_loader)
        self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
        # Make a copy of model_global as reference for potential FedProx loss or SCAFFOLD
        model_global = copy.deepcopy(self.model)
        for param in model_global.parameters():
            param.requires_grad = False  # frozen reference, never trained
        # Local train
        self.local_train(
            fl_ctx=fl_ctx,
            train_loader=self.train_loader,
            model_global=model_global,
            abort_signal=abort_signal,
            val_freq=1 if self.central else 0,
        )
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        self.epoch_of_start_time += self.aggregation_epochs
        # Perform valid after local train
        acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model", fl_ctx=fl_ctx)
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        self.log_info(fl_ctx, f"val_acc_local_model: {acc:.4f}")
        # Save model
        self.save_model(is_best=False)
        if acc > self.best_acc:
            self.best_acc = acc
            self.save_model(is_best=True)
        # Compute delta model, global model has the primary key set
        local_weights = self.model.state_dict()
        model_diff = {}
        for name in global_weights:
            if name not in local_weights:
                continue
            model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32)
            if np.any(np.isnan(model_diff[name])):
                self.system_panic(f"{name} weights became NaN...", fl_ctx)
                return make_reply(ReturnCode.EXECUTION_EXCEPTION)
        # Build the shareable
        dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
        if hps.get("aw") is not None:
            # When search aggregation weights, we have to override it
            # to 1, since we will manually assign weights to aggregator.
            # Search space will discover which client is more informative.
            # It might not be related to the number of data in a client.
            dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, 1)
        else:
            dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
        self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
        return dxo.to_shareable()
def local_valid(self, valid_loader, abort_signal: Signal, tb_id=None, fl_ctx=None, get_loss=False):
self.model.eval()
with torch.no_grad():
correct, total = 0, 0
for _, (inputs, labels) in enumerate(valid_loader):
if abort_signal.triggered:
return None
inputs, labels = inputs.to(self.device), labels.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
_, pred_label = torch.max(outputs.data, 1)
if get_loss:
# Return val loss instead of accuracy over the number batches
total += inputs.data.size()[0]
correct += loss.item()
else:
total += inputs.data.size()[0]
correct += (pred_label == labels.data).sum().item()
metric = correct / float(total)
if get_loss:
self.log_info(fl_ctx, f"HP Search loss: {metric} of {total} batches on {fl_ctx.get_identity_name()}")
if tb_id:
self.writer.add_scalar(tb_id, metric, self.current_round)
return metric
    def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Evaluate a received model locally and return metrics.

        Supports three validate types:
          * BEFORE_TRAIN_VALIDATE: accuracy of the incoming global model.
          * MODEL_VALIDATE: train/val accuracy for cross-site validation.
          * MODEL_VALIDATE_FOR_SEARCH: validation loss on the HP-search split.

        Args:
            shareable: carries the model weights to evaluate plus headers.
            fl_ctx: FL context for logging and identity.
            abort_signal: cooperative abort; returns TASK_ABORTED when set.

        Returns:
            Shareable with DataKind.METRICS, or an abort/error reply.

        Raises:
            ValueError: if a weight cannot be converted, or none were loaded.
        """
        self._create_datasets(fl_ctx)
        # Check abort signal
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # Get validation information
        self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
        model_owner = shareable.get(ReservedHeaderKey.HEADERS).get(AppConstants.MODEL_OWNER)
        if model_owner:
            self.log_info(fl_ctx, f"Evaluating model from {model_owner} on {fl_ctx.get_identity_name()}")
        else:
            # Evaluating global model during training
            model_owner = "global_model"
        # Update local model weights with received weights
        dxo = from_shareable(shareable)
        global_weights = dxo.data
        # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
        local_var_dict = self.model.state_dict()
        model_keys = global_weights.keys()
        n_loaded = 0  # number of variables actually copied into the local model
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = torch.as_tensor(global_weights[var_name], device=self.device)
                try:
                    # Update the local dict
                    local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
                    n_loaded += 1
                except Exception as e:
                    raise ValueError(f"Convert weight from {var_name} failed!") from e
        self.model.load_state_dict(local_var_dict)
        if n_loaded == 0:
            raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
        validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
        if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # Perform valid before local train
            global_acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_global_model", fl_ctx=fl_ctx)
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_acc_global_model ({model_owner}): {global_acc}")
            if global_acc > self.best_global_acc:
                self.best_global_acc = global_acc
                # Log the best global model_accuracy
                self.writer.add_scalar("best_val_acc_global_model", self.best_global_acc, self.current_round)
            return DXO(data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: global_acc}, meta={}).to_shareable()
        elif validate_type == ValidateType.MODEL_VALIDATE:
            # Perform valid
            train_acc = self.local_valid(self.train_loader, abort_signal)
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"training acc ({model_owner}): {train_acc}")
            val_acc = self.local_valid(self.valid_loader, abort_signal)
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"validation acc ({model_owner}): {val_acc}")
            self.log_info(fl_ctx, "Evaluation finished. Returning shareable")
            val_results = {"train_accuracy": train_acc, "val_accuracy": val_acc}
            metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results)
            return metric_dxo.to_shareable()
        elif validate_type == AutoFedRLConstants.MODEL_VALIDATE_FOR_SEARCH:
            self.log_info(fl_ctx, f"Evaluating model from {model_owner} on {fl_ctx.get_identity_name()} for HP Search")
            val_loss_hp = self.local_valid(self.val_loader_search, abort_signal, fl_ctx=fl_ctx, get_loss=True)
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            val_results = {"val_loss": val_loss_hp}
            metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results)
            return metric_dxo.to_shareable()
        else:
            return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| NVFlare-main | research/auto-fed-rl/src/autofedrl/autofedrl_cifar10_learner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.aggregators.intime_accumulate_model_aggregator import InTimeAccumulateWeightedAggregator
from .autofedrl_constants import AutoFedRLConstants
class AutoFedRLWeightedAggregator(InTimeAccumulateWeightedAggregator):
    """Accumulated weighted aggregation with updatable aggregation weights.

    Used for the Auto-FedRL implementation (https://arxiv.org/abs/2203.06338).
    Shares arguments with the base class.
    """

    def update_aggregation_weights(self, fl_ctx: FLContext):
        """Called when workflow determines to update aggregation weights

        Args:
            fl_ctx (FLContext): context provided by workflow

        Returns:
            None
        """
        hps = fl_ctx.get_prop(AutoFedRLConstants.HYPERPARAMTER_COLLECTION)
        received_aggregation_weights = hps.get("aw") if hps is not None else None
        if received_aggregation_weights is None:
            self.log_warning(fl_ctx, "Received aggregation weights are None.")
            return
        # TODO: Here, we assume contributor_name is "site-*".
        # this will be wrong if contributor_name is not in this pattern.
        aggregation_weights_dict = {
            f"site-{idx + 1}": weight for idx, weight in enumerate(received_aggregation_weights)
        }
        # Push the new weights into every per-data-kind DXO aggregator.
        for key in self.expected_data_kind:
            self.dxo_aggregators[key].aggregation_weights = aggregation_weights_dict
            self.log_info(fl_ctx, f"Assign current aggregation weights to aggregator: {key}")
| NVFlare-main | research/auto-fed-rl/src/autofedrl/autofedrl_model_aggregator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class AutoFedRLConstants(object):
    """String constants shared by the Auto-FedRL workflow components."""

    # Header/property key for the hyperparameter collection exchanged between
    # server and clients. NOTE: "hyperparamter" (sic) is misspelled, but the
    # value is part of the contract between components, so it is kept as-is.
    HYPERPARAMTER_COLLECTION = "hyperparamter_collection"
    # Validate-type marker for model evaluation during hyperparameter search.
    MODEL_VALIDATE_FOR_SEARCH = "model_validate_for_hp_search"
| NVFlare-main | research/auto-fed-rl/src/autofedrl/autofedrl_constants.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os

from setuptools import find_packages, setup

# The long description shown on PyPI comes straight from the README.
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as readme_file:
    long_description = readme_file.read()

# Release builds publish "monai-nvflare"; anything else becomes a dated nightly.
if os.environ.get("MONAI_NVFL_RELEASE") == "1":
    package_name = "monai-nvflare"
    version = "0.2.3"
else:
    package_name = "monai-nvflare-nightly"
    today = datetime.date.today()
    # Nightly versions append year-without-century, month, day (zero-padded).
    version = f"0.2.3.{today.year % 1000:02d}{today.month:02d}{today.day:02d}"

setup(
    name=package_name,
    version=version,
    description="MONAI NVIDIA FLARE integration",
    url="https://github.com/NVIDIA/NVFlare",
    package_dir={"monai_nvflare": "monai_nvflare"},
    packages=find_packages(
        where=".",
        include=[
            "*",
        ],
        exclude=["tests", "tests.*"],
    ),
    license_files=("LICENSE",),
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
    ],
    long_description=long_description,
    long_description_content_type="text/markdown",
    python_requires=">=3.8,<3.11",
    install_requires=["monai>=1.2.0rc5", "nvflare>=2.3.0"],
)
| NVFlare-main | integration/monai/setup.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from monai.apps.utils import download_and_extract
def download_spleen_dataset(filepath, output_dir, url="https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar"):
    """Download and extract the MSD Task09_Spleen dataset.

    Args:
        filepath: path where the downloaded archive is stored.
        output_dir: directory into which the archive is extracted.
        url: source URL of the archive. Defaults to the official MSD S3
            bucket; exposed as a parameter so mirrors can be used without
            editing this function (backward compatible).
    """
    download_and_extract(url=url, filepath=filepath, output_dir=output_dir)
if __name__ == "__main__":
    # Command-line wrapper: parse the target paths, then fetch and unpack.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--filepath",
        "-f",
        type=str,
        default="./data/Task09_Spleen.tar",
        help="the file path of the downloaded compressed file.",
    )
    arg_parser.add_argument(
        "--output_dir",
        "-o",
        type=str,
        default="./data",
        help="target directory to save extracted files.",
    )
    cli_args = arg_parser.parse_args()
    download_spleen_dataset(cli_args.filepath, cli_args.output_dir)
| NVFlare-main | integration/monai/examples/spleen_ct_segmentation_sim/download_spleen_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from monai.apps.utils import download_and_extract
def download_spleen_dataset(filepath, output_dir, url="https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar"):
    """Download and extract the MSD Task09_Spleen dataset.

    Args:
        filepath: path where the downloaded archive is stored.
        output_dir: directory into which the archive is extracted.
        url: source URL of the archive. Defaults to the official MSD S3
            bucket; exposed as a parameter so mirrors can be used without
            editing this function (backward compatible).
    """
    download_and_extract(url=url, filepath=filepath, output_dir=output_dir)
if __name__ == "__main__":
    # Command-line wrapper: parse the target paths, then fetch and unpack.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--filepath",
        "-f",
        type=str,
        default="./data/Task09_Spleen.tar",
        help="the file path of the downloaded compressed file.",
    )
    arg_parser.add_argument(
        "--output_dir",
        "-o",
        type=str,
        default="./data",
        help="target directory to save extracted files.",
    )
    cli_args = arg_parser.parse_args()
    download_spleen_dataset(cli_args.filepath, cli_args.output_dir)
| NVFlare-main | integration/monai/examples/spleen_ct_segmentation_local/download_spleen_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner, api_command_wrapper
def read_json(filename):
    """Load and return the parsed JSON content of `filename`.

    Args:
        filename: path to a JSON file.

    Returns:
        The deserialized JSON object.

    Raises:
        FileNotFoundError: if `filename` does not point to an existing file.
    """
    # Fixed: the original assert message was the literal "(unknown) does not
    # exist!" (the filename was never interpolated into the f-string). Also
    # raise instead of `assert`, which is stripped under `python -O`.
    if not os.path.isfile(filename):
        raise FileNotFoundError(f"{filename} does not exist!")
    with open(filename, "r") as f:
        return json.load(f)
def write_json(data, filename):
    """Serialize `data` to `filename` as 4-space-indented JSON."""
    with open(filename, "w") as out_fh:
        out_fh.write(json.dumps(data, indent=4))
def main():
    """Submit a job through the FLARE admin API, then log out."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--admin_dir", type=str, default="./admin/", help="Path to admin directory.")
    arg_parser.add_argument("--username", type=str, default="[email protected]", help="Admin username.")
    arg_parser.add_argument("--job", type=str, default="cifar10_fedavg", help="Path to job config.")
    arg_parser.add_argument("--poc", action="store_true", help="Whether admin does not use SSL.")
    cli_args = arg_parser.parse_args()

    assert os.path.isdir(cli_args.admin_dir), f"admin directory does not exist at {cli_args.admin_dir}"

    # Connect to the admin server.
    admin_runner = FLAdminAPIRunner(
        username=cli_args.username,
        admin_dir=cli_args.admin_dir,
        poc=cli_args.poc,
        debug=False,
    )
    # Submit the job, then close the admin session.
    api_command_wrapper(admin_runner.api.submit_job(cli_args.job))
    admin_runner.api.logout()


if __name__ == "__main__":
    main()
| NVFlare-main | integration/monai/examples/spleen_ct_segmentation_local/submit_job.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from monai.bundle.config_parser import ConfigParser
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.app_constant import StatisticsConstants as StC
class MonaiDataStatsPersistor(FLComponent):
    def __init__(self, fmt="yaml"):
        """Persist per-client data statistics via MONAI's ``ConfigParser``.

        Args:
            fmt: format used to save the analysis results; supported suffixes
                are "json", "yaml", "yml". Defaults to "yaml".
        """
        super().__init__()
        self.fmt = fmt

    def handle_event(self, event: str, fl_ctx: FLContext):
        # Only act once the pre-run results from the clients are available.
        if event != EventType.PRE_RUN_RESULT_AVAILABLE:
            return
        result = fl_ctx.get_prop(StC.PRE_RUN_RESULT)
        if not result:
            self.log_debug(fl_ctx, "Empty pre-task results.")
            return
        # Write one stats file per client into the app root directory.
        for client_name, client_result in result.items():
            app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
            output_path = os.path.join(app_root, f"{client_name}_data_stats.{self.fmt}")
            ConfigParser.export_config_file(client_result.data, output_path, fmt=self.fmt, default_flow_style=None)
            self.log_info(fl_ctx, f"Saved data stats of client {client_name} at {output_path}")
| NVFlare-main | integration/monai/monai_nvflare/monai_data_stats_persistor.py |
Subsets and Splits