# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from net import Net
# (1) import nvflare client API
import nvflare.client as flare
# (optional) set a fixed place so we don't need to download every time
DATASET_PATH = "/tmp/nvflare/data"
# (optional) use the GPU to speed things up.
# if you want to use the CPU, set DEVICE = "cpu"
DEVICE = "cuda:0"
def main():
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
batch_size = 4
trainset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
net = Net()
# (2) initializes NVFlare client API
flare.init()
# (3) gets FLModel from NVFlare
input_model = flare.receive()
# (4) loads model from NVFlare
net.load_state_dict(input_model.params)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# (optional) use GPU to speed things up
net.to(DEVICE)
# (optional) calculate total steps
steps = 2 * len(trainloader)
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
# (optional) use GPU to speed things up
inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
print("Finished Training")
PATH = "./cifar_net.pth"
torch.save(net.state_dict(), PATH)
    # (5) wraps evaluation logic into a method to reuse it for
    # both the locally trained and the received model
def evaluate(input_weights):
net = Net()
net.load_state_dict(input_weights)
# (optional) use GPU to speed things up
net.to(DEVICE)
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in testloader:
# (optional) use GPU to speed things up
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# calculate outputs by running images through the network
outputs = net(images)
# the class with the highest energy is what we choose as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %")
return 100 * correct // total
# (6) evaluate on received model for model selection
accuracy = evaluate(input_model.params)
# (7) construct trained FL model
output_model = flare.FLModel(
params=net.cpu().state_dict(),
metrics={"accuracy": accuracy},
meta={"NUM_STEPS_CURRENT_ROUND": steps},
)
# (8) send model back to NVFlare
flare.send(output_model)
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/cifar10_fl.py |
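The script above performs a single receive/train/send exchange; in a deployed job, NVFlare either relaunches it each round or keeps one process alive across rounds. Below is a minimal sketch of the long-lived variant, assuming an NVFlare release that exposes flare.is_running(); train_one_round is a hypothetical helper standing in for the training loop above.

import nvflare.client as flare

flare.init()
while flare.is_running():
    input_model = flare.receive()  # FLModel carrying the current global weights
    params, accuracy, steps = train_one_round(input_model.params)  # hypothetical helper
    flare.send(
        flare.FLModel(
            params=params,
            metrics={"accuracy": accuracy},
            meta={"NUM_STEPS_CURRENT_ROUND": steps},
        )
    )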
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/net.py |
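The fc1 input size of 16 * 5 * 5 follows from CIFAR-10's 32x32 inputs: conv1 (kernel 5) yields 28x28 feature maps, pooling halves them to 14x14, conv2 yields 10x10, and pooling halves them to 5x5 across 16 channels. A quick standalone shape check (a sketch, not part of the repo):

import torch
from net import Net

net = Net()
x = torch.randn(1, 3, 32, 32)  # one CIFAR-10-sized image
print(net(x).shape)  # torch.Size([1, 10]) -- one logit per class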
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from net import Net
# (optional) use the GPU to speed things up.
# if you want to use the CPU, set DEVICE = "cpu"
DEVICE = "cuda:0"
def main():
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
batch_size = 4
trainset = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# (optional) use GPU to speed things up
net.to(DEVICE)
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
# (optional) use GPU to speed things up
inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
print("Finished Training")
PATH = "./cifar_net.pth"
torch.save(net.state_dict(), PATH)
net = Net()
net.load_state_dict(torch.load(PATH))
# (optional) use GPU to speed things up
net.to(DEVICE)
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in testloader:
# (optional) use GPU to speed things up
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# calculate outputs by running images through the network
outputs = net(images)
# the class with the highest energy is what we choose as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %")
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/cifar10_original.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from net import Net
# (optional) use the GPU to speed things up.
# if you want to use the CPU, set DEVICE = "cpu"
DEVICE = "cuda:0"
PATH = "./cifar_net.pth"
def main():
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
batch_size = 4
trainset = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
net = Net()
# wraps training logic into a method
def train(total_epochs=2, lr=0.001):
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
# (optional) use GPU to speed things up
net.to(DEVICE)
for epoch in range(total_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
# (optional) use GPU to speed things up
inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
print("Finished Training")
torch.save(net.state_dict(), PATH)
    # wraps evaluation logic into a method
def evaluate(input_weights):
net.load_state_dict(input_weights)
# (optional) use GPU to speed things up
net.to(DEVICE)
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in testloader:
# (optional) use GPU to speed things up
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# calculate outputs by running images through the network
outputs = net(images)
# the class with the highest energy is what we choose as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %")
# return evaluation metrics
return 100 * correct // total
# call train method
train(total_epochs=2, lr=0.001)
# call evaluate method
evaluate(input_weights=torch.load(PATH))
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/cifar10_structured_original.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torchvision
import torchvision.transforms as transforms
from lit_net import LitNet
from pytorch_lightning import LightningDataModule, Trainer, seed_everything
from torch.utils.data import DataLoader, random_split
seed_everything(7)
DATASET_PATH = "/tmp/nvflare/data"
BATCH_SIZE = 4
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
class CIFAR10DataModule(LightningDataModule):
def __init__(self, data_dir: str = DATASET_PATH, batch_size: int = BATCH_SIZE):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
def prepare_data(self):
torchvision.datasets.CIFAR10(root=self.data_dir, train=True, download=True, transform=transform)
torchvision.datasets.CIFAR10(root=self.data_dir, train=False, download=True, transform=transform)
def setup(self, stage: str):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage == "validate":
cifar_full = torchvision.datasets.CIFAR10(
root=self.data_dir, train=True, download=False, transform=transform
)
self.cifar_train, self.cifar_val = random_split(cifar_full, [0.8, 0.2])
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage == "predict":
self.cifar_test = torchvision.datasets.CIFAR10(
root=self.data_dir, train=False, download=False, transform=transform
)
def train_dataloader(self):
return DataLoader(self.cifar_train, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.cifar_val, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.cifar_test, batch_size=self.batch_size)
def predict_dataloader(self):
return DataLoader(self.cifar_test, batch_size=self.batch_size)
def main():
model = LitNet()
cifar10_dm = CIFAR10DataModule()
trainer = Trainer(max_epochs=1, accelerator="auto", devices=1 if torch.cuda.is_available() else None)
# perform local training
print("--- train new model ---")
trainer.fit(model, datamodule=cifar10_dm)
# test local model
print("--- test new model ---")
trainer.test(ckpt_path="best", datamodule=cifar10_dm)
# get predictions
print("--- prediction with new best model ---")
trainer.predict(ckpt_path="best", datamodule=cifar10_dm)
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/cifar10_lightning_original.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torchvision
import torchvision.transforms as transforms
from lit_net import LitNet
from pytorch_lightning import LightningDataModule, Trainer, seed_everything
from torch.utils.data import DataLoader, random_split
# (1) import nvflare lightning client API
import nvflare.client.lightning as flare
seed_everything(7)
DATASET_PATH = "/tmp/nvflare/data"
BATCH_SIZE = 4
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
class CIFAR10DataModule(LightningDataModule):
def __init__(self, data_dir: str = DATASET_PATH, batch_size: int = BATCH_SIZE):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
def prepare_data(self):
torchvision.datasets.CIFAR10(root=self.data_dir, train=True, download=True, transform=transform)
torchvision.datasets.CIFAR10(root=self.data_dir, train=False, download=True, transform=transform)
def setup(self, stage: str):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage == "validate":
cifar_full = torchvision.datasets.CIFAR10(
root=self.data_dir, train=True, download=False, transform=transform
)
self.cifar_train, self.cifar_val = random_split(cifar_full, [0.8, 0.2])
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage == "predict":
self.cifar_test = torchvision.datasets.CIFAR10(
root=self.data_dir, train=False, download=False, transform=transform
)
def train_dataloader(self):
return DataLoader(self.cifar_train, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.cifar_val, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.cifar_test, batch_size=self.batch_size)
def predict_dataloader(self):
return DataLoader(self.cifar_test, batch_size=self.batch_size)
def main():
model = LitNet()
cifar10_dm = CIFAR10DataModule()
trainer = Trainer(max_epochs=1, accelerator="auto", devices=1 if torch.cuda.is_available() else None)
# (2) patch the lightning trainer
flare.patch(trainer)
# (3) evaluate the current global model to allow server-side model selection
print("--- validate global model ---")
trainer.validate(model, datamodule=cifar10_dm)
# perform local training starting with the received global model
print("--- train new model ---")
trainer.fit(model, datamodule=cifar10_dm)
# test local model
print("--- test new model ---")
trainer.test(ckpt_path="best", datamodule=cifar10_dm)
# get predictions
print("--- prediction with new best model ---")
trainer.predict(ckpt_path="best", datamodule=cifar10_dm)
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/cifar10_lightning_fl.py |
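flare.patch(trainer) hooks the trainer's validate/fit so the global weights are loaded before local work starts and the updated weights are sent back when it finishes. For jobs that keep one process alive across rounds, the same body can sit in an explicit loop. A minimal sketch, reusing the trainer, model, and cifar10_dm objects from the script above and assuming your NVFlare release exposes is_running() and receive() in nvflare.client.lightning:

import nvflare.client.lightning as flare

flare.patch(trainer)
while flare.is_running():
    input_model = flare.receive()  # round metadata, e.g. input_model.current_round
    trainer.validate(model, datamodule=cifar10_dm)  # global-model metrics for model selection
    trainer.fit(model, datamodule=cifar10_dm)       # patched: receives/sends automatically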
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch.nn as nn
import torch.optim as optim
from net import Net
from pytorch_lightning import LightningModule
from torchmetrics import Accuracy
NUM_CLASSES = 10
criterion = nn.CrossEntropyLoss()
class LitNet(LightningModule):
def __init__(self):
super().__init__()
self.save_hyperparameters()
self.model = Net()
self.train_acc = Accuracy(task="multiclass", num_classes=NUM_CLASSES)
self.valid_acc = Accuracy(task="multiclass", num_classes=NUM_CLASSES)
def forward(self, x):
out = self.model(x)
return out
def training_step(self, batch, batch_idx):
x, labels = batch
outputs = self(x)
loss = criterion(outputs, labels)
self.train_acc(outputs, labels)
self.log("train_loss", loss)
self.log("train_acc", self.train_acc, on_step=True, on_epoch=False)
return loss
def evaluate(self, batch, stage=None):
x, labels = batch
outputs = self(x)
loss = criterion(outputs, labels)
self.valid_acc(outputs, labels)
if stage:
self.log(f"{stage}_loss", loss)
self.log(f"{stage}_acc", self.valid_acc, on_step=True, on_epoch=True)
return outputs
def validation_step(self, batch, batch_idx):
self.evaluate(batch, "val")
def test_step(self, batch, batch_idx):
self.evaluate(batch, "test")
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
return self.evaluate(batch)
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=0.001, momentum=0.9)
return {"optimizer": optimizer}
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/lit_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from net import Net
# (1) import nvflare client API
import nvflare.client as flare
# (optional) set a fixed place so we don't need to download every time
DATASET_PATH = "/tmp/nvflare/data"
# (optional) use the GPU to speed things up.
# if you want to use the CPU, set DEVICE = "cpu"
DEVICE = "cuda:0"
PATH = "./cifar_net.pth"
def main():
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
batch_size = 4
trainset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
net = Net()
# (2) initializes NVFlare client API
flare.init()
    # (3) decorates with flare.train and loads the model from the first argument
# wraps training logic into a method
@flare.train
def train(input_model=None, total_epochs=2, lr=0.001):
net.load_state_dict(input_model.params)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
# (optional) use GPU to speed things up
net.to(DEVICE)
# (optional) calculate total steps
steps = 2 * len(trainloader)
for epoch in range(total_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
# (optional) use GPU to speed things up
inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
print("Finished Training")
torch.save(net.state_dict(), PATH)
# (4) construct trained FL model
output_model = flare.FLModel(params=net.cpu().state_dict(), meta={"NUM_STEPS_CURRENT_ROUND": steps})
return output_model
    # (5) decorates with flare.evaluate and loads the model from the first argument
@flare.evaluate
def fl_evaluate(input_model=None):
return evaluate(input_weights=input_model.params)
    # wraps evaluation logic into a method
def evaluate(input_weights):
net.load_state_dict(input_weights)
# (optional) use GPU to speed things up
net.to(DEVICE)
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in testloader:
# (optional) use GPU to speed things up
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# calculate outputs by running images through the network
outputs = net(images)
# the class with the highest energy is what we choose as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %")
# return evaluation metrics
return 100 * correct // total
# (6) call fl_evaluate method before training
# to evaluate on the received/aggregated model
fl_evaluate()
# call train method
train(total_epochs=2, lr=0.001)
# call evaluate method
evaluate(input_weights=torch.load(PATH))
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/pt/code/cifar10_structured_fl.py |
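The decorators take care of the receive/send plumbing: a function decorated with @flare.train is called with the received FLModel injected as its first argument and must return an FLModel, which is sent back automatically; @flare.evaluate works the same way, but its return value is treated as the evaluation metric. A minimal standalone sketch of that contract (not part of the repo; the bodies are placeholders):

import nvflare.client as flare

flare.init()

@flare.train
def train(input_model=None):
    # input_model.params holds the received global weights
    return flare.FLModel(params=dict(input_model.params))  # returned model is sent automatically

@flare.evaluate
def fl_evaluate(input_model=None):
    return 0.0  # returned metric is sent back automatically

fl_evaluate()
train()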
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras import datasets
from tf_net import TFNet
# (1) import nvflare client API
import nvflare.client as flare
# (2) import helpers to load / dump flat weights
from nvflare.app_opt.tf.utils import get_flat_weights, load_flat_weights
# Note: TensorFlow places operations on an available GPU automatically,
# so no explicit device constant is needed here.
PATH = "./tf_model.ckpt"
def main():
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
model = TFNet(input_shape=(None, 32, 32, 3))
model.summary()
# (3) initializes NVFlare client API
flare.init()
# (4) gets FLModel from NVFlare
input_model = flare.receive()
# (optional) print system info
system_info = flare.system_info()
print(f"NVFlare system info: {system_info}")
# (5) loads model from NVFlare
load_flat_weights(model, input_model.params)
# (6) evaluate aggregated/received model
_, test_global_acc = model.evaluate(test_images, test_labels, verbose=2)
print(
f"Accuracy of the received model on round {input_model.current_round} on the 10000 test images: {test_global_acc * 100} %"
)
model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels))
print("Finished Training")
model.save_weights(PATH)
_, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(f"Accuracy of the model on the 10000 test images: {test_acc * 100} %")
# (7) construct trained FL model
output_model = flare.FLModel(params=get_flat_weights(model), metrics={"accuracy": test_global_acc})
# (8) send model back to NVFlare
flare.send(output_model)
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/tf/code/cifar10_tf_fl.py |
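get_flat_weights and load_flat_weights convert between a Keras model and the flat dict of numpy arrays that NVFlare transports as FLModel.params. A quick round-trip check using the TFNet above (a sketch):

from tf_net import TFNet

from nvflare.app_opt.tf.utils import get_flat_weights, load_flat_weights

model = TFNet(input_shape=(None, 32, 32, 3))
params = get_flat_weights(model)   # flat dict: one numpy array per model weight
load_flat_weights(model, params)   # restores exactly the same weights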
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras import Model, layers, losses
class TFNet(Model):
def __init__(self, input_shape):
super().__init__()
self.conv1 = layers.Conv2D(6, 5, activation="relu")
self.pool = layers.MaxPooling2D((2, 2), 2)
self.conv2 = layers.Conv2D(16, 5, activation="relu")
self.flatten = layers.Flatten()
self.fc1 = layers.Dense(120, activation="relu")
self.fc2 = layers.Dense(84, activation="relu")
self.fc3 = layers.Dense(10)
loss_fn = losses.SparseCategoricalCrossentropy(from_logits=True)
self.compile(optimizer="sgd", loss=loss_fn, metrics=["accuracy"])
self.build(input_shape)
def call(self, x):
x = self.pool(self.conv1(x))
x = self.pool(self.conv2(x))
x = self.flatten(x)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
| NVFlare-main | examples/hello-world/ml-to-fl/tf/code/tf_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras import datasets
from tf_net import TFNet
# Note: TensorFlow places operations on an available GPU automatically,
# so no explicit device constant is needed here.
PATH = "./tf_model.ckpt"
def main():
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
model = TFNet(input_shape=(None, 32, 32, 3))
model.summary()
model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels))
print("Finished Training")
model.save_weights(PATH)
_, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(f"Accuracy of the model on the 10000 test images: {test_acc * 100} %")
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/ml-to-fl/tf/code/cifar10_tf_original.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNetwork(nn.Module):
def __init__(self):
        super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| NVFlare-main | examples/hello-world/hello-pt/jobs/hello-pt/app/custom/simple_network.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import torch.cuda
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from nvflare.apis.dxo import DXO
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class PTModelLocator(ModelLocator):
def __init__(self):
super().__init__()
self.model = SimpleNetwork()
def get_model_names(self, fl_ctx: FLContext) -> List[str]:
return [PTConstants.PTServerName]
def locate_model(self, model_name, fl_ctx: FLContext) -> Union[DXO, None]:
if model_name == PTConstants.PTServerName:
try:
server_run_dir = fl_ctx.get_engine().get_workspace().get_app_dir(fl_ctx.get_job_id())
model_path = os.path.join(server_run_dir, PTConstants.PTFileModelName)
if not os.path.exists(model_path):
return None
# Load the torch model
device = "cuda" if torch.cuda.is_available() else "cpu"
data = torch.load(model_path, map_location=device)
# Set up the persistence manager.
if self.model:
default_train_conf = {"train": {"model": type(self.model).__name__}}
else:
default_train_conf = None
# Use persistence manager to get learnable
persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=default_train_conf)
ml = persistence_manager.to_model_learnable(exclude_vars=None)
# Create dxo and return
return model_learnable_to_dxo(ml)
except Exception as e:
self.log_error(fl_ctx, f"Error in retrieving {model_name}: {e}.", fire_event=False)
return None
else:
self.log_error(fl_ctx, f"PTModelLocator doesn't recognize name: {model_name}", fire_event=False)
return None
| NVFlare-main | examples/hello-world/hello-pt/jobs/hello-pt/app/custom/pt_model_locator.py |
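locate_model() boils down to a load/convert pipeline that can be exercised in isolation using only the calls that appear above (the checkpoint path here is hypothetical):

import torch

from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager

data = torch.load("/path/to/FL_global_model.pt", map_location="cpu")  # hypothetical path
manager = PTModelPersistenceFormatManager(data, default_train_conf={"train": {"model": "SimpleNetwork"}})
ml = manager.to_model_learnable(exclude_vars=None)  # checkpoint -> model learnable
dxo = model_learnable_to_dxo(ml)                    # learnable -> DXO for transport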
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
from cifar10trainer import Cifar10Trainer
from cifar10validator import Cifar10Validator
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.signal import Signal
TRAIN_TASK_NAME = "train"
@pytest.fixture()
def get_cifar_trainer():
with patch.object(Cifar10Trainer, "_save_local_model") as mock_save:
with patch.object(Cifar10Trainer, "_load_local_model") as mock_load:
yield Cifar10Trainer(train_task_name=TRAIN_TASK_NAME, epochs=1)
class TestCifar10Trainer:
@pytest.mark.parametrize("num_rounds", [1, 3])
def test_execute(self, get_cifar_trainer, num_rounds):
trainer = get_cifar_trainer
# just take first batch
iterator = iter(trainer._train_loader)
trainer._train_loader = [next(iterator)]
dxo = DXO(data_kind=DataKind.WEIGHTS, data=trainer.model.state_dict())
result = dxo.to_shareable()
for i in range(num_rounds):
result = trainer.execute(TRAIN_TASK_NAME, shareable=result, fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.OK
@patch.object(Cifar10Trainer, "_save_local_model")
@patch.object(Cifar10Trainer, "_load_local_model")
def test_execute_rounds(self, mock_save, mock_load):
train_task_name = "train"
trainer = Cifar10Trainer(train_task_name=train_task_name, epochs=2)
# just take first batch
myitt = iter(trainer._train_loader)
trainer._train_loader = [next(myitt)]
dxo = DXO(data_kind=DataKind.WEIGHTS, data=trainer.model.state_dict())
result = dxo.to_shareable()
for i in range(3):
result = trainer.execute(train_task_name, shareable=result, fl_ctx=FLContext(), abort_signal=Signal())
assert result.get_return_code() == ReturnCode.OK
class TestCifar10Validator:
def test_execute(self):
validate_task_name = "validate"
validator = Cifar10Validator(validate_task_name=validate_task_name)
# just take first batch
iterator = iter(validator._test_loader)
validator._test_loader = [next(iterator)]
dxo = DXO(data_kind=DataKind.WEIGHTS, data=validator.model.state_dict())
result = validator.execute(
validate_task_name, shareable=dxo.to_shareable(), fl_ctx=FLContext(), abort_signal=Signal()
)
assert result.get_return_code() == ReturnCode.OK
| NVFlare-main | examples/hello-world/hello-pt/jobs/hello-pt/app/custom/test_custom.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import torch
from pt_constants import PTConstants
from simple_network import SimpleNetwork
from torch import nn
from torch.optim import SGD
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.model import make_model_learnable, model_learnable_to_dxo
from nvflare.app_common.app_constant import AppConstants
from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
class Cifar10Trainer(Executor):
def __init__(
self,
data_path="~/data",
lr=0.01,
epochs=5,
train_task_name=AppConstants.TASK_TRAIN,
submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL,
exclude_vars=None,
pre_train_task_name=AppConstants.TASK_GET_WEIGHTS,
):
"""Cifar10 Trainer handles train and submit_model tasks. During train_task, it trains a
simple network on CIFAR10 dataset. For submit_model task, it sends the locally trained model
(if present) to the server.
Args:
lr (float, optional): Learning rate. Defaults to 0.01
epochs (int, optional): Epochs. Defaults to 5
train_task_name (str, optional): Task name for train task. Defaults to "train".
submit_model_task_name (str, optional): Task name for submit model. Defaults to "submit_model".
exclude_vars (list): List of variables to exclude during model loading.
pre_train_task_name: Task name for pre train task, i.e., sending initial model weights.
"""
super().__init__()
self._lr = lr
self._epochs = epochs
self._train_task_name = train_task_name
self._pre_train_task_name = pre_train_task_name
self._submit_model_task_name = submit_model_task_name
self._exclude_vars = exclude_vars
# Training setup
self.model = SimpleNetwork()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.loss = nn.CrossEntropyLoss()
self.optimizer = SGD(self.model.parameters(), lr=lr, momentum=0.9)
# Create Cifar10 dataset for training.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
self._train_dataset = CIFAR10(root=data_path, transform=transforms, download=True, train=True)
self._train_loader = DataLoader(self._train_dataset, batch_size=4, shuffle=True)
self._n_iterations = len(self._train_loader)
# Setup the persistence manager to save PT model.
# The default training configuration is used by persistence manager
# in case no initial model is found.
self._default_train_conf = {"train": {"model": type(self.model).__name__}}
self.persistence_manager = PTModelPersistenceFormatManager(
data=self.model.state_dict(), default_train_conf=self._default_train_conf
)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
try:
if task_name == self._pre_train_task_name:
# Get the new state dict and send as weights
return self._get_model_weights()
elif task_name == self._train_task_name:
# Get model weights
try:
dxo = from_shareable(shareable)
                except Exception:
self.log_error(fl_ctx, "Unable to extract dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Convert weights to tensor. Run training
torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()}
self._local_train(fl_ctx, torch_weights, abort_signal)
# Check the abort_signal after training.
# local_train returns early if abort_signal is triggered.
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# Save the local model after training.
self._save_local_model(fl_ctx)
# Get the new state dict and send as weights
return self._get_model_weights()
elif task_name == self._submit_model_task_name:
# Load local model
ml = self._load_local_model(fl_ctx)
# Get the model parameters and create dxo from it
dxo = model_learnable_to_dxo(ml)
return dxo.to_shareable()
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
except Exception as e:
self.log_exception(fl_ctx, f"Exception in simple trainer: {e}.")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _get_model_weights(self) -> Shareable:
# Get the new state dict and send as weights
weights = {k: v.cpu().numpy() for k, v in self.model.state_dict().items()}
outgoing_dxo = DXO(
data_kind=DataKind.WEIGHTS, data=weights, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self._n_iterations}
)
return outgoing_dxo.to_shareable()
def _local_train(self, fl_ctx, weights, abort_signal):
# Set the model weights
self.model.load_state_dict(state_dict=weights)
# Basic training
self.model.train()
for epoch in range(self._epochs):
running_loss = 0.0
for i, batch in enumerate(self._train_loader):
if abort_signal.triggered:
# If abort_signal is triggered, we simply return.
# The outside function will check it again and decide steps to take.
return
images, labels = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
predictions = self.model(images)
cost = self.loss(predictions, labels)
cost.backward()
self.optimizer.step()
running_loss += cost.cpu().detach().numpy() / images.size()[0]
if i % 3000 == 0:
self.log_info(
fl_ctx, f"Epoch: {epoch}/{self._epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}"
)
running_loss = 0.0
def _save_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
ml = make_model_learnable(self.model.state_dict(), {})
self.persistence_manager.update(ml)
torch.save(self.persistence_manager.to_persistence_dict(), model_path)
def _load_local_model(self, fl_ctx: FLContext):
run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM))
models_dir = os.path.join(run_dir, PTConstants.PTModelsDir)
if not os.path.exists(models_dir):
return None
model_path = os.path.join(models_dir, PTConstants.PTLocalModelName)
self.persistence_manager = PTModelPersistenceFormatManager(
data=torch.load(model_path), default_train_conf=self._default_train_conf
)
ml = self.persistence_manager.to_model_learnable(exclude_vars=self._exclude_vars)
return ml
| NVFlare-main | examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10trainer.py |
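The executor above moves weights through DXO objects packed into Shareables; the round trip it relies on can be verified in isolation (a sketch):

import numpy as np

from nvflare.apis.dxo import DXO, DataKind, from_shareable

dxo = DXO(data_kind=DataKind.WEIGHTS, data={"fc1.weight": np.zeros((2, 2))})
shareable = dxo.to_shareable()        # the payload execute() receives and returns
recovered = from_shareable(shareable)
assert recovered.data_kind == DataKind.WEIGHTS
assert "fc1.weight" in recovered.data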
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from simple_network import SimpleNetwork
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Normalize, ToTensor
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class Cifar10Validator(Executor):
def __init__(self, data_path="~/data", validate_task_name=AppConstants.TASK_VALIDATION):
super().__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose(
[
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
        test_data = CIFAR10(root=data_path, train=False, transform=transforms, download=True)
self._test_loader = DataLoader(test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
                except Exception:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self._validate(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
            except Exception:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def _validate(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self._test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
| NVFlare-main | examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PTConstants:
PTServerName = "server"
PTFileModelName = "FL_global_model.pt"
PTLocalModelName = "local_model.pt"
PTModelsDir = "models"
| NVFlare-main | examples/hello-world/hello-pt/jobs/hello-pt/app/custom/pt_constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| NVFlare-main | examples/hello-world/step-by-step/cifar10/code/fl/net.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from net import Net
# (1) import nvflare client API
import nvflare.client as flare
# default dataset path
CIFAR10_ROOT = "/tmp/nvflare/data/cifar10"
# (optional) use the GPU to speed things up.
# if you want to use the CPU, set DEVICE = "cpu"
DEVICE = "cuda:0"
def define_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", type=str, default=CIFAR10_ROOT, nargs="?")
parser.add_argument("--batch_size", type=int, default=4, nargs="?")
parser.add_argument("--num_workers", type=int, default=1, nargs="?")
parser.add_argument("--model_path", type=str, default=f"{CIFAR10_ROOT}/cifar_net.pth", nargs="?")
return parser.parse_args()
def _main(args):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_path = args.dataset_path
batch_size = args.batch_size
num_workers = args.num_workers
model_path = args.model_path
trainset = torchvision.datasets.CIFAR10(root=dataset_path, train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
testset = torchvision.datasets.CIFAR10(root=dataset_path, train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
net = Net()
# (2) initializes NVFlare client API
flare.init()
# (3) gets FLModel from NVFlare
input_model = flare.receive()
print(
f" current_round={input_model.current_round},"
f" total_round={input_model.total_rounds},"
f" client={flare.system_info().get('site_name', None)}"
)
# (4) loads model from NVFlare
net.load_state_dict(input_model.params)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# (optional) use GPU to speed things up
net.to(DEVICE)
# (optional) calculate total steps
steps = 2 * len(trainloader)
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
# (optional) use GPU to speed things up
inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
print("Finished Training")
torch.save(net.state_dict(), model_path)
    # wraps evaluation logic into a method to reuse it for
    # both the locally trained and the received model
def evaluate(input_weights):
net = Net()
net.load_state_dict(input_weights)
# (optional) use GPU to speed things up
net.to(DEVICE)
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in testloader:
# (optional) use GPU to speed things up
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# calculate outputs by running images through the network
outputs = net(images)
# the class with the highest energy is what we choose as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %")
return 100 * correct // total
# evaluation on local trained model
local_accuracy = evaluate(torch.load(model_path))
# (5) evaluate on received model
accuracy = evaluate(input_model.params)
# (6) construct trained FL model
output_model = flare.FLModel(
params=net.cpu().state_dict(),
metrics={"accuracy": accuracy},
meta={"NUM_STEPS_CURRENT_ROUND": steps},
)
# (7) send model back to NVFlare
flare.send(output_model)
def main():
_main(define_parser())
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/step-by-step/cifar10/code/fl/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| NVFlare-main | examples/hello-world/step-by-step/cifar10/code/dl/net.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from net import Net
# default dataset path
CIFAR10_ROOT = "/tmp/nvflare/data/cifar10"
# (optional) use the GPU to speed things up.
# if you want to use the CPU, set DEVICE = "cpu"
DEVICE = "cuda:0"
def define_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", type=str, default=CIFAR10_ROOT, nargs="?")
parser.add_argument("--batch_size", type=int, default=4, nargs="?")
parser.add_argument("--num_workers", type=int, default=1, nargs="?")
parser.add_argument("--model_path", type=str, default=f"{CIFAR10_ROOT}/cifar_net.pth", nargs="?")
return parser.parse_args()
def _main(args):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_path = args.dataset_path
batch_size = args.batch_size
num_workers = args.num_workers
model_path = args.model_path
trainset = torchvision.datasets.CIFAR10(root=dataset_path, train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
testset = torchvision.datasets.CIFAR10(root=dataset_path, train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# (optional) use GPU to speed things up
net.to(DEVICE)
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
# (optional) use GPU to speed things up
inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}")
running_loss = 0.0
print("Finished Training")
torch.save(net.state_dict(), model_path)
# wraps evaluation logic into a method to re-use for
# evaluation on both trained and received model
def evaluate(input_weights):
net = Net()
net.load_state_dict(input_weights)
# (optional) use GPU to speed things up
net.to(DEVICE)
correct = 0
total = 0
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in testloader:
# (optional) use GPU to speed things up
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
# calculate outputs by running images through the network
outputs = net(images)
# the class with the highest energy is what we choose as prediction
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %")
return 100 * correct // total
# evaluation on local trained model
local_accuracy = evaluate(torch.load(model_path))
def main():
_main(define_parser())
if __name__ == "__main__":
main()
| NVFlare-main | examples/hello-world/step-by-step/cifar10/code/dl/train.py |
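Because _main in dl/train.py takes the parsed-argument object directly, the script can also be driven programmatically; a minimal sketch using argparse.Namespace with the same defaults the parser defines (assuming the file is importable as train):

from argparse import Namespace
from train import _main

_main(
    Namespace(
        dataset_path="/tmp/nvflare/data/cifar10",
        batch_size=4,
        num_workers=1,
        model_path="/tmp/nvflare/data/cifar10/cifar_net.pth",
    )
)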
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Dirichlet sampling strategy for creating a heterogeneous partition is adopted
# from FedMA (https://github.com/IBM/FedMA).
# MIT License
# Copyright (c) 2020 International Business Machines
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import torchvision.datasets as datasets
# default dataset path
CIFAR10_ROOT = "/tmp/nvflare/data/cifar10"
def define_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", type=str, default=CIFAR10_ROOT, nargs="?")
args = parser.parse_args()
return args
def main(args):
datasets.CIFAR10(root=args.dataset_path, train=True, download=True)
datasets.CIFAR10(root=args.dataset_path, train=False, download=True)
if __name__ == "__main__":
main(define_parser())
| NVFlare-main | examples/hello-world/step-by-step/cifar10/data/download.py |
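torchvision's CIFAR10 download is idempotent, so re-running download.py is safe; a minimal check of the expected split sizes (50,000 training and 10,000 test images):

import torchvision.datasets as datasets

trainset = datasets.CIFAR10(root="/tmp/nvflare/data/cifar10", train=True, download=True)
testset = datasets.CIFAR10(root="/tmp/nvflare/data/cifar10", train=False, download=True)
print(len(trainset), len(testset))  # 50000 10000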
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.statistics_spec import Bin, DataType, Feature, Histogram, HistogramType, Statistics
# the dataset path
CIFAR10_ROOT = "/tmp/nvflare/data/cifar10"
class ImageStatistics(Statistics):
def __init__(self, data_root: str = CIFAR10_ROOT, batch_size: int = 4):
"""local image intensity calculator.
Args:
data_root: directory with local image data.
Returns:
Histogram of local statistics`
"""
super().__init__()
self.dataset_path = data_root
self.batch_size = batch_size
        # There are three color channels (RGB), one per tensor channel index.
        # We treat each channel as one feature; the feature ids correspond to the
        # tensor channel indices, and the feature names are "red", "green" and "blue".
self.features_ids = {"red": 0, "green": 1, "blue": 2}
self.image_features = [
Feature("red", DataType.FLOAT),
Feature("green", DataType.FLOAT),
Feature("blue", DataType.FLOAT),
]
self.dataset_lengths = {}
self.loaders = {}
self.client_name = None
self.fl_ctx = None
def initialize(self, fl_ctx: FLContext):
        # FLContext is context information for the client-side NVFlare engine.
        # It includes much runtime information.
        # Here we are only interested in the client site name:
# fl_ctx.get_identity_name() will return the client's name
self.fl_ctx = fl_ctx
self.client_name = "local_client" if fl_ctx is None else fl_ctx.get_identity_name()
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root=self.dataset_path, train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root=self.dataset_path, train=False, download=True, transform=transform)
self.dataset_lengths = {"train": len(trainset), "test": len(testset)}
trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.batch_size, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=self.batch_size, shuffle=False, num_workers=2)
self.loaders = {"train": trainloader, "test": testloader}
def features(self) -> Dict[str, List[Feature]]:
return {"train": self.image_features, "test": self.image_features}
def count(self, dataset_name: str, feature_name: str) -> int:
return self.dataset_lengths[dataset_name]
def histogram(
self, dataset_name: str, feature_name: str, num_of_bins: int, global_min_value: float, global_max_value: float
) -> Histogram:
print(f"calculating image intensity histogram for client {self.client_name}")
channel = self.features_ids[feature_name]
# get the inputs; data is a list of [inputs, labels]
histogram_bins: List[Bin] = []
bin_edges = []
histogram = np.zeros(num_of_bins, dtype=float)
for inputs, _ in self.loaders[dataset_name]:
for img in inputs:
counts, bin_edges = np.histogram(
img[channel, :, :], bins=num_of_bins, range=(global_min_value, global_max_value)
)
histogram += counts
for i in range(num_of_bins):
low_value = bin_edges[i]
high_value = bin_edges[i + 1]
bin_sample_count = histogram[i]
histogram_bins.append(Bin(low_value=low_value, high_value=high_value, sample_count=bin_sample_count))
return Histogram(HistogramType.STANDARD, histogram_bins)
| NVFlare-main | examples/hello-world/step-by-step/cifar10/stats/image_statistics.py |
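Because initialize() falls back to the name "local_client" when no FLContext is supplied, the ImageStatistics generator above can be smoke-tested outside of NVFlare; a minimal sketch (assuming the file is importable as image_statistics; note the Normalize transform maps pixel intensities into [-1, 1], hence the histogram range):

from image_statistics import ImageStatistics

stats = ImageStatistics()
stats.initialize(fl_ctx=None)
print(stats.count("train", "red"))  # 50000
hist = stats.histogram("train", "red", num_of_bins=20, global_min_value=-1.0, global_max_value=1.0)
print(len(hist.bins))  # 20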
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class Net(tf.keras.Model):
def __init__(self):
super().__init__()
self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
self.dense1 = tf.keras.layers.Dense(128, activation="relu")
self.dropout = tf.keras.layers.Dropout(0.2)
self.dense2 = tf.keras.layers.Dense(10)
def call(self, x):
x = self.flatten(x)
x = self.dense1(x)
x = self.dropout(x)
x = self.dense2(x)
return x
| NVFlare-main | examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/tf2_net.py |
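A quick shape check of the Keras model in tf2_net.py above; a minimal sketch:

import tensorflow as tf
from tf2_net import Net

net = Net()
out = net(tf.random.uniform((4, 28, 28)))  # an MNIST-sized batch of 4 images
print(out.shape)  # (4, 10) -- raw logits, one per digit class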
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tensorflow as tf
from tf2_net import Net
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_constant import FLContextKey
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import ModelLearnable, make_model_learnable
from nvflare.app_common.abstract.model_persistor import ModelPersistor
from nvflare.app_common.app_constant import AppConstants
from nvflare.fuel.utils import fobs
class TF2ModelPersistor(ModelPersistor):
def __init__(self, save_name="tf2_model.fobs"):
super().__init__()
self.save_name = save_name
def _initialize(self, fl_ctx: FLContext):
# get save path from FLContext
app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT)
env = None
run_args = fl_ctx.get_prop(FLContextKey.ARGS)
if run_args:
env_config_file_name = os.path.join(app_root, run_args.env)
if os.path.exists(env_config_file_name):
try:
with open(env_config_file_name) as file:
env = json.load(file)
                except Exception:
self.system_panic(
reason="error opening env config file {}".format(env_config_file_name), fl_ctx=fl_ctx
)
return
if env is not None:
if env.get("APP_CKPT_DIR", None):
fl_ctx.set_prop(AppConstants.LOG_DIR, env["APP_CKPT_DIR"], private=True, sticky=True)
if env.get("APP_CKPT") is not None:
fl_ctx.set_prop(
AppConstants.CKPT_PRELOAD_PATH,
env["APP_CKPT"],
private=True,
sticky=True,
)
log_dir = fl_ctx.get_prop(AppConstants.LOG_DIR)
if log_dir:
self.log_dir = os.path.join(app_root, log_dir)
else:
self.log_dir = app_root
self._fobs_save_path = os.path.join(self.log_dir, self.save_name)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
fl_ctx.sync_sticky()
def load_model(self, fl_ctx: FLContext) -> ModelLearnable:
"""
initialize and load the Model.
Args:
fl_ctx: FLContext
Returns:
Model object
"""
if os.path.exists(self._fobs_save_path):
self.logger.info("Loading server weights")
with open(self._fobs_save_path, "rb") as f:
model_learnable = fobs.load(f)
else:
self.logger.info("Initializing server model")
network = Net()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
network.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
_ = network(tf.keras.Input(shape=(28, 28)))
var_dict = {network.get_layer(index=key).name: value for key, value in enumerate(network.get_weights())}
model_learnable = make_model_learnable(var_dict, dict())
return model_learnable
def handle_event(self, event: str, fl_ctx: FLContext):
if event == EventType.START_RUN:
self._initialize(fl_ctx)
def save_model(self, model_learnable: ModelLearnable, fl_ctx: FLContext):
"""Saves model.
Args:
model_learnable: ModelLearnable object
fl_ctx: FLContext
"""
model_learnable_info = {k: str(type(v)) for k, v in model_learnable.items()}
self.logger.info(f"Saving aggregated server weights: \n {model_learnable_info}")
with open(self._fobs_save_path, "wb") as f:
fobs.dump(model_learnable, f)
| NVFlare-main | examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/tf2_model_persistor.py |
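The persistor above serializes the ModelLearnable with NVFlare's fobs utility instead of pickle; a minimal in-memory round-trip sketch using fobs.dumps/loads (the weight values here are made up):

from nvflare.fuel.utils import fobs

payload = {"dense1": [0.1, 0.2], "bias": [0.0]}
buf = fobs.dumps(payload)  # bytes, the same format dump() writes to a file
restored = fobs.loads(buf)
assert restored == payload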
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tf2_net import Net
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.event_type import EventType
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
class SimpleTrainer(Executor):
def __init__(self, epochs_per_round):
super().__init__()
self.epochs_per_round = epochs_per_round
self.train_images, self.train_labels = None, None
self.test_images, self.test_labels = None, None
self.model = None
self.var_list = None
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.START_RUN:
self.setup(fl_ctx)
def setup(self, fl_ctx: FLContext):
(self.train_images, self.train_labels), (
self.test_images,
self.test_labels,
) = tf.keras.datasets.mnist.load_data()
self.train_images, self.test_images = (
self.train_images / 255.0,
self.test_images / 255.0,
)
# simulate separate datasets for each client by dividing MNIST dataset in half
client_name = fl_ctx.get_identity_name()
if client_name == "site-1":
self.train_images = self.train_images[: len(self.train_images) // 2]
self.train_labels = self.train_labels[: len(self.train_labels) // 2]
self.test_images = self.test_images[: len(self.test_images) // 2]
self.test_labels = self.test_labels[: len(self.test_labels) // 2]
elif client_name == "site-2":
self.train_images = self.train_images[len(self.train_images) // 2 :]
self.train_labels = self.train_labels[len(self.train_labels) // 2 :]
self.test_images = self.test_images[len(self.test_images) // 2 :]
self.test_labels = self.test_labels[len(self.test_labels) // 2 :]
model = Net()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
_ = model(tf.keras.Input(shape=(28, 28)))
self.var_list = [model.get_layer(index=index).name for index in range(len(model.get_weights()))]
self.model = model
def execute(
self,
task_name: str,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
"""
This function is an extended function from the super class.
As a supervised learning based trainer, the train function will run
evaluate and train engines based on model weights from `shareable`.
After finishing training, a new `Shareable` object will be submitted
to server for aggregation.
Args:
task_name: dispatched task
shareable: the `Shareable` object received from server.
fl_ctx: the `FLContext` object received from server.
abort_signal: if triggered, the training will be aborted.
Returns:
a new `Shareable` object to be submitted to server for aggregation.
"""
        # retrieve the model weights downloaded from the server's shareable
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
if task_name != "train":
return make_reply(ReturnCode.TASK_UNKNOWN)
dxo = from_shareable(shareable)
model_weights = dxo.data
# use previous round's client weights to replace excluded layers from server
prev_weights = {
self.model.get_layer(index=key).name: value for key, value in enumerate(self.model.get_weights())
}
ordered_model_weights = {key: model_weights.get(key) for key in prev_weights}
for key in self.var_list:
value = ordered_model_weights.get(key)
if np.all(value == 0):
ordered_model_weights[key] = prev_weights[key]
# update local model weights with received weights
self.model.set_weights(list(ordered_model_weights.values()))
        # adjust the learning rate or other training-time settings as needed,
        # e.g. via callbacks passed to the fit function
self.model.fit(
self.train_images,
self.train_labels,
epochs=self.epochs_per_round,
validation_data=(self.test_images, self.test_labels),
)
# report updated weights in shareable
weights = {self.model.get_layer(index=key).name: value for key, value in enumerate(self.model.get_weights())}
dxo = DXO(data_kind=DataKind.WEIGHTS, data=weights)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
new_shareable = dxo.to_shareable()
return new_shareable
| NVFlare-main | examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/trainer.py |
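The DXO/Shareable round trip used by SimpleTrainer.execute above can be exercised on its own; a minimal sketch:

import numpy as np
from nvflare.apis.dxo import DXO, DataKind, from_shareable

dxo = DXO(data_kind=DataKind.WEIGHTS, data={"dense1": np.zeros(3)})
shareable = dxo.to_shareable()  # what execute() returns to the server
restored = from_shareable(shareable)
print(restored.data_kind, list(restored.data))  # WEIGHTS ['dense1']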
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CLIException(Exception):
pass
| NVFlare-main | nvflare/cli_exception.py |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.21 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = " (HEAD -> main)"
git_full = "4902bc785dbdf55300da981e61d76efc11393b35"
git_date = "2023-09-14 09:24:31 -0700"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "nvflare/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s%s" % (tag_prefix, TAG_PREFIX_REGEX)],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
| NVFlare-main | nvflare/_version.py |
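To illustrate the pep440 style produced by render_pep440 above, a minimal sketch with hand-built (hypothetical) pieces:

pieces = {
    "closest-tag": "2.3.0",
    "distance": 2,
    "short": "abc1234",
    "long": "abc1234def5678abc1234def5678abc1234def56",  # hypothetical 40-char revision id
    "dirty": False,
    "error": None,
    "date": None,
    "branch": "main",
}
print(render(pieces, "pep440")["version"])  # 2.3.0+2.gabc1234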
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import _version
__version__ = _version.get_versions()["version"]
# https://github.com/microsoft/pylance-release/issues/856
from nvflare.private.fed.app.simulator.simulator_runner import SimulatorRunner as SimulatorRunner
| NVFlare-main | nvflare/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.cli_exception import CLIException
class CLIUnknownCmdException(CLIException):
pass
| NVFlare-main | nvflare/cli_unknown_cmd_exception.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import traceback
from nvflare.cli_exception import CLIException
from nvflare.cli_unknown_cmd_exception import CLIUnknownCmdException
from nvflare.dashboard.cli import define_dashboard_parser, handle_dashboard
from nvflare.fuel.hci.tools.authz_preview import define_authz_preview_parser, run_command
from nvflare.lighter.provision import define_provision_parser, handle_provision
from nvflare.private.fed.app.simulator.simulator import define_simulator_parser, run_simulator
from nvflare.tool.job.job_cli import def_job_cli_parser, handle_job_cli_cmd
from nvflare.tool.poc.poc_commands import def_poc_parser, handle_poc_cmd
from nvflare.tool.preflight_check import check_packages, define_preflight_check_parser
from nvflare.utils.cli_utils import (
create_job_template_config,
create_poc_workspace_config,
create_startup_kit_config,
get_hidden_config,
save_config,
)
CMD_POC = "poc"
CMD_PROVISION = "provision"
CMD_PREFLIGHT_CHECK = "preflight_check"
CMD_SIMULATOR = "simulator"
CMD_DASHBOARD = "dashboard"
CMD_AUTHZ_PREVIEW = "authz_preview"
CMD_JOB = "job"
CMD_CONFIG = "config"
def check_python_version():
if sys.version_info >= (3, 11):
raise RuntimeError("Python versions 3.11 and above are not yet supported. Please use Python 3.8, 3.9 or 3.10.")
if sys.version_info < (3, 8):
raise RuntimeError("Python versions 3.6 and below are not supported. Please use Python 3.8, 3.9 or 3.10")
def def_provision_parser(sub_cmd):
cmd = CMD_PROVISION
provision_parser = sub_cmd.add_parser(cmd)
define_provision_parser(provision_parser)
return {cmd: [provision_parser]}
def def_dashboard_parser(sub_cmd):
cmd = CMD_DASHBOARD
dashboard_parser = sub_cmd.add_parser(cmd)
define_dashboard_parser(dashboard_parser)
return {cmd: [dashboard_parser]}
def def_preflight_check_parser(sub_cmd):
cmd = CMD_PREFLIGHT_CHECK
checker_parser = sub_cmd.add_parser(cmd)
define_preflight_check_parser(checker_parser)
return {cmd: checker_parser}
def def_simulator_parser(sub_cmd):
cmd = CMD_SIMULATOR
simulator_parser = sub_cmd.add_parser(cmd)
define_simulator_parser(simulator_parser)
return {cmd: simulator_parser}
def handle_simulator_cmd(simulator_args):
status = run_simulator(simulator_args)
    # make sure the script terminates after the run
if status:
sys.exit(status)
def def_authz_preview_parser(sub_cmd):
cmd = CMD_AUTHZ_PREVIEW
authz_preview_parser = sub_cmd.add_parser(cmd)
define_authz_preview_parser(authz_preview_parser)
return {cmd: authz_preview_parser}
def handle_authz_preview(args):
run_command(args)
def def_config_parser(sub_cmd):
cmd = "config"
config_parser = sub_cmd.add_parser(cmd)
config_parser.add_argument(
"-d", "--startup_kit_dir", type=str, nargs="?", default=None, help="startup kit location"
)
config_parser.add_argument(
"-pw", "--poc_workspace_dir", type=str, nargs="?", default=None, help="POC workspace location"
)
config_parser.add_argument(
"-jt", "--job_templates_dir", type=str, nargs="?", default=None, help="job templates location"
)
config_parser.add_argument("-debug", "--debug", action="store_true", help="debug is on")
return {cmd: config_parser}
def handle_config_cmd(args):
config_file_path, nvflare_config = get_hidden_config()
nvflare_config = create_startup_kit_config(nvflare_config, args.startup_kit_dir)
nvflare_config = create_poc_workspace_config(nvflare_config, args.poc_workspace_dir)
nvflare_config = create_job_template_config(nvflare_config, args.job_templates_dir)
save_config(nvflare_config, config_file_path)
def parse_args(prog_name: str):
_parser = argparse.ArgumentParser(description=prog_name)
_parser.add_argument("--version", "-V", action="store_true", help="print nvflare version")
sub_cmd = _parser.add_subparsers(description="sub command parser", dest="sub_command")
sub_cmd_parsers = {}
sub_cmd_parsers.update(def_poc_parser(sub_cmd))
sub_cmd_parsers.update(def_preflight_check_parser(sub_cmd))
sub_cmd_parsers.update(def_provision_parser(sub_cmd))
sub_cmd_parsers.update(def_simulator_parser(sub_cmd))
sub_cmd_parsers.update(def_dashboard_parser(sub_cmd))
sub_cmd_parsers.update(def_authz_preview_parser(sub_cmd))
sub_cmd_parsers.update(def_job_cli_parser(sub_cmd))
sub_cmd_parsers.update(def_config_parser(sub_cmd))
args, argv = _parser.parse_known_args(None, None)
cmd = args.__dict__.get("sub_command")
sub_cmd_parser = sub_cmd_parsers.get(cmd)
if argv:
msg = f"{prog_name} {cmd}: unrecognized arguments: {''.join(argv)}\n"
print(f"\nerror: {msg}")
sub_cmd_parser.print_help()
_parser.exit(2, "\n")
return _parser, _parser.parse_args(), sub_cmd_parsers
handlers = {
CMD_POC: handle_poc_cmd,
CMD_PROVISION: handle_provision,
CMD_PREFLIGHT_CHECK: check_packages,
CMD_SIMULATOR: handle_simulator_cmd,
CMD_DASHBOARD: handle_dashboard,
CMD_AUTHZ_PREVIEW: handle_authz_preview,
CMD_JOB: handle_job_cli_cmd,
CMD_CONFIG: handle_config_cmd,
}
def run(prog_name):
cwd = os.getcwd()
sys.path.append(cwd)
prog_parser, prog_args, sub_cmd_parsers = parse_args(prog_name)
sub_cmd = None
try:
sub_cmd = prog_args.sub_command
if sub_cmd:
handlers[sub_cmd](prog_args)
elif prog_args.version:
print_nvflare_version()
else:
prog_parser.print_help()
except CLIUnknownCmdException as e:
print(e)
print_help(prog_parser, sub_cmd, sub_cmd_parsers)
sys.exit(1)
except CLIException as e:
print(e)
sys.exit(1)
except Exception as e:
print(f"\nUnable to handle command: {sub_cmd} due to: {e} \n")
if prog_args.debug:
print(traceback.format_exc())
else:
print_help(prog_parser, sub_cmd, sub_cmd_parsers)
def print_help(prog_parser, sub_cmd, sub_cmd_parsers):
if sub_cmd:
sub_parser = sub_cmd_parsers[sub_cmd]
if sub_parser:
sub_parser.print_help()
else:
prog_parser.print_help()
else:
prog_parser.print_help()
def print_nvflare_version():
import nvflare
print(f"NVFlare version is {nvflare.__version__}")
def main():
check_python_version()
run("nvflare")
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/cli.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zlib
class Checksum:
def __init__(self):
self.current_value = 0
def update(self, data):
self.current_value = zlib.crc32(data, self.current_value)
def result(self):
return self.current_value & 0xFFFFFFFF
| NVFlare-main | nvflare/fuel/hci/checksum.py |
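Because zlib.crc32 accepts a running value, feeding the Checksum in chunks yields the same result as hashing the whole buffer at once; a minimal sketch:

import zlib

c = Checksum()
c.update(b"hello ")
c.update(b"world")
assert c.result() == (zlib.crc32(b"hello world") & 0xFFFFFFFF)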
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
def bytes_to_b64str(data_bytes) -> str:
"""Convert binary to base64-encoded string."""
encoded_bytes = base64.b64encode(data_bytes)
return encoded_bytes.decode("ascii")
def b64str_to_bytes(b64str: str) -> bytes:
"""Convert base64-encoded string to binary."""
encoded_bytes = b64str.encode("ascii")
return base64.b64decode(encoded_bytes)
def binary_file_to_b64str(file_name) -> str:
"""Encode content of a binary file to a Base64-encoded ASCII string.
Args:
file_name: the binary file to be processed
Returns: base64-encoded ASCII string
"""
data_bytes = open(file_name, "rb").read()
return bytes_to_b64str(data_bytes)
def b64str_to_binary_file(b64str: str, file_name):
"""Decode a base64-encoded string and write it into a binary file.
Args:
b64str: the base64-encoded ASCII string
file_name: the file to write to
Returns: number of bytes written
"""
data_bytes = b64str_to_bytes(b64str)
with open(file_name, "wb") as f:
f.write(data_bytes)
return len(data_bytes)
def text_file_to_b64str(file_name) -> str:
"""Encode content of a text file to a Base64-encoded ASCII string.
Args:
file_name: name of the text file
Returns: base64-encoded string
"""
data_string = open(file_name, "r").read()
data_bytes = data_string.encode("utf-8")
return bytes_to_b64str(data_bytes)
def b64str_to_text_file(b64str: str, file_name):
"""Decode a base64-encoded string and write result into a text file.
Args:
b64str: base64-encoded string
file_name: file to be created
Returns: number of data types written (may not be the same as number of characters)
"""
data_bytes = b64str_to_bytes(b64str)
data_string = data_bytes.decode("utf-8")
with open(file_name, "w") as f:
f.write(data_string)
return len(data_bytes)
| NVFlare-main | nvflare/fuel/hci/base64_utils.py |
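A minimal round-trip sketch for the byte/string helpers above:

raw = b"\x00\x01binary\xff"
s = bytes_to_b64str(raw)  # ASCII-safe string, suitable for JSON transport
assert b64str_to_bytes(s) == raw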
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import os
import re
import shlex
from typing import List
from nvflare.apis.utils.format_check import type_pattern_mapping
def split_to_args(line: str) -> List[str]:
if '"' in line:
return shlex.split(line)
else:
line = re.sub(" +", " ", line)
return line.split(" ")
def join_args(segs: List[str]) -> str:
result = ""
sep = ""
for a in segs:
parts = a.split()
if len(parts) < 2:
p = parts[0]
else:
p = '"' + a + '"'
result = result + sep + p
sep = " "
return result
class ArgValidator(argparse.ArgumentParser):
def __init__(self, name):
"""Validator for admin shell commands that uses argparse to check arguments and get usage through print_help.
Args:
name: name of the program to pass to ArgumentParser
"""
argparse.ArgumentParser.__init__(self, prog=name, add_help=False)
self.err = ""
def error(self, message):
self.err = message
def validate(self, args):
try:
result = self.parse_args(args)
return self.err, result
except Exception:
            return 'argument error; try "? cmdName" to show supported usage for a command', None
def get_usage(self) -> str:
buffer = io.StringIO()
self.print_help(buffer)
usage_output = buffer.getvalue().split("\n", 1)[1]
buffer.close()
return usage_output
def process_targets_into_str(targets: List[str]) -> str:
if not isinstance(targets, list):
raise SyntaxError("targets is not a list.")
if not all(isinstance(t, str) for t in targets):
raise SyntaxError("all targets in the list of targets must be strings.")
for t in targets:
try:
validate_required_target_string(t)
except SyntaxError:
raise SyntaxError(f"invalid target {t}")
return " ".join(targets)
def validate_required_target_string(target: str) -> str:
"""Returns the target string if it exists and is valid."""
if not target:
raise SyntaxError("target is required but not specified.")
if not isinstance(target, str):
raise SyntaxError("target is not str.")
if not re.match("^[A-Za-z0-9._-]*$", target):
raise SyntaxError("target must be a string of only valid characters and no spaces.")
return target
def validate_options_string(options: str) -> str:
"""Returns the options string if it is valid."""
if not isinstance(options, str):
raise SyntaxError("options is not str.")
if not re.match("^[A-Za-z0-9- ]*$", options):
raise SyntaxError("options must be a string of only valid characters.")
return options
def validate_path_string(path: str) -> str:
"""Returns the path string if it is valid."""
if not isinstance(path, str):
raise SyntaxError("path is not str.")
if not re.match("^[A-Za-z0-9-._/]*$", path):
raise SyntaxError("unsupported characters in path {}".format(path))
if os.path.isabs(path):
raise SyntaxError("absolute path is not allowed")
paths = path.split(os.path.sep)
for p in paths:
if p == "..":
raise SyntaxError(".. in path name is not allowed")
return path
def validate_file_string(file: str) -> str:
"""Returns the file string if it is valid."""
validate_path_string(file)
basename, file_extension = os.path.splitext(file)
if file_extension not in [".txt", ".log", ".json", ".csv", ".sh", ".config", ".py"]:
raise SyntaxError(
"this command cannot be applied to file {}. Only files with the following extensions are "
"permitted: .txt, .log, .json, .csv, .sh, .config, .py".format(file)
)
return file
def validate_sp_string(sp_string) -> str:
if re.match(
type_pattern_mapping.get("sp_end_point"),
sp_string,
):
return sp_string
else:
raise SyntaxError("sp_string must be of the format example.com:8002:8003")
| NVFlare-main | nvflare/fuel/hci/cmd_arg_utils.py |
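split_to_args and join_args above are near-inverses: arguments containing spaces are re-quoted on the way back. A minimal sketch:

args = split_to_args('upload_app "my app dir"')
print(args)             # ['upload_app', 'my app dir']
print(join_args(args))  # upload_app "my app dir"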
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import hashlib
import os
import uuid
class IdentityKey(object):
NAME = "common_name"
ORG = "organization"
ROLE = "role"
def hash_password(password):
"""Hash a password for storing.
Args:
password: password to hash
Returns: hashed password
"""
salt = hashlib.sha256(os.urandom(60)).hexdigest().encode("ascii")
pwd_hash = hashlib.pbkdf2_hmac(hash_name="sha512", password=password.encode("utf-8"), salt=salt, iterations=100000)
pwd_hash = binascii.hexlify(pwd_hash)
return (salt + pwd_hash).decode("ascii")
def verify_password(stored_password, provided_password):
"""Verify a stored password against one provided by user.
Args:
stored_password: stored password
provided_password: password provided by user
Returns: True if the stored password equals the provided password, otherwise False
"""
salt = stored_password[:64]
stored_password = stored_password[64:]
pwd_hash = hashlib.pbkdf2_hmac(
hash_name="sha512", password=provided_password.encode("utf-8"), salt=salt.encode("ascii"), iterations=100000
)
pwd_hash = binascii.hexlify(pwd_hash).decode("ascii")
return pwd_hash == stored_password
def make_session_token():
"""Makes a new session token.
Returns: created session token
"""
t = uuid.uuid1()
return str(t)
def get_identity_info(cert: dict):
"""Gets the identity information from the provided certificate.
Args:
cert: certificate
Returns: None if the cert is None.
If the cert is a dictionary, returns a dictionary containing three keys: common_name, organization and role.
"""
if cert is None:
return None
cn = None
role = None
organization = None
for sub in cert.get("subject", ()):
for key, value in sub:
if key == "commonName":
cn = value
elif key == "unstructuredName":
role = value
elif key == "organizationName":
organization = value
return {"common_name": cn, "organization": organization, "role": role}
def get_certificate_common_name(cert: dict):
"""Gets the common name of the provided certificate.
Args:
cert: certificate
Returns: common name of provided cert
"""
if cert is None:
return None
for sub in cert.get("subject", ()):
for key, value in sub:
if key == "commonName":
return value
def get_certificate_identity(cert: dict) -> dict:
"""Gets the identity info of the provided certificate.
Args:
cert: certificate
Returns: identity info in a dict with following keys: name, org, role
"""
if cert is None:
return None
result = {}
for sub in cert.get("subject", ()):
for key, value in sub:
if key == "commonName":
result[IdentityKey.NAME] = value
elif key == "org":
result[IdentityKey.ORG] = value
elif key == "role":
result[IdentityKey.ROLE] = value
return result
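# Illustrative usage sketch (added for exposition; not part of the original
# module): round-trip the salted PBKDF2 helpers above. The username and
# password values are made up for demonstration.
if __name__ == "__main__":
    stored = hash_password("admin@example.com" + "s3cret-pwd")
    assert verify_password(stored, "admin@example.com" + "s3cret-pwd")
    assert not verify_password(stored, "admin@example.com" + "wrong-pwd")
    print("session token:", make_session_token())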
| NVFlare-main | nvflare/fuel/hci/security.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/hci/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from .checksum import Checksum
HEADER_STRUCT = struct.Struct(">BII") # marker(1), seq_num(4), size(4)
HEADER_LEN = HEADER_STRUCT.size
MARKER_DATA = 101
MARKER_END = 102
MAX_CHUNK_SIZE = 1024 * 1024
def get_slice(buf, start: int, length: int):
return buf[start : start + length]
class Header:
def __init__(self, marker, num1, num2):
self.marker = marker
self.checksum = 0
self.seq = 0
self.size = 0
if marker == MARKER_DATA:
self.seq = num1
self.size = num2
elif marker == MARKER_END:
if num1 != 0:
raise ValueError(f"num1 must be 0 for checksum but got {num1}")
self.checksum = num2
else:
raise ValueError(f"invalid chunk marker {marker}")
def __str__(self):
d = {
"marker": self.marker,
"seq": self.seq,
"size": self.size,
"checksum": self.checksum,
}
return f"{d}"
@classmethod
def from_bytes(cls, buffer: bytes):
if len(buffer) < HEADER_LEN:
raise ValueError("Prefix too short")
marker, num1, num2 = HEADER_STRUCT.unpack_from(buffer, 0)
return Header(marker, num1, num2)
def to_bytes(self):
if self.marker == MARKER_DATA:
num1 = self.seq
num2 = self.size
else:
num1 = 0
num2 = self.checksum
return HEADER_STRUCT.pack(self.marker, num1, num2)
class ChunkState:
def __init__(self, expect_seq=1):
self.header_bytes = bytearray()
self.header = None
self.received = 0
self.expect_seq = expect_seq
def __str__(self):
d = {
"header": f"{self.header}",
"header_bytes": f"{self.header_bytes}",
"received": self.received,
"expect_seq": self.expect_seq,
}
return f"{d}"
def unpack_header(self):
self.header = Header.from_bytes(self.header_bytes)
if self.header.marker == MARKER_DATA:
if self.header.seq != self.expect_seq:
raise RuntimeError(
f"Protocol Error: received seq {self.header.seq} does not match expected seq {self.expect_seq}"
)
if self.header.size < 0 or self.header.size > MAX_CHUNK_SIZE:
raise RuntimeError(f"Protocol Error: received size {self.header.size} is not in [0, {MAX_CHUNK_SIZE}]")
def is_last(self):
return self.header and self.header.marker == MARKER_END
class Receiver:
def __init__(self, receive_data_func):
self.receive_data_func = receive_data_func
self.checksum = Checksum()
self.current_state = ChunkState()
self.done = False
def receive(self, data) -> bool:
if self.done:
raise RuntimeError("this receiver is already done")
s = chunk_it(self.current_state, data, 0, self._process_chunk)
self.current_state = s
done = s.is_last()
if done:
self.done = True
# compare checksum
expected_checksum = self.checksum.result()
if expected_checksum != s.header.checksum:
raise RuntimeError(f"checksum mismatch: expect {expected_checksum} but received {s.header.checksum}")
return done
def _process_chunk(self, c: ChunkState, data, start: int, length: int):
self.checksum.update(get_slice(data, start, length))
if self.receive_data_func:
self.receive_data_func(data, start, length)
class Sender:
def __init__(self, send_data_func):
self.send_data_func = send_data_func
self.checksum = Checksum()
self.next_seq = 1
self.closed = False
def send(self, data):
if self.closed:
raise RuntimeError("this sender is already closed")
if data is None:
data = b""
header = Header(MARKER_DATA, self.next_seq, len(data))
self.next_seq += 1
self.checksum.update(data)
header_bytes = header.to_bytes()
self.send_data_func(header_bytes)
self.send_data_func(data)
def close(self):
if self.closed:
raise RuntimeError("this sender is already closed")
self.closed = True
cs = self.checksum.result()
header = Header(MARKER_END, 0, cs)
header_bytes = header.to_bytes()
self.send_data_func(header_bytes)
def chunk_it(c: ChunkState, data, cursor: int, process_chunk_func) -> ChunkState:
if not isinstance(data, (bytearray, bytes)):
raise ValueError(f"can only chunk bytes data but got {type(data)}")
data_len = len(data)
if data_len <= 0:
return c
if cursor < 0 or cursor >= data_len:
raise ValueError(f"cursor {cursor} is out of data range [0, {data_len-1}]")
data_len -= cursor
header_bytes_len = len(c.header_bytes)
if header_bytes_len < HEADER_LEN:
# header not completed yet
num_bytes_needed = HEADER_LEN - header_bytes_len
# need this many bytes for header
if data_len >= num_bytes_needed:
# data has enough bytes
c.header_bytes.extend(get_slice(data, cursor, num_bytes_needed))
cursor += num_bytes_needed
data_len -= num_bytes_needed
c.unpack_header() # header bytes are ready
else:
c.header_bytes.extend(get_slice(data, cursor, data_len))
return c
if data_len == 0 or c.is_last():
return c
lack = c.header.size - c.received
if data_len <= lack:
# remaining data is part of the chunk
c.received += data_len
process_chunk_func(c, data, cursor, data_len)
if c.received == c.header.size:
# this chunk is completed: start a new chunk
return ChunkState(c.header.seq + 1)
else:
# this chunk is not done
return c
else:
# some remaining data is part of the chunk, but after that belongs to next chunk
c.received += lack
process_chunk_func(c, data, cursor, lack)
cursor += lack
next_chunk = ChunkState(c.header.seq + 1)
return chunk_it(next_chunk, data, cursor, process_chunk_func)
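# Illustrative usage sketch (added for exposition; not part of the original
# module): stream a payload through Sender and reassemble it with Receiver,
# using in-memory buffers in place of real sockets.
if __name__ == "__main__":
    wire = bytearray()
    received = bytearray()

    sender = Sender(send_data_func=wire.extend)
    sender.send(b"hello ")
    sender.send(b"world")
    sender.close()  # emits the MARKER_END header carrying the checksum

    receiver = Receiver(receive_data_func=lambda d, s, n: received.extend(get_slice(d, s, n)))
    assert receiver.receive(bytes(wire))  # whole stream in one shot; piecewise feeding also works
    assert received == b"hello world"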
| NVFlare-main | nvflare/fuel/hci/chunk.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.fuel.hci.cmd_arg_utils import ArgValidator
class ShellCommandValidator(object):
def __init__(self, arg_validator: ArgValidator):
"""Base class for validators to be called by command executors for shell commands.
Args:
arg_validator: instance of ArgValidator
"""
self.arg_validator = arg_validator
def validate(self, args: List[str]):
self.arg_validator.err = ""
return self.arg_validator.validate(args)
def get_usage(self):
return self.arg_validator.get_usage()
class TailValidator(ShellCommandValidator):
def __init__(self):
"""Validator for the tail command."""
val = ArgValidator("tail")
val.add_argument("-c", type=int, help="output the last C bytes")
val.add_argument("-n", type=int, help="output the last N lines")
val.add_argument("files", metavar="file", type=str, nargs="+")
ShellCommandValidator.__init__(self, val)
class HeadValidator(ShellCommandValidator):
def __init__(self):
"""Validator for the head command."""
val = ArgValidator("head")
val.add_argument("-c", type=int, help="print the first C bytes of each file")
val.add_argument("-n", type=int, help="print the first N lines instead of the first 10")
val.add_argument("files", metavar="file", type=str, nargs="+")
ShellCommandValidator.__init__(self, val)
class GrepValidator(ShellCommandValidator):
def __init__(self):
"""Validator for the grep command."""
val = ArgValidator("grep")
val.add_argument("-n", action="store_true", help="print line number with output lines")
val.add_argument("-i", action="store_true", help="ignore case distinctions")
val.add_argument("-b", action="store_true", help="print the byte offset with output lines")
val.add_argument("pattern", metavar="pattern", type=str)
val.add_argument("files", metavar="file", type=str, nargs="+")
ShellCommandValidator.__init__(self, val)
class CatValidator(ShellCommandValidator):
def __init__(self):
"""Validator for the cat command."""
val = ArgValidator("cat")
val.add_argument("-n", action="store_true", help="number all output lines")
val.add_argument("-b", action="store_true", help="number nonempty output lines, overrides -n")
val.add_argument("-s", action="store_true", help="suppress repeated empty output lines")
val.add_argument("-T", action="store_true", help="display TAB characters as ^I")
val.add_argument("files", metavar="file", type=str, nargs="+")
ShellCommandValidator.__init__(self, val)
class LsValidator(ShellCommandValidator):
def __init__(self):
"""Validator for the ls command."""
val = ArgValidator("ls")
val.add_argument("-a", action="store_true")
val.add_argument("-l", action="store_true", help="use a long listing format")
val.add_argument("-t", action="store_true", help="sort by modification time, newest first")
val.add_argument("-S", action="store_true", help="sort by file size, largest first")
val.add_argument("-R", action="store_true", help="list subdirectories recursively")
val.add_argument("-u", action="store_true", help="with -l: show access time, otherwise: sort by access time")
val.add_argument("files", metavar="file", type=str, nargs="?")
ShellCommandValidator.__init__(self, val)
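# Illustrative usage sketch (added for exposition; not part of the original
# module): each validator wraps an ArgValidator (from cmd_arg_utils), which
# supplies the usage text printed below.
if __name__ == "__main__":
    for v in (TailValidator(), HeadValidator(), GrepValidator(), CatValidator(), LsValidator()):
        print(v.get_usage())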
| NVFlare-main | nvflare/fuel/hci/shell_cmd_val.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.fuel.hci.proto import ConfirmMethod
class CommandSpec(object):
valid_confirms = ["none", ConfirmMethod.YESNO, ConfirmMethod.AUTH]
def __init__(
self,
name: str,
description: str,
usage: str,
handler_func,
authz_func=None,
visible=True,
confirm=None,
client_cmd=None,
enabled=True,
scope_name="",
):
"""Specification of a command within a CommandModuleSpec to register into CommandRegister as a CommandEntry.
Args:
name: command name
description: command description text
usage: string to show usage of the command
handler_func: function to call for executing the command.
authz_func: function called by AuthzFilter to preprocess the command.
visible: whether the command is visible or not
confirm: whether the command needs confirmation to execute
"""
self.name = name
self.description = description
self.usage = usage
self.handler_func = handler_func
self.authz_func = authz_func
self.visible = visible
self.confirm = confirm
self.client_cmd = client_cmd
self.enabled = enabled
self.scope_name = scope_name
if not confirm:
self.confirm = "none"
else:
assert confirm in CommandSpec.valid_confirms
class CommandModuleSpec(object):
def __init__(self, name: str, cmd_specs: List[CommandSpec], conn_props: dict = None):
"""Specification for a command module containing a list of commands in the form of CommandSpec.
Args:
name: becomes the scope name of the commands in cmd_specs when registered in CommandRegister
cmd_specs: list of CommandSpec objects contained in this module
conn_props: conn properties declared by the module
"""
self.name = name
self.cmd_specs = cmd_specs
self.conn_props = conn_props
class CommandModule(object):
"""Base class containing CommandModuleSpec."""
def get_spec(self) -> CommandModuleSpec:
pass
def generate_module_spec(self, server_cmd_spec: CommandSpec):
pass
def close(self):
pass
class CommandEntry(object):
def __init__(self, scope, name, desc, usage, handler, authz_func, visible, confirm, client_cmd):
"""Contains information about a command. This is registered in Scope within CommandRegister.
Args:
scope: scope for this command
name: command name
desc: command description text
usage: string to show usage of the command
handler: function to call for executing the command
authz_func: authorization function to run to get a tuple of (valid, authz_ctx) in AuthzFilter
visible: whether the command is visible or not
confirm: whether the command needs confirmation to execute
"""
self.scope = scope
self.name = name
self.desc = desc
self.usage = usage
self.handler = handler
self.authz_func = authz_func
self.visible = visible
self.confirm = confirm
self.client_cmd = client_cmd
def full_command_name(self) -> str:
return "{}.{}".format(self.scope.name, self.name)
class _Scope(object):
def __init__(self, name: str):
"""A container grouping CommandEntry objects inside CommandRegister.
Args:
name: name of scope grouping commands
"""
self.name = name
self.entries = {}
def register_command(
self, cmd_name: str, cmd_desc: str, cmd_usage: str, handler_func, authz_func, visible, confirm, client_cmd
):
self.entries[cmd_name] = CommandEntry(
self, cmd_name, cmd_desc, cmd_usage, handler_func, authz_func, visible, confirm, client_cmd
)
class CommandRegister(object):
def __init__(self, app_ctx):
"""Object containing the commands in scopes once they have been registered.
ServerCommandRegister is derived from this class and calls the handler of the command through
``process_command`` and ``_do_command``. This is also used to register commands for the admin client.
Args:
app_ctx: app context
"""
self.app_ctx = app_ctx
self.scopes = {}
self.cmd_map = {}
self.modules = []
self.conn_props = {} # conn properties from modules
self.mapped_cmds = []
def _get_scope(self, name: str):
scope = self.scopes.get(name, None)
if scope is None:
scope = _Scope(name)
self.scopes[name] = scope
return scope
def get_command_entries(self, cmd_name: str):
return self.cmd_map.get(cmd_name, [])
def register_module_spec(self, module_spec: CommandModuleSpec, include_invisible=True):
for cmd_spec in module_spec.cmd_specs:
assert isinstance(cmd_spec, CommandSpec)
cmd_spec.scope_name = module_spec.name
if cmd_spec.enabled and (cmd_spec.visible or include_invisible):
self.add_command(
scope_name=module_spec.name,
cmd_name=cmd_spec.name,
desc=cmd_spec.description,
usage=cmd_spec.usage,
handler=cmd_spec.handler_func,
authz_func=cmd_spec.authz_func,
visible=cmd_spec.visible,
confirm=cmd_spec.confirm,
client_cmd=cmd_spec.client_cmd,
)
conn_props = module_spec.conn_props
if conn_props:
self.conn_props.update(conn_props)
def register_module(self, module: CommandModule, include_invisible=True):
self.modules.append(module)
module_spec = module.get_spec()
self.register_module_spec(module_spec, include_invisible)
def add_command(
self,
scope_name,
cmd_name,
desc,
usage,
handler,
authz_func,
visible,
confirm,
client_cmd=None,
map_client_cmd=False,
):
if client_cmd and map_client_cmd:
self.mapped_cmds.append(
CommandSpec(
scope_name=scope_name,
name=cmd_name,
description=desc,
usage=usage,
confirm=confirm,
visible=visible,
handler_func=None,
client_cmd=client_cmd,
)
)
return
scope = self._get_scope(scope_name)
scope.register_command(
cmd_name=cmd_name,
cmd_desc=desc,
cmd_usage=usage,
handler_func=handler,
authz_func=authz_func,
visible=visible,
confirm=confirm,
client_cmd=client_cmd,
)
def _add_cmd_entry(self, cmd_name, entry):
entry_list = self.cmd_map.get(cmd_name, None)
if entry_list is None:
entry_list = []
self.cmd_map[cmd_name] = entry_list
entry_list.append(entry)
def finalize(self, add_cmd_func=None):
if len(self.cmd_map) > 0:
# already finalized
return
for scope_name, scope in self.scopes.items():
for cmd_name, entry in scope.entries.items():
assert isinstance(entry, CommandEntry)
self._add_cmd_entry(cmd_name, entry)
self._add_cmd_entry(entry.full_command_name(), entry)
if add_cmd_func:
add_cmd_func(entry)
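# Illustrative usage sketch (added for exposition; not part of the original
# module): register one command, finalize, and look it up by both its short
# and fully-qualified name. The scope/command names are made up.
if __name__ == "__main__":
    def say_hello(conn, args):
        print("hello from handler")

    reg = CommandRegister(app_ctx=None)
    reg.add_command(
        scope_name="demo",
        cmd_name="hello",
        desc="print a greeting",
        usage="hello",
        handler=say_hello,
        authz_func=None,
        visible=True,
        confirm="none",
    )
    reg.finalize()
    assert reg.get_command_entries("hello")[0] is reg.get_command_entries("demo.hello")[0]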
| NVFlare-main | nvflare/fuel/hci/reg.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
def repeat_to_length(string_to_expand, length):
"""Repeats string_to_expand to fill up a string of the provided length.
Args:
string_to_expand: string to repeat
length: length of string to return
Returns: generated string of provided length
"""
return (string_to_expand * (int(length / len(string_to_expand)) + 1))[:length]
class Table(object):
def __init__(self, headers: Optional[List[str]] = None, meta_rows=None):
"""A structure with header and rows of records.
Note:
The headers will be converted to uppercase.
Args:
headers: headers of the table
"""
self.rows = []
self.meta_rows = meta_rows
if headers and len(headers) > 0:
new_headers = []
for h in headers:
new_headers.append(h.upper())
self.rows.append(new_headers)
def set_rows(self, rows, meta_rows=None):
"""Sets the rows of records."""
self.rows = rows
self.meta_rows = meta_rows
def add_row(self, row: List[str], meta: dict = None):
"""Adds a record."""
self.rows.append(row)
if meta:
    if self.meta_rows is None:  # guard: the table may be created without meta_rows
        self.meta_rows = []
    self.meta_rows.append(meta)
def write(self, writer):
# compute the number of cols
num_cols = 0
for row in self.rows:
if num_cols < len(row):
num_cols = len(row)
# compute max col size
col_len = [0 for _ in range(num_cols)]
for row in self.rows:
for i in range(len(row)):
if col_len[i] < len(row[i]):
col_len[i] = len(row[i])
col_fmt = ["" for _ in range(num_cols)]
for i in range(num_cols):
if i == 0:
extra = ""
else:
extra = " "
col_fmt[i] = extra + "| {:" + "{}".format(col_len[i]) + "}"
if i == num_cols - 1:
col_fmt[i] = col_fmt[i] + " |"
total_col_size = 0
for i in range(num_cols):
total_col_size += col_len[i] + 2
table_width = total_col_size + num_cols + 1
border_line = repeat_to_length("-", table_width)
writer.write(border_line + "\n")
for r in range(len(self.rows)):
row = self.rows[r]
line = ""
for i in range(num_cols):
if i < len(row):
data = row[i]
else:
data = " "
line += col_fmt[i].format(data)
writer.write(line + "\n")
if r == 0:
writer.write(border_line + "\n")
writer.write(border_line + "\n")
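# Illustrative usage sketch (added for exposition; not part of the original
# module): render a small table to stdout; headers are upper-cased by the
# constructor and column widths adapt to the widest cell.
if __name__ == "__main__":
    import sys

    t = Table(headers=["name", "status"])
    t.add_row(["site-1", "connected"])
    t.add_row(["site-2", "idle"])
    t.write(sys.stdout)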
| NVFlare-main | nvflare/fuel/hci/table.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.fuel.common.ctx import BaseContext
from .chunk import MAX_CHUNK_SIZE, Receiver
from .proto import Buffer, validate_proto
from .table import Table
# ASCII Message Format:
#
# Only ASCII chars can be used in message;
# A message consists of multiple lines, each ended with the LINE_END char;
# The message is ended with the ALL_END char.
LINE_END = "\x03" # Indicates the end of the line (end of text)
ALL_END = "\x04" # Marks the end of a complete transmission (End of Transmission)
MAX_MSG_SIZE = 1024
def receive_til_end(sock, end=ALL_END):
total_data = []
while True:
data = str(sock.recv(1024), "utf-8")
if end in data:
total_data.append(data[: data.find(end)])
break
total_data.append(data)
result = "".join(total_data)
return result.replace(LINE_END, "")
# Returns:
#   parts - list of text segments split by LINE_END
#   all_done - True if ALL_END was found; anything after ALL_END is dropped
def _split_data(data: str):
# first determine whether the data contains ALL_END
# anything after ALL_END is dropped
all_done = False
idx = data.find(ALL_END)
if idx >= 0:
data = data[:idx]
all_done = True
# find lines separated by LINE_END
parts = data.split(LINE_END)
return parts, all_done
def _process_one_line(line: str, process_json_func):
"""Validate and process one line, which should be a str containing a JSON document."""
json_data = validate_proto(line)
process_json_func(json_data)
def receive_bytes_and_process(sock, receive_bytes_func):
receiver = Receiver(receive_data_func=receive_bytes_func)
while True:
data = sock.recv(MAX_CHUNK_SIZE)
if not data:
return False
done = receiver.receive(data)
if done:
return True
def receive_and_process(sock, process_json_func):
"""Receives and sends lines to process with process_json_func."""
leftover = ""
while True:
data = str(sock.recv(MAX_MSG_SIZE), "utf-8")
if len(data) <= 0:
return False
segs, all_done = _split_data(data)
if all_done:
for seg in segs:
line = leftover + seg
if len(line) > 0:
_process_one_line(line, process_json_func)
leftover = ""
return True
for i in range(len(segs) - 1):
line = leftover + segs[i]
if len(line) > 0:
_process_one_line(line, process_json_func)
leftover = ""
leftover += segs[len(segs) - 1]
class Connection(BaseContext):
def __init__(self, sock, server):
Holds connection information and a buffer used to build and send lines over the socket passed in at init.
Args:
sock: sock for the connection
server: server for the connection
"""
BaseContext.__init__(self)
self.sock = sock
self.server = server
self.app_ctx = None
self.ended = False
self.request = None
self.command = None
self.args = None
self.buffer = Buffer()
self.binary_mode = False
def _send_line(self, line: str, all_end=False):
"""If not ``self.ended``, send line with sock."""
if self.ended:
return
if all_end:
end = ALL_END
self.ended = True
else:
end = LINE_END
self.sock.sendall(bytes(line + end, "utf-8"))
def flush_bytes(self, data):
self.sock.sendall(data)
def append_table(self, headers: List[str], name=None) -> Table:
return self.buffer.append_table(headers, name=name)
def append_string(self, data: str, flush=False, meta: dict = None):
self.buffer.append_string(data, meta=meta)
if flush:
self.flush()
def append_success(self, data: str, flush=False, meta: dict = None):
self.buffer.append_success(data, meta=meta)
if flush:
self.flush()
def append_dict(self, data: dict, flush=False, meta: dict = None):
self.buffer.append_dict(data, meta=meta)
if flush:
self.flush()
def append_error(self, data: str, flush=False, meta: dict = None):
self.buffer.append_error(data, meta=meta)
if flush:
self.flush()
def append_command(self, cmd: str, flush=False):
self.buffer.append_command(cmd)
if flush:
self.flush()
def append_token(self, token: str, flush=False):
self.buffer.append_token(token)
if flush:
self.flush()
def append_shutdown(self, msg: str, flush=False):
self.buffer.append_shutdown(msg)
if flush:
self.flush()
def append_any(self, data, flush=False, meta: dict = None):
if data is None:
return
if isinstance(data, str):
self.append_string(data, flush, meta=meta)
elif isinstance(data, dict):
self.append_dict(data, flush, meta)
else:
self.append_error("unsupported data type {}".format(type(data)))
def update_meta(self, meta: dict):
self.buffer.update_meta(meta)
def flush(self):
line = self.buffer.encode()
if line is None or len(line) <= 0:
return
self.buffer.reset()
self._send_line(line, all_end=False)
def close(self):
if not self.binary_mode:
self.flush()
self._send_line("", all_end=True)
| NVFlare-main | nvflare/fuel/hci/conn.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for file transfer command module."""
SERVER_MODULE_NAME = "file_transfer"
SERVER_CMD_UPLOAD_TEXT = "_upload_text_file"
SERVER_CMD_DOWNLOAD_TEXT = "_download_text_file"
SERVER_CMD_UPLOAD_BINARY = "_upload_binary_file"
SERVER_CMD_DOWNLOAD_BINARY = "_download_binary_file"
SERVER_CMD_UPLOAD_FOLDER = "_upload_folder"
SERVER_CMD_SUBMIT_JOB = "_submit_job"
SERVER_CMD_DOWNLOAD_JOB = "_download_job"
SERVER_CMD_INFO = "_info"
SERVER_CMD_PULL_BINARY = "_pull_binary_file"
DOWNLOAD_URL_MARKER = "Download_URL:"
UPLOAD_FOLDER_FQN = "file_transfer.upload_folder"
DOWNLOAD_FOLDER_FQN = "file_transfer.download_folder"
PULL_FOLDER_FQN = "file_transfer.pull_folder"
PULL_BINARY_FQN = "file_transfer.pull_binary"
| NVFlare-main | nvflare/fuel/hci/file_transfer_defs.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from datetime import datetime
from enum import Enum
from typing import List
from .table import Table
class ProtoKey(object):
TIME = "time"
DATA = "data"
META = "meta"
TYPE = "type"
STRING = "string"
TABLE = "table"
ROWS = "rows"
DICT = "dict"
SUCCESS = "success"
ERROR = "error"
SHUTDOWN = "shutdown"
COMMAND = "command"
TOKEN = "token"
DETAILS = "details"
STATUS = "status"
class MetaKey(object):
STATUS = "status"
INFO = "info"
JOB_ID = "job_id"
DATA_TYPE = "data_type"
JOB_META = "job_meta"
JOB_DATA = "job_data"
WORKSPACE = "workspace"
JOB_DOWNLOAD_URL = "job_download_url"
APP_NAME = "app_name"
SERVER_STATUS = "server_status"
SERVER_START_TIME = "server_start_time"
CLIENT_NAME = "client_name"
CLIENT_LAST_CONNECT_TIME = "client_last_conn_time"
CLIENTS = "clients"
CLIENT_STATUS = "client_status"
JOBS = "jobs"
JOB_NAME = "job_name"
SUBMIT_TIME = "submit_time"
DURATION = "duration"
CMD_TIMEOUT = "cmd_timeout"
CUSTOM_PROPS = "custom_props"
FILES = "files"
CMD_NAME = "cmd_name"
CONTROL_ID = "control_id"
class MetaStatusValue(object):
OK = "ok"
SYNTAX_ERROR = "syntax_error"
NOT_AUTHORIZED = "not_authorized"
NOT_AUTHENTICATED = "not_authenticated"
ERROR = "error"
INTERNAL_ERROR = "internal_error"
INVALID_TARGET = "invalid_target"
INVALID_JOB_DEFINITION = "invalid_job_def"
INVALID_JOB_ID = "invalid_job_id"
JOB_RUNNING = "job_running"
JOB_NOT_RUNNING = "job_not_running"
CLIENTS_RUNNING = "clients_running"
NO_JOBS = "no_jobs"
NO_REPLY = "no_reply"
NO_CLIENTS = "no_clients"
class CredentialType(str, Enum):
PASSWORD = "password"
CERT = "cert"
LOCAL_CERT = "local_cert"
class InternalCommands(object):
PWD_LOGIN = "_login"
CERT_LOGIN = "_cert_login"
LOGOUT = "_logout"
GET_CMD_LIST = "_commands"
CHECK_SESSION = "_check_session"
LIST_SESSIONS = "list_sessions"
class ConfirmMethod(object):
AUTH = "auth"
PASSWORD = "pwd"
YESNO = "yesno"
USER_NAME = "username"
class Buffer(object):
def __init__(self):
"""Buffer to append to for :class:`nvflare.fuel.hci.conn.Connection`."""
self.meta = {}
self.data = []
self.output = {ProtoKey.TIME: str(datetime.now()), ProtoKey.DATA: self.data, ProtoKey.META: self.meta}
def append_table(self, headers: List[str], name=None) -> Table:
meta_rows = []
if name:
self.meta.update({name: meta_rows})
t = Table(headers, meta_rows)
self.data.append({ProtoKey.TYPE: ProtoKey.TABLE, ProtoKey.ROWS: t.rows})
return t
def update_meta(self, meta: dict):
if meta:
self.meta.update(meta)
def append_string(self, data: str, meta: dict = None):
self.data.append({ProtoKey.TYPE: ProtoKey.STRING, ProtoKey.DATA: data})
self.update_meta(meta)
def append_dict(self, data: dict, meta: dict = None):
self.data.append({ProtoKey.TYPE: ProtoKey.DICT, ProtoKey.DATA: data})
self.update_meta(meta)
def append_success(self, data: str, meta: dict = None):
self.data.append({ProtoKey.TYPE: ProtoKey.SUCCESS, ProtoKey.DATA: data})
if not meta:
meta = make_meta(MetaStatusValue.OK, data)
self.update_meta(meta)
def append_error(self, data: str, meta: dict = None):
self.data.append({ProtoKey.TYPE: ProtoKey.ERROR, ProtoKey.DATA: data})
if not meta:
meta = make_meta(MetaStatusValue.ERROR, data)
self.update_meta(meta)
def append_command(self, cmd: str):
self.data.append({ProtoKey.TYPE: ProtoKey.COMMAND, ProtoKey.DATA: cmd})
def append_token(self, token: str):
self.data.append({ProtoKey.TYPE: ProtoKey.TOKEN, ProtoKey.DATA: token})
def append_shutdown(self, msg: str):
self.data.append({ProtoKey.TYPE: ProtoKey.SHUTDOWN, ProtoKey.DATA: msg})
def encode(self):
if len(self.data) <= 0:
return None
return json.dumps(self.output)
def reset(self):
self.data = []
self.meta = {}
self.output = {ProtoKey.TIME: str(datetime.now()), ProtoKey.DATA: self.data, ProtoKey.META: self.meta}
def make_error(data: str):
buf = Buffer()
buf.append_error(data)
return buf.output
def validate_proto(line: str):
"""Validate that the line being received is of the expected format.
Args:
line: str containing a JSON document
Returns: deserialized JSON document
"""
all_types = [
ProtoKey.STRING,
ProtoKey.SUCCESS,
ProtoKey.ERROR,
ProtoKey.TABLE,
ProtoKey.COMMAND,
ProtoKey.TOKEN,
ProtoKey.SHUTDOWN,
ProtoKey.DICT,
]
types_with_data = [
ProtoKey.STRING,
ProtoKey.SUCCESS,
ProtoKey.ERROR,
ProtoKey.DICT,
ProtoKey.COMMAND,
ProtoKey.TOKEN,
ProtoKey.SHUTDOWN,
]
try:
json_data = json.loads(line)
assert isinstance(json_data, dict)
assert ProtoKey.DATA in json_data
data = json_data[ProtoKey.DATA]
assert isinstance(data, list)
for item in data:
assert isinstance(item, dict)
assert ProtoKey.TYPE in item
it = item[ProtoKey.TYPE]
assert it in all_types
if it in types_with_data:
item_data = item.get(ProtoKey.DATA, None)
assert item_data is not None
assert isinstance(item_data, str) or isinstance(item_data, dict)
elif it == ProtoKey.TABLE:
assert ProtoKey.ROWS in item
rows = item[ProtoKey.ROWS]
assert isinstance(rows, list)
for row in rows:
assert isinstance(row, list)
return json_data
except Exception:
return None
def make_meta(status: str, info: str = "", extra: dict = None) -> dict:
meta = {MetaKey.STATUS: status, MetaKey.INFO: info}
if extra:
meta.update(extra)
return meta
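# Illustrative usage sketch (added for exposition; not part of the original
# module): build a message with Buffer, encode it to the JSON wire format,
# and confirm it passes validate_proto.
if __name__ == "__main__":
    buf = Buffer()
    buf.append_string("hello")
    buf.append_success("done", meta=make_meta(MetaStatusValue.OK, "all good"))
    line = buf.encode()
    assert validate_proto(line) is not None
    print(line)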
| NVFlare-main | nvflare/fuel/hci/proto.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/hci/tools/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
from nvflare.fuel.hci.security import hash_password
def main():
"""
TODO: should this file be removed?
"""
user_name = input("User Name: ")
pwd = getpass.getpass("Password (8 or more chars): ")
if len(pwd) < 8:
print("Invalid password - must have at least 8 chars")
return
pwd2 = getpass.getpass("Confirm Password: ")
if pwd != pwd2:
print("Passwords mismatch")
return
result = hash_password(user_name + pwd)
print("Password Hash: {}".format(result))
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/fuel/hci/tools/make_pwd.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cmd
import json
import sys
from nvflare.fuel.hci.cmd_arg_utils import split_to_args
from nvflare.fuel.hci.table import Table
from nvflare.fuel.sec.authz import AuthzContext, Person, Policy, parse_policy_config
from nvflare.security.security import COMMAND_CATEGORIES
class Commander(cmd.Cmd):
def __init__(self, policy: Policy):
"""Command line prompt helper tool for getting information for authorization configurations.
Args:
policy: authorization policy object
"""
cmd.Cmd.__init__(self)
self.policy = policy
self.intro = "Type help or ? to list commands.\n"
self.prompt = "> "
def do_bye(self, arg):
"""Exits from the client."""
return True
def emptyline(self):
return
def _split_to_args(self, arg):
if len(arg) <= 0:
return []
else:
return split_to_args(arg)
def do_show_rights(self, arg):
rights = self.policy.rights
table = Table(["right"])
for r in rights:
table.add_row([r])
self.write_table(table)
def do_show_roles(self, arg):
roles = self.policy.roles
table = Table(["role"])
for r in roles:
table.add_row([r])
self.write_table(table)
def do_show_config(self, arg):
config = self.policy.config
self.write_string(json.dumps(config, indent=1))
def do_show_role_rights(self, arg):
role_rights = self.policy.role_rights
table = Table(["role", "right", "conditions"])
for role_name in sorted(role_rights):
right_conds = role_rights[role_name]
for right_name in sorted(right_conds):
conds = right_conds[right_name]
table.add_row([role_name, right_name, str(conds)])
self.write_table(table)
def _parse_person(self, spec: str):
parts = spec.split(":")
if len(parts) != 3:
return "must be like name:org:role"
return Person(parts[0], parts[1], parts[2])
def do_eval_right(self, arg):
args = ["eval_right"] + self._split_to_args(arg)
if len(args) < 4:
self.write_string(
"Usage: {} site_org right_name user_name:org:role [submitter_name:org:role]".format(args[0])
)
return
site_org = args[1]
right_name = args[2]
user_spec = args[3]
submitter_spec = None
if len(args) > 4:
submitter_spec = args[4]
parsed = self._parse_person(user_spec)
if isinstance(parsed, str):
# error
return self.write_error("bad user spec: " + parsed)
user = parsed
submitter = None
if submitter_spec:
parsed = self._parse_person(submitter_spec)
if isinstance(parsed, str):
# error
return self.write_error("bad submitter spec: " + parsed)
submitter = parsed
result, err = self.policy.evaluate(
site_org=site_org, ctx=AuthzContext(right=right_name, user=user, submitter=submitter)
)
if err:
self.write_error(err)
elif result is None:
self.write_string("undetermined")
else:
self.write_string(str(result))
def write_string(self, data: str):
content = data + "\n"
self.stdout.write(content)
def write_table(self, table: Table):
table.write(self.stdout)
def write_error(self, err: str):
content = "Error: " + err + "\n"
self.stdout.write(content)
def define_authz_preview_parser(parser):
parser.add_argument("--policy", "-p", type=str, help="authz policy file", required=True)
def load_policy(policy_file_path):
with open(policy_file_path) as file:
config = json.load(file)
policy, err = parse_policy_config(config, COMMAND_CATEGORIES)
if err:
print("Policy config error: {}".format(err))
sys.exit(1)
return policy
def run_command(args):
policy = load_policy(args.policy)
commander = Commander(policy)
commander.cmdloop(intro="Type help or ? to list commands.")
def main():
"""Tool to help preview and see the details of an authorization policy with command line commands."""
parser = argparse.ArgumentParser()
define_authz_preview_parser(parser)
args = parser.parse_args()
run_command(args)
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/fuel/hci/tools/authz_preview.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.hci.client.cli import AdminClient, CredentialType
from nvflare.fuel.hci.client.config import FLAdminClientStarterConfigurator
from nvflare.fuel.hci.client.file_transfer import FileTransferModule
from nvflare.fuel.hci.client.overseer_service_finder import ServiceFinderByOverseer
from nvflare.security.logging import secure_format_exception
def main():
"""
Script to launch the admin client to issue admin commands to the server.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument(
"--fed_admin", "-s", type=str, help="json file with configurations for launching admin client", required=True
)
parser.add_argument("--cli_history_size", type=int, default=1000)
parser.add_argument("--with_debug", action="store_true")
args = parser.parse_args()
try:
os.chdir(args.workspace)
workspace = Workspace(root_dir=args.workspace)
conf = FLAdminClientStarterConfigurator(workspace=workspace)
conf.configure()
except ConfigError as e:
print(f"ConfigError: {secure_format_exception(e)}")
return
try:
admin_config = conf.config_data["admin"]
except KeyError:
print("Missing admin section in fed_admin configuration.")
return
modules = []
if admin_config.get("with_file_transfer"):
modules.append(
FileTransferModule(upload_dir=admin_config.get("upload_dir"), download_dir=admin_config.get("download_dir"))
)
ca_cert = admin_config.get("ca_cert", "")
client_cert = admin_config.get("client_cert", "")
client_key = admin_config.get("client_key", "")
if admin_config.get("with_ssl"):
if len(ca_cert) <= 0:
print("missing CA Cert file name field ca_cert in fed_admin configuration")
return
if len(client_cert) <= 0:
print("missing Client Cert file name field client_cert in fed_admin configuration")
return
if len(client_key) <= 0:
print("missing Client Key file name field client_key in fed_admin configuration")
return
else:
ca_cert = None
client_key = None
client_cert = None
if args.with_debug:
print("SSL: {}".format(admin_config.get("with_ssl")))
print("File Transfer: {}".format(admin_config.get("with_file_transfer")))
if admin_config.get("with_file_transfer"):
print(" Upload Dir: {}".format(admin_config.get("upload_dir")))
print(" Download Dir: {}".format(admin_config.get("download_dir")))
service_finder = None
if conf.overseer_agent:
service_finder = ServiceFinderByOverseer(conf.overseer_agent)
client = AdminClient(
prompt=admin_config.get("prompt", "> "),
cmd_modules=modules,
ca_cert=ca_cert,
client_cert=client_cert,
client_key=client_key,
upload_dir=admin_config.get("upload_dir"),
download_dir=admin_config.get("download_dir"),
credential_type=CredentialType(admin_config.get("cred_type", CredentialType.PASSWORD.value)),
debug=args.with_debug,
service_finder=service_finder,
username=admin_config.get("username", ""),
handlers=conf.handlers,
cli_history_dir=args.workspace,
cli_history_size=args.cli_history_size,
)
client.run()
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/fuel/hci/tools/admin.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socketserver
import ssl
import threading
from nvflare.fuel.hci.conn import Connection, receive_til_end
from nvflare.fuel.hci.proto import MetaKey, MetaStatusValue, ProtoKey, make_meta, validate_proto
from nvflare.fuel.hci.security import IdentityKey, get_identity_info
from nvflare.security.logging import secure_log_traceback
from .constants import ConnProps
from .reg import ServerCommandRegister
logger = logging.getLogger(__name__)
class _MsgHandler(socketserver.BaseRequestHandler):
"""Message handler.
Used by the AdminServer to receive admin commands, validate, then process and do command through the
ServerCommandRegister.
"""
def handle(self):
try:
conn = Connection(self.request, self.server)
conn.set_prop(ConnProps.CA_CERT, self.server.ca_cert)
if self.server.extra_conn_props:
conn.set_props(self.server.extra_conn_props)
if self.server.cmd_reg.conn_props:
conn.set_props(self.server.cmd_reg.conn_props)
if self.server.use_ssl:
identity = get_identity_info(self.request.getpeercert())
conn.set_prop(ConnProps.CLIENT_IDENTITY, identity)
valid = self.server.validate_client_cn(identity[IdentityKey.NAME])
else:
valid = True
if not valid:
conn.append_error(
"authentication error", meta=make_meta(MetaStatusValue.NOT_AUTHENTICATED, info="invalid credential")
)
else:
req = receive_til_end(self.request).strip()
command = None
req_json = validate_proto(req)
conn.request = req_json
if req_json is not None:
meta = req_json.get(ProtoKey.META, None)
if meta and isinstance(meta, dict):
cmd_timeout = meta.get(MetaKey.CMD_TIMEOUT)
if cmd_timeout:
conn.set_prop(ConnProps.CMD_TIMEOUT, cmd_timeout)
custom_props = meta.get(MetaKey.CUSTOM_PROPS)
if custom_props:
conn.set_prop(ConnProps.CUSTOM_PROPS, custom_props)
data = req_json[ProtoKey.DATA]
for item in data:
it = item[ProtoKey.TYPE]
if it == ProtoKey.COMMAND:
command = item[ProtoKey.DATA]
break
if command is None:
conn.append_error(
"protocol violation", meta=make_meta(MetaStatusValue.INTERNAL_ERROR, "protocol violation")
)
else:
self.server.cmd_reg.process_command(conn, command)
else:
# not json encoded
conn.append_error(
"protocol violation", meta=make_meta(MetaStatusValue.INTERNAL_ERROR, "protocol violation")
)
if not conn.ended:
conn.close()
except Exception:
secure_log_traceback()
def initialize_hci():
socketserver.TCPServer.allow_reuse_address = True
class AdminServer(socketserver.ThreadingTCPServer):
# faster re-binding
allow_reuse_address = True
# allow a deeper connection backlog than the default of 5
request_queue_size = 10
# kick connections when we exit
daemon_threads = True
def __init__(
self,
cmd_reg: ServerCommandRegister,
host,
port,
ca_cert=None,
server_cert=None,
server_key=None,
accepted_client_cns=None,
extra_conn_props=None,
):
"""Base class of FedAdminServer to create a server that can receive commands.
Args:
cmd_reg: CommandRegister
host: the IP address of the admin server
port: port number of admin server
ca_cert: the root CA's cert file name
server_cert: server's cert, signed by the CA
server_key: server's private key file
accepted_client_cns: list of accepted Common Names from client, if specified
extra_conn_props: a dict of extra conn props, if specified
"""
if extra_conn_props is not None:
assert isinstance(extra_conn_props, dict), "extra_conn_props must be dict but got {}".format(
extra_conn_props
)
socketserver.TCPServer.__init__(self, ("0.0.0.0", port), _MsgHandler, False)
self.use_ssl = False
if ca_cert and server_cert:
if accepted_client_cns:
assert isinstance(accepted_client_cns, list), "accepted_client_cns must be list but got {}.".format(
accepted_client_cns
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
# This feature is only supported on 3.7+
ctx.minimum_version = ssl.TLSVersion.TLSv1_2
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(ca_cert)
ctx.load_cert_chain(certfile=server_cert, keyfile=server_key)
# replace the socket with an ssl version of itself
self.socket = ctx.wrap_socket(self.socket, server_side=True)
self.use_ssl = True
# bind the socket and start the server
self.server_bind()
self.server_activate()
self._thread = None
self.ca_cert = ca_cert
self.host = host
self.port = port
self.accepted_client_cns = accepted_client_cns
self.extra_conn_props = extra_conn_props
self.cmd_reg = cmd_reg
cmd_reg.finalize()
def validate_client_cn(self, cn):
if self.accepted_client_cns:
return cn in self.accepted_client_cns
else:
return True
def stop(self):
self.shutdown()
self.cmd_reg.close()
logger.info(f"Admin Server {self.host} on Port {self.port} shutdown!")
def set_command_registry(self, cmd_reg: ServerCommandRegister):
if cmd_reg:
cmd_reg.finalize()
if self.cmd_reg:
self.cmd_reg.close()
self.cmd_reg = cmd_reg
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run, args=())
self._thread.daemon = True
if not self._thread.is_alive():
self._thread.start()
def _run(self):
logger.info(f"Starting Admin Server {self.host} on Port {self.port}")
self.serve_forever()
| NVFlare-main | nvflare/fuel/hci/server/hci.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.sec.audit import Auditor
from .constants import ConnProps
from .reg import CommandFilter
class CommandAudit(CommandFilter):
def __init__(self, auditor: Auditor):
"""Command filter for auditing by adding events.
This filter needs to be registered after the login filter because it needs the username established
by the login filter.
Args:
auditor: instance of Auditor
"""
CommandFilter.__init__(self)
assert isinstance(auditor, Auditor), "auditor must be Auditor but got {}".format(type(auditor))
self.auditor = auditor
def pre_command(self, conn: Connection, args: List[str]):
user_name = conn.get_prop(ConnProps.USER_NAME, "?")
event_id = self.auditor.add_event(
user=user_name,
action=conn.command[:100], # at most 100 chars
)
conn.set_prop(ConnProps.EVENT_ID, event_id)
return True
| NVFlare-main | nvflare/fuel/hci/server/audit.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from typing import List
import nvflare.fuel.hci.file_transfer_defs as ftd
from nvflare.fuel.hci.base64_utils import (
b64str_to_binary_file,
b64str_to_bytes,
b64str_to_text_file,
binary_file_to_b64str,
text_file_to_b64str,
)
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.fuel.utils.zip_utils import unzip_all_from_bytes
from nvflare.private.fed.server.cmd_utils import CommandUtil
from nvflare.security.logging import secure_format_exception, secure_log_traceback
class FileTransferModule(CommandModule, CommandUtil):
def __init__(self, upload_dir: str, download_dir: str):
"""Command module for file transfers.
Args:
upload_dir:
download_dir:
"""
if not os.path.isdir(upload_dir):
raise ValueError("upload_dir {} is not a valid dir".format(upload_dir))
if not os.path.isdir(download_dir):
raise ValueError("download_dir {} is not a valid dir".format(download_dir))
self.upload_dir = upload_dir
self.download_dir = download_dir
def get_spec(self):
return CommandModuleSpec(
name=ftd.SERVER_MODULE_NAME,
cmd_specs=[
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_TEXT,
description="upload one or more text files",
usage="_upload name1 data1 name2 data2 ...",
handler_func=self.upload_text_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_TEXT,
description="download one or more text files",
usage="download file_name ...",
handler_func=self.download_text_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_BINARY,
description="upload one or more binary files",
usage="upload name1 data1 name2 data2 ...",
handler_func=self.upload_binary_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_BINARY,
description="download one or more binary files",
usage="download file_name ...",
handler_func=self.download_binary_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_FOLDER,
description="upload a folder from client",
usage="upload_folder folder_name",
handler_func=self.upload_folder,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_INFO,
description="show info",
usage="info",
handler_func=self.info,
visible=False,
),
],
conn_props={ConnProps.DOWNLOAD_DIR: self.download_dir, ConnProps.UPLOAD_DIR: self.upload_dir},
)
def upload_file(self, conn: Connection, args: List[str], str_to_file_func):
if len(args) < 3:
conn.append_error("syntax error: missing files")
return
if len(args) % 2 != 1:
conn.append_error("syntax error: file name/data not paired")
return
table = conn.append_table(["file", "size"])
i = 1
while i < len(args):
name = args[i]
data = args[i + 1]
i += 2
full_path = os.path.join(self.upload_dir, name)
num_bytes = str_to_file_func(b64str=data, file_name=full_path)
table.add_row([name, str(num_bytes)])
def upload_text_file(self, conn: Connection, args: List[str]):
self.upload_file(conn, args, b64str_to_text_file)
def upload_binary_file(self, conn: Connection, args: List[str]):
self.upload_file(conn, args, b64str_to_binary_file)
def download_file(self, conn: Connection, args: List[str], file_to_str_func):
if len(args) < 2:
conn.append_error("syntax error: missing file names")
return
table = conn.append_table(["name", "data"])
for i in range(1, len(args)):
file_name = args[i]
full_path = os.path.join(self.download_dir, file_name)
if not os.path.exists(full_path):
conn.append_error("no such file: {}".format(file_name))
continue
if not os.path.isfile(full_path):
conn.append_error("not a file: {}".format(file_name))
continue
encoded_str = file_to_str_func(full_path)
table.add_row([file_name, encoded_str])
def download_text_file(self, conn: Connection, args: List[str]):
self.download_file(conn, args, text_file_to_b64str)
def download_binary_file(self, conn: Connection, args: List[str]):
self.download_file(conn, args, binary_file_to_b64str)
def _authorize_upload_folder(self, conn: Connection, args: List[str]):
if len(args) != 3:
conn.append_error("syntax error: require data")
return False, None
folder_name = args[1]
zip_b64str = args[2]
tmp_dir = tempfile.mkdtemp()
try:
data_bytes = b64str_to_bytes(zip_b64str)
unzip_all_from_bytes(data_bytes, tmp_dir)
tmp_folder_path = os.path.join(tmp_dir, folder_name)
if not os.path.isdir(tmp_folder_path):
conn.append_error("logic error: unzip failed to create folder {}".format(tmp_folder_path))
return False, None
return True, None
except Exception as e:
secure_log_traceback()
conn.append_error(f"exception occurred: {secure_format_exception(e)}")
return False, None
finally:
shutil.rmtree(tmp_dir)
def upload_folder(self, conn: Connection, args: List[str]):
folder_name = args[1]
zip_b64str = args[2]
folder_path = os.path.join(self.upload_dir, folder_name)
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
data_bytes = b64str_to_bytes(zip_b64str)
unzip_all_from_bytes(data_bytes, self.upload_dir)
conn.set_prop("upload_folder_path", folder_path)
conn.append_string("Created folder {}".format(folder_path))
def info(self, conn: Connection, args: List[str]):
conn.append_string("Server Upload Destination: {}".format(self.upload_dir))
conn.append_string("Server Download Source: {}".format(self.download_dir))
| NVFlare-main | nvflare/fuel/hci/server/file_transfer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ConnProps(object):
"""Constants for connection properties."""
EVENT_ID = "_eventId"
USER_NAME = "_userName"
USER_ORG = "_userOrg"
USER_ROLE = "_userRole"
SUBMITTER_NAME = "_submitterName"
SUBMITTER_ORG = "_submitterOrg"
SUBMITTER_ROLE = "_submitterRole"
TOKEN = "_sessionToken"
SESSION = "_session"
CLIENT_IDENTITY = "_clientIdentity"
CA_CERT = "_caCert"
UPLOAD_DIR = "_uploadDir"
DOWNLOAD_DIR = "_downloadDir"
DOWNLOAD_JOB_URL = "_downloadJobUrl"
CMD_ENTRY = "_cmdEntry"
JOB_DATA = "_jobData"
JOB_META = "_jobMeta"
CMD_TIMEOUT = "_cmdTimeout"
CUSTOM_PROPS = "_customProps"
| NVFlare-main | nvflare/fuel/hci/server/constants.py |
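A quick sketch of how these ConnProps keys are used: a Connection carries a property bag that filters and command handlers read and write under well-known keys. The _PropBag class below is a minimal stand-in for the real Connection and assumes the ConnProps class above is in scope.

class _PropBag:
    # minimal stand-in for Connection's set_prop/get_prop behavior
    def __init__(self):
        self._props = {}
    def set_prop(self, key, value):
        self._props[key] = value
    def get_prop(self, key, default=None):
        return self._props.get(key, default)

conn = _PropBag()
conn.set_prop(ConnProps.USER_NAME, "admin@example.com")
conn.set_prop(ConnProps.USER_ROLE, "project_admin")
print(conn.get_prop(ConnProps.USER_NAME))          # admin@example.com
print(conn.get_prop(ConnProps.TOKEN, "no-token"))  # no-token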
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/hci/server/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.fuel.hci.cmd_arg_utils import split_to_args
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandRegister
from nvflare.security.logging import secure_format_exception, secure_log_traceback
from .constants import ConnProps
class CommandFilter(object):
"""Base class for filters to run before or after commands."""
def pre_command(self, conn: Connection, args: List[str]) -> bool:
"""Code to execute before executing a command.
        Returns: True to continue the filter chain, or False to stop it
"""
return True
    def post_command(self, conn: Connection, args: List[str]) -> bool:
        """Code to execute after executing a command.
        Returns: True to continue the filter chain, or False to stop it
        """
        return True
def close(self):
pass
class ServerCommandRegister(CommandRegister):
def __init__(self, app_ctx):
"""Runs filters and executes commands by calling their handler function.
This is the main command register used by AdminServer.
Args:
app_ctx: app context
"""
CommandRegister.__init__(self, app_ctx)
self.filters = []
self.closed = False
def add_filter(self, cmd_filter: CommandFilter):
assert isinstance(cmd_filter, CommandFilter), "cmd_filter must be CommandFilter but got {}.".format(
type(cmd_filter)
)
self.filters.append(cmd_filter)
def _do_command(self, conn: Connection, command: str):
"""Executes command.
Getting the command from the command registry, invoke filters and call the handler function, passing along conn
and the args split from the command.
"""
conn.app_ctx = self.app_ctx
args = split_to_args(command)
conn.args = args
conn.command = command
cmd_name = args[0]
entries = self.get_command_entries(cmd_name)
if len(entries) <= 0:
conn.append_error('Unknown command "{}"'.format(cmd_name))
return
elif len(entries) == 1:
conn.set_prop(ConnProps.CMD_ENTRY, entries[0])
handler = entries[0].handler
else:
conn.append_error('Command "{}" exists in multiple scopes. Please use full command name'.format(cmd_name))
return
if handler is None:
conn.append_error('Unknown command "{}"'.format(cmd_name))
return
# invoke pre filters
if len(self.filters) > 0:
for f in self.filters:
ok = f.pre_command(conn, args)
if not ok:
return
handler(conn, args)
# invoke post filters
if len(self.filters) > 0:
for f in self.filters:
f.post_command(conn, args)
def process_command(self, conn: Connection, command: str):
try:
self._do_command(conn, command)
except Exception as e:
secure_log_traceback()
conn.append_error(f"Exception Occurred: {secure_format_exception(e)}")
def close(self):
if self.closed:
return
for f in self.filters:
f.close()
for m in self.modules:
m.close()
self.closed = True
| NVFlare-main | nvflare/fuel/hci/server/reg.py |
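To illustrate the filter chain above, here is a minimal hypothetical CommandFilter that logs each command around its execution; it assumes the CommandFilter, Connection, and List names from the module above. Registering it with ServerCommandRegister.add_filter() would run it for every command, and returning False from pre_command would block the command.

class LoggingFilter(CommandFilter):
    def pre_command(self, conn: Connection, args: List[str]) -> bool:
        # runs before the handler; False stops the chain and skips the command
        print(f"about to run: {' '.join(args)}")
        return True
    def post_command(self, conn: Connection, args: List[str]) -> bool:
        # runs after the handler completes
        print(f"finished: {args[0]}")
        return True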
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandEntry, CommandModule, CommandModuleSpec, CommandSpec
from .reg import ServerCommandRegister
class BuiltInCmdModule(CommandModule):
def __init__(self, reg: ServerCommandRegister):
"""Built in CommandModule with the ability to list commands.
Args:
reg: ServerCommandRegister
"""
self.reg = reg
def get_spec(self):
return CommandModuleSpec(
name="",
cmd_specs=[
CommandSpec(
name="_commands",
description="list server commands",
usage="_commands",
handler_func=self.handle_list_commands,
visible=False,
)
],
)
def _show_command(self, conn: Connection, cmd_name):
entries = self.reg.get_command_entries(cmd_name)
if len(entries) <= 0:
conn.append_error("undefined command {}\n".format(cmd_name))
return
for e in entries:
if not e.visible:
continue
if len(e.scope.name) > 0:
conn.append_string("Command: {}.{}".format(e.scope.name, cmd_name))
else:
conn.append_string("Command: {}".format(cmd_name))
conn.append_string("Description: {}".format(e.desc))
conn.append_string("Usage: {}\n".format(e.usage))
def handle_list_commands(self, conn: Connection, args: List[str]):
if len(args) <= 1:
table = conn.append_table(["Scope", "Command", "Description", "Usage", "Confirm", "ClientCmd", "Visible"])
for scope_name in sorted(self.reg.scopes):
scope = self.reg.scopes[scope_name]
for cmd_name in sorted(scope.entries):
assert isinstance(cmd_name, str)
e = scope.entries[cmd_name]
assert isinstance(e, CommandEntry)
if not cmd_name.startswith("_"):
# NOTE: command name that starts with _ is internal command and should not be sent to client!
table.add_row([scope_name, cmd_name, e.desc, e.usage, e.confirm, e.client_cmd, str(e.visible)])
else:
for cmd_name in args[1:]:
self._show_command(conn, cmd_name)
def new_command_register_with_builtin_module(app_ctx):
"""Creates ServerCommandRegister and registers builtin command module.
Args:
app_ctx: engine
Returns: ServerCommandRegister
"""
reg = ServerCommandRegister(app_ctx=app_ctx)
reg.register_module(BuiltInCmdModule(reg))
return reg
| NVFlare-main | nvflare/fuel/hci/server/builtin.py |
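BuiltInCmdModule above shows the general shape of a command module: get_spec() declares commands, and each CommandSpec points at a handler. The sketch below follows the same pattern with a hypothetical "echo" command; it assumes the CommandModule, CommandModuleSpec, CommandSpec, Connection, and List names imported above.

class EchoCmdModule(CommandModule):
    def get_spec(self):
        return CommandModuleSpec(
            name="demo",
            cmd_specs=[
                CommandSpec(
                    name="echo",
                    description="echo the arguments back to the client",
                    usage="echo [text ...]",
                    handler_func=self.handle_echo,
                    visible=True,
                )
            ],
        )
    def handle_echo(self, conn: Connection, args: List[str]):
        # args[0] is the command name itself; echo the rest
        conn.append_string(" ".join(args[1:]))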
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import CredentialType, InternalCommands
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.security import IdentityKey, verify_password
from nvflare.fuel.hci.server.constants import ConnProps
from .reg import CommandFilter
from .sess import Session, SessionManager
class Authenticator(ABC):
"""Base class for authenticating credentials."""
@abstractmethod
def authenticate(self, user_name: str, credential: str, credential_type: CredentialType) -> bool:
"""Authenticate a specified user with the provided credential.
Args:
user_name: user login name
credential: provided credential
credential_type: type of credential
Returns: True if successful, False otherwise
"""
pass
class SimpleAuthenticator(Authenticator):
def __init__(self, users):
"""Authenticator to use in the LoginModule for authenticating admin clients for login.
Args:
users: user information
"""
self.users = users
def authenticate_password(self, user_name: str, pwd: str):
pwd_hash = self.users.get(user_name)
if pwd_hash is None:
return False
return verify_password(pwd_hash, pwd)
def authenticate_cn(self, user_name: str, cn):
return user_name == cn
def authenticate(self, user_name: str, credential, credential_type):
if credential_type == CredentialType.PASSWORD:
return self.authenticate_password(user_name, credential)
elif credential_type == CredentialType.CERT:
return self.authenticate_cn(user_name, credential)
else:
return False
class LoginModule(CommandModule, CommandFilter):
def __init__(self, authenticator: Authenticator, sess_mgr: SessionManager):
"""Login module.
CommandModule containing the login commands to handle login and logout of admin clients, as well as the
CommandFilter pre_command to check that a client is logged in with a valid session.
Args:
authenticator: Authenticator
sess_mgr: SessionManager
"""
if authenticator:
if not isinstance(authenticator, Authenticator):
raise TypeError("authenticator must be Authenticator but got {}.".format(type(authenticator)))
if not isinstance(sess_mgr, SessionManager):
raise TypeError("sess_mgr must be SessionManager but got {}.".format(type(sess_mgr)))
self.authenticator = authenticator
self.session_mgr = sess_mgr
def get_spec(self):
return CommandModuleSpec(
name="login",
cmd_specs=[
CommandSpec(
name=InternalCommands.PWD_LOGIN,
description="login to server",
usage="login userName password",
handler_func=self.handle_login,
visible=False,
),
CommandSpec(
name=InternalCommands.CERT_LOGIN,
description="login to server with SSL cert",
usage="login userName",
handler_func=self.handle_cert_login,
visible=False,
),
CommandSpec(
name="_logout",
description="logout from server",
usage="logout",
handler_func=self.handle_logout,
visible=False,
),
],
)
def handle_login(self, conn: Connection, args: List[str]):
if not self.authenticator:
conn.append_string("OK")
return
if len(args) != 3:
conn.append_string("REJECT")
return
user_name = args[1]
pwd = args[2]
ok = self.authenticator.authenticate(user_name, pwd, CredentialType.PASSWORD)
if not ok:
conn.append_string("REJECT")
return
session = self.session_mgr.create_session(user_name=user_name, user_org="global", user_role="project_admin")
conn.append_string("OK")
conn.append_token(session.token)
def handle_cert_login(self, conn: Connection, args: List[str]):
if not self.authenticator:
conn.append_string("OK")
return
if len(args) != 2:
conn.append_string("REJECT")
return
identity = conn.get_prop(ConnProps.CLIENT_IDENTITY, None)
if identity is None:
conn.append_string("REJECT")
return
user_name = args[1]
ok = self.authenticator.authenticate(user_name, identity[IdentityKey.NAME], CredentialType.CERT)
if not ok:
conn.append_string("REJECT")
return
session = self.session_mgr.create_session(
user_name=identity[IdentityKey.NAME],
user_org=identity.get(IdentityKey.ORG, ""),
user_role=identity.get(IdentityKey.ROLE, ""),
)
conn.append_string("OK")
conn.append_token(session.token)
def handle_logout(self, conn: Connection, args: List[str]):
if self.authenticator and self.session_mgr:
token = conn.get_prop(ConnProps.TOKEN)
if token:
self.session_mgr.end_session(token)
conn.append_string("OK")
def pre_command(self, conn: Connection, args: List[str]):
if args[0] in [InternalCommands.PWD_LOGIN, InternalCommands.CERT_LOGIN, InternalCommands.CHECK_SESSION]:
# skip login and check session commands
return True
# validate token
req_json = conn.request
token = None
data = req_json["data"]
for item in data:
it = item["type"]
if it == "token":
token = item["data"]
break
if token is None:
conn.append_error("not authenticated - no token")
return False
sess = self.session_mgr.get_session(token)
if sess:
assert isinstance(sess, Session)
sess.mark_active()
conn.set_prop(ConnProps.SESSION, sess)
conn.set_prop(ConnProps.USER_NAME, sess.user_name)
conn.set_prop(ConnProps.USER_ORG, sess.user_org)
conn.set_prop(ConnProps.USER_ROLE, sess.user_role)
conn.set_prop(ConnProps.TOKEN, token)
return True
else:
conn.append_error("session_inactive")
conn.append_string(
"user not authenticated or session timed out after {} seconds of inactivity - logged out".format(
self.session_mgr.idle_timeout
)
)
return False
def close(self):
self.session_mgr.shutdown()
| NVFlare-main | nvflare/fuel/hci/server/login.py |
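The token validation in LoginModule.pre_command above scans the request's "data" items for one of type "token". A self-contained sketch of that extraction:

def extract_token(req_json: dict):
    # return the first token item found in the request payload, if any
    for item in req_json.get("data", []):
        if item.get("type") == "token":
            return item.get("data")
    return None

req = {"data": [{"type": "string", "data": "hello"}, {"type": "token", "data": "abc123"}]}
assert extract_token(req) == "abc123"
assert extract_token({"data": []}) is None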
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
from typing import List
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import MetaStatusValue, make_meta
from nvflare.fuel.hci.reg import CommandEntry
from nvflare.fuel.sec.authz import AuthorizationService, AuthzContext, Person
from .constants import ConnProps
from .reg import CommandFilter
log = logging.getLogger(__name__)
class PreAuthzReturnCode(enum.Enum):
OK = 0 # command preprocessed successfully, and no authz needed
ERROR = 1 # error occurred in command processing
REQUIRE_AUTHZ = 2 # command preprocessed successfully, further authz required
def command_handler_func_signature(conn: Connection, args: List[str]):
pass
def command_authz_func_signature(conn: Connection, args: List[str]) -> PreAuthzReturnCode:
pass
class AuthzFilter(CommandFilter):
def __init__(self):
"""Filter for authorization of admin commands."""
CommandFilter.__init__(self)
def pre_command(self, conn: Connection, args: List[str]):
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY, None)
if not cmd_entry:
return True
assert isinstance(cmd_entry, CommandEntry)
authz_func = cmd_entry.authz_func
if not authz_func:
return True
return_code = authz_func(conn, args)
if return_code == PreAuthzReturnCode.OK:
return True
if return_code == PreAuthzReturnCode.ERROR:
return False
# authz required - the command name is the name of the right to be checked!
user = Person(
name=conn.get_prop(ConnProps.USER_NAME, ""),
org=conn.get_prop(ConnProps.USER_ORG, ""),
role=conn.get_prop(ConnProps.USER_ROLE, ""),
)
submitter = Person(
name=conn.get_prop(ConnProps.SUBMITTER_NAME, ""),
org=conn.get_prop(ConnProps.SUBMITTER_ORG, ""),
            role=conn.get_prop(ConnProps.SUBMITTER_ROLE, ""),
)
ctx = AuthzContext(user=user, submitter=submitter, right=cmd_entry.name)
log.debug("User: {} Submitter: {} Right: {}".format(user, submitter, cmd_entry.name))
authorized, err = AuthorizationService.authorize(ctx)
if err:
conn.append_error(f"Authorization Error: {err}", meta=make_meta(MetaStatusValue.NOT_AUTHORIZED, err))
return False
if not authorized:
conn.append_error(
"This action is not authorized", meta=make_meta(MetaStatusValue.NOT_AUTHORIZED, "not authorized")
)
return False
return True
| NVFlare-main | nvflare/fuel/hci/server/authz.py |
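The key idea in AuthzFilter above is that the command name doubles as the right to check for the requesting user. The toy policy below illustrates that mapping; it is a stand-in for the real AuthorizationService, which evaluates site authorization policies rather than a hard-coded dict.

POLICY = {
    "project_admin": {"submit_job", "abort_job", "shutdown"},
    "lead": {"submit_job"},
}

def authorize(role: str, right: str) -> bool:
    # grant the action only if the role holds the named right
    return right in POLICY.get(role, set())

assert authorize("project_admin", "abort_job")
assert not authorize("lead", "abort_job")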
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from typing import List
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import InternalCommands
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.security import make_session_token
from nvflare.fuel.utils.time_utils import time_to_string
LIST_SESSIONS_CMD_NAME = InternalCommands.LIST_SESSIONS
CHECK_SESSION_CMD_NAME = InternalCommands.CHECK_SESSION
class Session(object):
def __init__(self):
"""Object keeping track of an admin client session with token and time data."""
self.user_name = None
self.user_org = None
self.user_role = None
self.start_time = None
self.last_active_time = None
self.token = None
def mark_active(self):
self.last_active_time = time.time()
class SessionManager(CommandModule):
def __init__(self, idle_timeout=3600, monitor_interval=5):
"""Session manager.
Args:
idle_timeout: session idle timeout
monitor_interval: interval for obtaining updates when monitoring
"""
if monitor_interval <= 0:
monitor_interval = 5
self.sess_update_lock = threading.Lock()
self.sessions = {} # token => Session
self.idle_timeout = idle_timeout
self.monitor_interval = monitor_interval
self.asked_to_stop = False
self.monitor = threading.Thread(target=self.monitor_sessions)
self.monitor.daemon = True
self.monitor.start()
def monitor_sessions(self):
"""Runs loop in a thread to end sessions that time out."""
while True:
# print('checking for dead sessions ...')
if self.asked_to_stop:
break
            dead_sess = None
            # hold the lock while scanning: create_session/end_session may mutate the dict concurrently
            with self.sess_update_lock:
                for _, sess in self.sessions.items():
                    time_passed = time.time() - sess.last_active_time
                    # print('time passed: {} secs'.format(time_passed))
                    if time_passed > self.idle_timeout:
                        dead_sess = sess
                        break
if dead_sess:
# print('ending dead session {}'.format(dead_sess.token))
self.end_session(dead_sess.token)
else:
# print('no dead sessions found')
pass
time.sleep(self.monitor_interval)
def shutdown(self):
self.asked_to_stop = True
# self.monitor.join(timeout=10)
def create_session(self, user_name, user_org, user_role):
"""Creates new session with a new session token.
Args:
user_name: user name for session
user_org: org of the user
user_role: user's role
Returns: Session
"""
token = make_session_token()
sess = Session()
sess.user_name = user_name
sess.user_role = user_role
sess.user_org = user_org
sess.start_time = time.time()
sess.last_active_time = sess.start_time
sess.token = token
with self.sess_update_lock:
self.sessions[token] = sess
return sess
def get_session(self, token: str):
with self.sess_update_lock:
return self.sessions.get(token)
def get_sessions(self):
result = []
with self.sess_update_lock:
for _, s in self.sessions.items():
result.append(s)
return result
def end_session(self, token):
with self.sess_update_lock:
self.sessions.pop(token, None)
def get_spec(self):
return CommandModuleSpec(
name="sess",
cmd_specs=[
CommandSpec(
name=LIST_SESSIONS_CMD_NAME,
description="list user sessions",
usage=LIST_SESSIONS_CMD_NAME,
handler_func=self.handle_list_sessions,
visible=False,
enabled=False,
),
CommandSpec(
name=CHECK_SESSION_CMD_NAME,
description="check if session is active",
usage=CHECK_SESSION_CMD_NAME,
handler_func=self.handle_check_session,
visible=False,
),
],
)
def handle_list_sessions(self, conn: Connection, args: List[str]):
"""Lists sessions and the details in a table.
Registered in the FedAdminServer with ``cmd_reg.register_module(sess_mgr)``.
"""
with self.sess_update_lock:
sess_list = list(self.sessions.values())
sess_list.sort(key=lambda x: x.user_name, reverse=False)
table = conn.append_table(["User", "Org", "Role", "Session ID", "Start", "Last Active", "Idle"])
for s in sess_list:
table.add_row(
[
s.user_name,
s.user_org,
s.user_role,
"{}".format(s.token),
time_to_string(s.start_time),
time_to_string(s.last_active_time),
"{}".format(time.time() - s.last_active_time),
]
)
def handle_check_session(self, conn: Connection, args: List[str]):
token = None
data = conn.request["data"]
for item in data:
it = item["type"]
if it == "token":
token = item["data"]
break
sess = self.get_session(token)
if sess:
conn.append_string("OK")
else:
conn.append_error("session_inactive")
conn.append_string(
"admin client session timed out after {} seconds of inactivity - logging out".format(self.idle_timeout)
)
| NVFlare-main | nvflare/fuel/hci/server/sess.py |
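A usage sketch for the SessionManager defined above, with timings shortened for illustration: create a session, look it up by token, refresh its activity, and end it.

mgr = SessionManager(idle_timeout=10, monitor_interval=1)
sess = mgr.create_session(user_name="admin", user_org="org1", user_role="project_admin")
assert mgr.get_session(sess.token) is sess
sess.mark_active()                       # refresh last_active_time to stay ahead of idle_timeout
mgr.end_session(sess.token)
assert mgr.get_session(sess.token) is None
mgr.shutdown()                           # stop the monitor thread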
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from nvflare.fuel.hci.chunk import MAX_CHUNK_SIZE, Sender
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import MetaKey, MetaStatusValue, make_meta
from nvflare.fuel.hci.server.constants import ConnProps
class _BytesSender:
def __init__(self, conn: Connection):
self.conn = conn
def send(self, data):
self.conn.flush_bytes(data)
class BinaryTransfer:
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
def download_file(self, conn: Connection, file_name):
download_dir = conn.get_prop(ConnProps.DOWNLOAD_DIR)
conn.binary_mode = True
full_path = os.path.join(download_dir, file_name)
if not os.path.exists(full_path):
self.logger.error(f"no such file: {full_path}")
return
if not os.path.isfile(full_path):
self.logger.error(f"not a file: {full_path}")
return
self.logger.debug(f"called to send {full_path} ...")
bytes_sender = _BytesSender(conn)
sender = Sender(send_data_func=bytes_sender.send)
buffer_size = MAX_CHUNK_SIZE
bytes_sent = 0
with open(full_path, mode="rb") as f:
chunk = f.read(buffer_size)
while chunk:
sender.send(chunk)
bytes_sent += len(chunk)
chunk = f.read(buffer_size)
sender.close()
self.logger.debug(f"finished sending {full_path}: {bytes_sent} bytes sent")
def download_folder(self, conn: Connection, folder_name: str, download_file_cmd_name: str, control_id: str):
download_dir = conn.get_prop(ConnProps.DOWNLOAD_DIR)
folder_path = os.path.join(download_dir, folder_name)
self.logger.debug(f"download_folder called for {folder_name}")
# return list of the files
files = []
for (dir_path, dir_names, file_names) in os.walk(folder_path):
for f in file_names:
p = os.path.join(dir_path, f)
p = os.path.relpath(p, folder_path)
p = os.path.join(folder_name, p)
files.append(p)
self.logger.debug(f"files of the folder: {files}")
conn.append_string(
"OK",
meta=make_meta(
MetaStatusValue.OK,
extra={MetaKey.FILES: files, MetaKey.CONTROL_ID: control_id, MetaKey.CMD_NAME: download_file_cmd_name},
),
)
user_name = conn.get_prop(ConnProps.USER_NAME, "?")
self.logger.info(f"downloaded {control_id} to user {user_name}")
| NVFlare-main | nvflare/fuel/hci/server/binary_transfer.py |
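download_file above streams a file in fixed-size chunks through a Sender rather than loading it whole. A stdlib-only sketch of that loop, where the chunk size and send callback are illustrative stand-ins:

def send_file_in_chunks(path: str, send, chunk_size: int = 64 * 1024) -> int:
    # read the file chunk by chunk and hand each chunk to the send function
    sent = 0
    with open(path, "rb") as f:
        chunk = f.read(chunk_size)
        while chunk:
            send(chunk)
            sent += len(chunk)
            chunk = f.read(chunk_size)
    return sent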
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from nvflare.fuel.common.ctx import SimpleContext
class EventType:
WAIT_FOR_SERVER_ADDR = "wait_for_server_addr"
SERVER_ADDR_OBTAINED = "server_addr_obtained"
SESSION_CLOSED = "session_closed" # close the current session
LOGIN_SUCCESS = "login_success" # logged in to server
LOGIN_FAILURE = "login_failure" # cannot log in to server
TRYING_LOGIN = "trying_login" # still try to log in
SP_ADDR_CHANGED = "sp_addr_changed" # service provider address changed
SESSION_TIMEOUT = "session_timeout" # server timed out current session
BEFORE_LOGIN = "before_login"
BEFORE_EXECUTE_CMD = "before_execute_cmd"
class EventPropKey:
MSG = "msg"
USER_NAME = "user_name"
CMD_NAME = "cmd_name"
CMD_CTX = "cmd_ctx"
CUSTOM_PROPS = "custom_props"
class EventContext(SimpleContext):
def get_custom_prop(self, key: str, default):
props = self.get_prop(EventPropKey.CUSTOM_PROPS)
if not props:
return default
return props.get(key, default)
def set_custom_prop(self, key: str, value):
props = self.get_prop(EventPropKey.CUSTOM_PROPS)
if not props:
props = {}
self.set_prop(EventPropKey.CUSTOM_PROPS, props)
props[key] = value
class EventHandler(ABC):
@abstractmethod
def handle_event(self, event_type: str, ctx: SimpleContext):
pass
| NVFlare-main | nvflare/fuel/hci/client/event.py |
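A minimal EventHandler following the interface above: report login outcomes. It assumes the EventType and EventPropKey constants defined in this module; ctx is the EventContext passed in by the API.

class LoginLogger(EventHandler):
    def handle_event(self, event_type: str, ctx):
        # only react to login results; other events pass through silently
        if event_type in (EventType.LOGIN_SUCCESS, EventType.LOGIN_FAILURE):
            print(f"{event_type}: {ctx.get_prop(EventPropKey.MSG)}")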
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.utils.json_scanner import Node
from nvflare.fuel.utils.wfconf import ConfigContext
from nvflare.private.json_configer import JsonConfigurator
from .event import EventHandler
FL_PACKAGES = ["nvflare"]
FL_MODULES = ["ha"]
class FLAdminClientStarterConfigurator(JsonConfigurator):
"""FL Admin Client startup configurator."""
def __init__(self, workspace: Workspace):
"""Uses the json configuration to start the FL admin client.
Args:
workspace: the workspace object
"""
base_pkgs = FL_PACKAGES
module_names = FL_MODULES
custom_dir = workspace.get_client_custom_dir()
if os.path.isdir(custom_dir):
sys.path.append(custom_dir)
admin_config_file_path = workspace.get_admin_startup_file_path()
config_files = [admin_config_file_path]
resources_file_path = workspace.get_resources_file_path()
if resources_file_path:
config_files.append(resources_file_path)
JsonConfigurator.__init__(
self,
config_file_name=config_files,
base_pkgs=base_pkgs,
module_names=module_names,
exclude_libs=True,
)
self.workspace = workspace
self.admin_config_file_path = config_files
self.overseer_agent = None
self.handlers = []
def process_config_element(self, config_ctx: ConfigContext, node: Node):
"""Process config element.
Args:
config_ctx: config context
node: element node
"""
element = node.element
path = node.path()
if path == "admin.overseer_agent":
self.overseer_agent = self.build_component(element)
return
if re.search(r"^handlers\.#[0-9]+$", path):
c = self.build_component(element)
if not isinstance(c, EventHandler):
raise ConfigError(f"component must be EventHandler but got {type(c)}")
self.handlers.append(c)
return
def start_config(self, config_ctx: ConfigContext):
"""Start the config process.
Args:
config_ctx: config context
"""
super().start_config(config_ctx)
try:
admin = self.config_data["admin"]
if admin.get("client_key"):
admin["client_key"] = self.workspace.get_file_path_in_startup(admin["client_key"])
if admin.get("client_cert"):
admin["client_cert"] = self.workspace.get_file_path_in_startup(admin["client_cert"])
if admin.get("ca_cert"):
admin["ca_cert"] = self.workspace.get_file_path_in_startup(admin["ca_cert"])
if admin.get("upload_dir"):
admin["upload_dir"] = self.workspace.get_file_path_in_root(admin["upload_dir"])
if admin.get("download_dir"):
admin["download_dir"] = self.workspace.get_file_path_in_root(admin["download_dir"])
except Exception:
raise ValueError(f"Client config error: '{self.admin_config_file_path}'")
| NVFlare-main | nvflare/fuel/hci/client/config.py |
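A quick check of the handler-path pattern used in process_config_element above: it matches only top-level elements of the "handlers" array, such as "handlers.#0".

import re

pat = re.compile(r"^handlers\.#[0-9]+$")
assert pat.search("handlers.#0")            # first element of the handlers array
assert pat.search("handlers.#12")           # any index works
assert not pat.search("admin.handlers.#0")  # nested paths do not match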
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import nvflare.fuel.hci.file_transfer_defs as ftd
from nvflare.fuel.hci.base64_utils import (
b64str_to_binary_file,
b64str_to_bytes,
b64str_to_text_file,
binary_file_to_b64str,
bytes_to_b64str,
text_file_to_b64str,
)
from nvflare.fuel.hci.cmd_arg_utils import join_args
from nvflare.fuel.hci.proto import MetaKey, ProtoKey
from nvflare.fuel.hci.reg import CommandEntry, CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.table import Table
from nvflare.fuel.utils.zip_utils import split_path, unzip_all_from_bytes, unzip_all_from_file, zip_directory_to_bytes
from nvflare.lighter.utils import load_private_key_file, sign_folders
from nvflare.security.logging import secure_format_exception, secure_log_traceback
from .api_spec import CommandContext, ReplyProcessor
from .api_status import APIStatus
def _server_cmd_name(name: str):
return ftd.SERVER_MODULE_NAME + "." + name
class _DownloadProcessor(ReplyProcessor):
"""Reply processor to handle downloads."""
def __init__(self, download_dir: str, str_to_file_func):
self.download_dir = download_dir
self.str_to_file_func = str_to_file_func
self.data_received = False
self.table = None
def reply_start(self, ctx: CommandContext, reply_json):
self.data_received = False
self.table = Table(["file", "size"])
def reply_done(self, ctx: CommandContext):
if not self.data_received:
ctx.set_command_result({"status": APIStatus.ERROR_PROTOCOL, "details": "protocol error - no data received"})
else:
command_result = ctx.get_command_result()
if command_result is None:
command_result = {}
command_result["status"] = APIStatus.SUCCESS
command_result["details"] = self.table
ctx.set_command_result(command_result)
def process_table(self, ctx: CommandContext, table: Table):
try:
rows = table.rows
if len(rows) < 1:
# no data
ctx.set_command_result({"status": APIStatus.ERROR_PROTOCOL, "details": "protocol error - no file data"})
return
for i in range(len(rows)):
if i == 0:
# this is header
continue
row = rows[i]
if len(row) < 1:
ctx.set_command_result(
{
"status": APIStatus.ERROR_PROTOCOL,
"details": "protocol error - missing file name",
}
)
return
if len(row) < 2:
ctx.set_command_result(
{
"status": APIStatus.ERROR_PROTOCOL,
"details": "protocol error - missing file data",
}
)
return
file_name = row[0]
encoded_str = row[1]
full_path = os.path.join(self.download_dir, file_name)
num_bytes = self.str_to_file_func(encoded_str, full_path)
self.table.add_row([file_name, str(num_bytes)])
self.data_received = True
except Exception as e:
secure_log_traceback()
ctx.set_command_result(
{
"status": APIStatus.ERROR_RUNTIME,
"details": f"exception processing file: {secure_format_exception(e)}",
}
)
class _DownloadFolderProcessor(ReplyProcessor):
"""Reply processor for handling downloading directories."""
def __init__(self, download_dir: str):
self.download_dir = download_dir
self.data_received = False
def reply_start(self, ctx: CommandContext, reply_json):
self.data_received = False
def reply_done(self, ctx: CommandContext):
if not self.data_received:
ctx.set_command_result({"status": APIStatus.ERROR_RUNTIME, "details": "protocol error - no data received"})
def process_error(self, ctx: CommandContext, err: str):
self.data_received = True
ctx.set_command_result({"status": APIStatus.ERROR_RUNTIME, "details": err})
def process_string(self, ctx: CommandContext, item: str):
try:
self.data_received = True
if item.startswith(ftd.DOWNLOAD_URL_MARKER):
ctx.set_command_result(
{
"status": APIStatus.SUCCESS,
"details": item,
}
)
else:
data_bytes = b64str_to_bytes(item)
unzip_all_from_bytes(data_bytes, self.download_dir)
ctx.set_command_result(
{
"status": APIStatus.SUCCESS,
"details": "Downloaded to dir {}".format(self.download_dir),
}
)
except Exception as e:
secure_log_traceback()
ctx.set_command_result(
{
"status": APIStatus.ERROR_RUNTIME,
"details": f"exception processing reply: {secure_format_exception(e)}",
}
)
class _FileReceiver:
def __init__(self, file_path):
self.file_path = file_path
self.tmp_name = f"{file_path}.tmp"
dir_name = os.path.dirname(file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if os.path.exists(file_path):
# remove existing file
os.remove(file_path)
self.tmp_file = open(self.tmp_name, "ab")
def close(self):
self.tmp_file.close()
os.rename(self.tmp_name, self.file_path)
def receive_data(self, data, start: int, length: int):
self.tmp_file.write(data[start : start + length])
class FileTransferModule(CommandModule):
"""Command module with commands relevant to file transfer."""
def __init__(self, upload_dir: str, download_dir: str):
if not os.path.isdir(upload_dir):
raise ValueError("upload_dir {} is not a valid dir".format(upload_dir))
if not os.path.isdir(download_dir):
raise ValueError("download_dir {} is not a valid dir".format(download_dir))
self.upload_dir = upload_dir
self.download_dir = download_dir
self.cmd_handlers = {
ftd.UPLOAD_FOLDER_FQN: self.upload_folder,
ftd.DOWNLOAD_FOLDER_FQN: self.download_folder,
ftd.PULL_BINARY_FQN: self.pull_binary_file,
ftd.PULL_FOLDER_FQN: self.pull_folder,
}
def get_spec(self):
return CommandModuleSpec(
name="file_transfer",
cmd_specs=[
CommandSpec(
name="upload_text",
description="upload one or more text files in the upload_dir",
usage="upload_text file_name ...",
handler_func=self.upload_text_file,
visible=False,
),
CommandSpec(
name="download_text",
description="download one or more text files in the download_dir",
usage="download_text file_name ...",
handler_func=self.download_text_file,
visible=False,
),
CommandSpec(
name="upload_binary",
description="upload one or more binary files in the upload_dir",
usage="upload_binary file_name ...",
handler_func=self.upload_binary_file,
visible=False,
),
CommandSpec(
name="download_binary",
description="download one or more binary files in the download_dir",
usage="download_binary file_name ...",
handler_func=self.download_binary_file,
visible=False,
),
CommandSpec(
name="pull_binary",
description="download one binary files in the download_dir",
usage="pull_binary control_id file_name",
handler_func=self.pull_binary_file,
visible=False,
),
CommandSpec(
name="upload_folder",
description="Submit application to the server",
usage="submit_job job_folder",
handler_func=self.upload_folder,
visible=False,
),
CommandSpec(
name="download_folder",
description="download job contents from the server",
usage="download_job job_id",
handler_func=self.download_folder,
visible=False,
),
CommandSpec(
name="info",
description="show folder setup info",
usage="info",
handler_func=self.info,
),
],
)
def generate_module_spec(self, server_cmd_spec: CommandSpec):
"""
Generate a new module spec based on a server command
Args:
server_cmd_spec:
Returns:
"""
# print('generating cmd module for {}'.format(server_cmd_spec.client_cmd))
if not server_cmd_spec.client_cmd:
return None
handler = self.cmd_handlers.get(server_cmd_spec.client_cmd)
if handler is None:
print("no cmd handler found for {}".format(server_cmd_spec.client_cmd))
return None
return CommandModuleSpec(
name=server_cmd_spec.scope_name,
cmd_specs=[
CommandSpec(
name=server_cmd_spec.name,
description=server_cmd_spec.description,
usage=server_cmd_spec.usage,
handler_func=handler,
visible=True,
)
],
)
def upload_file(self, args, ctx: CommandContext, cmd_name, file_to_str_func):
full_cmd_name = _server_cmd_name(cmd_name)
if len(args) < 2:
return {"status": APIStatus.ERROR_SYNTAX, "details": "syntax error: missing file names"}
parts = [full_cmd_name]
for i in range(1, len(args)):
file_name = args[i]
full_path = os.path.join(self.upload_dir, file_name)
if not os.path.isfile(full_path):
return {"status": APIStatus.ERROR_RUNTIME, "details": f"no such file: {full_path}"}
encoded_string = file_to_str_func(full_path)
parts.append(file_name)
parts.append(encoded_string)
command = join_args(parts)
api = ctx.get_api()
return api.server_execute(command)
def upload_text_file(self, args, ctx: CommandContext):
return self.upload_file(args, ctx, ftd.SERVER_CMD_UPLOAD_TEXT, text_file_to_b64str)
def upload_binary_file(self, args, ctx: CommandContext):
return self.upload_file(args, ctx, ftd.SERVER_CMD_UPLOAD_BINARY, binary_file_to_b64str)
def download_file(self, args, ctx: CommandContext, cmd_name, str_to_file_func):
full_cmd_name = _server_cmd_name(cmd_name)
if len(args) < 2:
return {"status": APIStatus.ERROR_SYNTAX, "details": "syntax error: missing file names"}
parts = [full_cmd_name]
for i in range(1, len(args)):
file_name = args[i]
parts.append(file_name)
command = join_args(parts)
reply_processor = _DownloadProcessor(self.download_dir, str_to_file_func)
api = ctx.get_api()
return api.server_execute(command, reply_processor)
def download_text_file(self, args, ctx: CommandContext):
return self.download_file(args, ctx, ftd.SERVER_CMD_DOWNLOAD_TEXT, b64str_to_text_file)
def download_binary_file(self, args, ctx: CommandContext):
return self.download_file(args, ctx, ftd.SERVER_CMD_DOWNLOAD_BINARY, b64str_to_binary_file)
def pull_binary_file(self, args, ctx: CommandContext):
cmd_entry = ctx.get_command_entry()
if len(args) != 3:
return {ProtoKey.STATUS: APIStatus.ERROR_SYNTAX, ProtoKey.DETAILS: "usage: {}".format(cmd_entry.usage)}
file_name = args[2]
control_id = args[1]
parts = [cmd_entry.full_command_name(), control_id, file_name]
command = join_args(parts)
file_path = os.path.join(self.download_dir, file_name)
receiver = _FileReceiver(file_path)
print(f"downloading file: {file_path}")
api = ctx.get_api()
ctx.set_bytes_receiver(receiver.receive_data)
result = api.server_execute(command, cmd_ctx=ctx)
if result.get(ProtoKey.STATUS) == APIStatus.SUCCESS:
receiver.close()
dir_name, ext = os.path.splitext(file_path)
if ext == ".zip":
# unzip the file
api.debug(f"unzipping file {file_path} to {dir_name}")
os.makedirs(dir_name, exist_ok=True)
unzip_all_from_file(file_path, dir_name)
return result
def pull_folder(self, args, ctx: CommandContext):
cmd_entry = ctx.get_command_entry()
if len(args) != 2:
return {ProtoKey.STATUS: APIStatus.ERROR_SYNTAX, ProtoKey.DETAILS: "usage: {}".format(cmd_entry.usage)}
folder_name = args[1]
parts = [cmd_entry.full_command_name(), folder_name]
command = join_args(parts)
api = ctx.get_api()
result = api.server_execute(command)
meta = result.get(ProtoKey.META)
if not meta:
return result
file_names = meta.get(MetaKey.FILES)
ctl_id = meta.get(MetaKey.CONTROL_ID)
api.debug(f"received ctl_id {ctl_id}, file names: {file_names}")
if not file_names:
return result
cmd_name = meta.get(MetaKey.CMD_NAME)
for file_name in file_names:
command = f"{cmd_name} {ctl_id} {file_name}"
reply = api.do_command(command)
if reply.get(ProtoKey.STATUS) != APIStatus.SUCCESS:
return reply
return {ProtoKey.STATUS: APIStatus.SUCCESS, ProtoKey.DETAILS: "OK"}
def upload_folder(self, args, ctx: CommandContext):
cmd_entry = ctx.get_command_entry()
assert isinstance(cmd_entry, CommandEntry)
if len(args) != 2:
return {"status": APIStatus.ERROR_SYNTAX, "details": "usage: {}".format(cmd_entry.usage)}
folder_name = args[1]
if folder_name.endswith("/"):
folder_name = folder_name.rstrip("/")
full_path = os.path.join(self.upload_dir, folder_name)
if not os.path.isdir(full_path):
return {"status": APIStatus.ERROR_RUNTIME, "details": f"'{full_path}' is not a valid folder."}
# sign folders and files
api = ctx.get_api()
if not api.insecure:
# we are not in POC mode
client_key_file_path = api.client_key
private_key = load_private_key_file(client_key_file_path)
sign_folders(full_path, private_key, api.client_cert)
# zip the data
data = zip_directory_to_bytes(self.upload_dir, folder_name)
folder_name = split_path(full_path)[1]
b64str = bytes_to_b64str(data)
parts = [cmd_entry.full_command_name(), folder_name, b64str]
command = join_args(parts)
return api.server_execute(command)
def download_folder(self, args, ctx: CommandContext):
cmd_entry = ctx.get_command_entry()
assert isinstance(cmd_entry, CommandEntry)
if len(args) != 2:
return {"status": APIStatus.ERROR_SYNTAX, "details": "usage: {}".format(cmd_entry.usage)}
job_id = args[1]
parts = [cmd_entry.full_command_name(), job_id]
command = join_args(parts)
reply_processor = _DownloadFolderProcessor(self.download_dir)
api = ctx.get_api()
return api.server_execute(command, reply_processor)
def info(self, args, ctx: CommandContext):
msg = f"Local Upload Source: {self.upload_dir}\n"
msg += f"Local Download Destination: {self.download_dir}\n"
return {"status": "ok", "details": msg}
| NVFlare-main | nvflare/fuel/hci/client/file_transfer.py |
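_FileReceiver above writes incoming bytes to a ".tmp" file and renames it into place only when the transfer completes, so a partial download is never mistaken for a finished file. A stdlib sketch of that pattern:

import os

class AtomicFileWriter:
    def __init__(self, path: str):
        self.path = path
        self.tmp = path + ".tmp"
        self.f = open(self.tmp, "wb")
    def write(self, data: bytes):
        # append to the temp file while the transfer is in flight
        self.f.write(data)
    def close(self):
        # publish the file only once it is complete; rename is atomic on POSIX
        self.f.close()
        os.rename(self.tmp, self.path)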
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .api_spec import ServiceFinder
class StaticServiceFinder(ServiceFinder):
def __init__(self, host: str, port: int):
self.host = host
self.port = port
self.ssid = "1234"
def start(self, service_address_changed_cb):
service_address_changed_cb(self.host, self.port, self.ssid)
def stop(self):
pass
| NVFlare-main | nvflare/fuel/hci/client/static_service_finder.py |
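A usage sketch for the StaticServiceFinder above: start() immediately reports the fixed address through the callback, which is all a static single-server deployment needs.

def on_addr(host, port, ssid):
    print(f"primary SP: {host}:{port} (ssid={ssid})")

finder = StaticServiceFinder("localhost", 8003)
finder.start(on_addr)   # prints: primary SP: localhost:8003 (ssid=1234)
finder.stop()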
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/hci/client/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from .api_spec import ServiceFinder
class RRServiceFinder(ServiceFinder):
def __init__(self, change_interval, host1: str, port1: int, host2: str, port2: int):
self.host1 = host1
self.port1 = port1
self.host2 = host2
self.port2 = port2
self.change_interval = change_interval
self.thread = None
self.stop_asked = False
def start(self, service_address_changed_cb):
self.thread = threading.Thread(target=self._gen_address, args=(service_address_changed_cb,), daemon=True)
self.thread.start()
def _gen_address(self, service_address_changed_cb):
last_port = self.port1
last_change_time = None
while True:
if self.stop_asked:
return
if not last_change_time or time.time() - last_change_time >= self.change_interval:
last_change_time = time.time()
if last_port == self.port1:
h = self.host2
p = self.port2
else:
h = self.host1
p = self.port1
service_address_changed_cb(h, p, "1234")
last_port = p
time.sleep(0.2)
def stop(self):
self.stop_asked = True
if self.thread and self.thread.is_alive():
self.thread.join()
print("Service finder stopped")
| NVFlare-main | nvflare/fuel/hci/client/rr_service_finder.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class APIStatus(str, Enum):
"""Constants for the valid status options for the status of FLAdminAPIResponse."""
SUCCESS = "SUCCESS" # command issues successfully
ERROR_PROTOCOL = (
"ERROR_PROTOCOL" # the payload/data is not following the correct format/protocol expected by the server
)
ERROR_CERT = "ERROR_CERT" # key or certs are incorrect
ERROR_AUTHENTICATION = "ERROR_AUTHENTICATION" # authentication failed, need to log in
ERROR_AUTHORIZATION = "ERROR_AUTHORIZATION" # authorization failed, permissions
ERROR_SYNTAX = "ERROR_SYNTAX" # command syntax incorrect
ERROR_RUNTIME = "ERROR_RUNTIME" # various errors at runtime depending on the command
ERROR_INVALID_CLIENT = "ERROR_INVALID_CLIENT" # wrong/invalid client names exists in command
ERROR_INACTIVE_SESSION = "ERROR_INACTIVE_SESSION" # admin client session is inactive
ERROR_SERVER_CONNECTION = "ERROR_SERVER_CONNECTION" # server connection error
| NVFlare-main | nvflare/fuel/hci/client/api_status.py |
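A typical result-handling sketch: AdminAPI calls return a dict whose "status" field carries one of the APIStatus values above. The result dict here is illustrative.

result = {"status": APIStatus.SUCCESS, "details": "OK"}
if result["status"] == APIStatus.SUCCESS:
    print(result["details"])
elif result["status"] == APIStatus.ERROR_INACTIVE_SESSION:
    print("session expired - need to log in again")
else:
    print(f"command failed: {result['details']}")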
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import socket
import ssl
import threading
import time
from datetime import datetime
from typing import List, Optional
from nvflare.fuel.hci.client.event import EventContext, EventHandler, EventPropKey, EventType
from nvflare.fuel.hci.cmd_arg_utils import split_to_args
from nvflare.fuel.hci.conn import Connection, receive_and_process, receive_bytes_and_process
from nvflare.fuel.hci.proto import ConfirmMethod, InternalCommands, MetaKey, ProtoKey, make_error
from nvflare.fuel.hci.reg import CommandEntry, CommandModule, CommandRegister
from nvflare.fuel.hci.table import Table
from nvflare.fuel.utils.fsm import FSM, State
from nvflare.security.logging import secure_format_exception, secure_log_traceback
from .api_spec import (
AdminAPISpec,
ApiPocValue,
CommandContext,
CommandCtxKey,
CommandInfo,
ReplyProcessor,
ServiceFinder,
)
from .api_status import APIStatus
_CMD_TYPE_UNKNOWN = 0
_CMD_TYPE_CLIENT = 1
_CMD_TYPE_SERVER = 2
MAX_AUTO_LOGIN_TRIES = 300
AUTO_LOGIN_INTERVAL = 1.0
class ResultKey(object):
STATUS = ProtoKey.STATUS
DETAILS = ProtoKey.DETAILS
META = ProtoKey.META
class _ServerReplyJsonProcessor(object):
def __init__(self, ctx: CommandContext):
if not isinstance(ctx, CommandContext):
raise TypeError(f"ctx is not an instance of CommandContext. but get {type(ctx)}")
self.ctx = ctx
def process_server_reply(self, resp):
"""Process the server reply and store the status/details into API's `command_result`
NOTE: this func is used for receive_and_process(), which is defined by conn!
This method does not tale CommandContext!
Args:
resp: The raw response that returns by the server.
"""
api = self.ctx.get_api()
api.debug("Server Reply: {}".format(resp))
ctx = self.ctx
# this resp is what is usually directly used to return, straight from server
ctx.set_command_result(resp)
reply_processor = ctx.get_reply_processor()
if reply_processor is None:
reply_processor = _DefaultReplyProcessor()
reply_processor.reply_start(ctx, resp)
if resp is not None:
data = resp[ProtoKey.DATA]
for item in data:
it = item[ProtoKey.TYPE]
if it == ProtoKey.STRING:
reply_processor.process_string(ctx, item[ProtoKey.DATA])
elif it == ProtoKey.SUCCESS:
reply_processor.process_success(ctx, item[ProtoKey.DATA])
elif it == ProtoKey.ERROR:
reply_processor.process_error(ctx, item[ProtoKey.DATA])
break
elif it == ProtoKey.TABLE:
table = Table(None)
table.set_rows(item[ProtoKey.ROWS])
reply_processor.process_table(ctx, table)
elif it == ProtoKey.DICT:
reply_processor.process_dict(ctx, item[ProtoKey.DATA])
elif it == ProtoKey.TOKEN:
reply_processor.process_token(ctx, item[ProtoKey.DATA])
elif it == ProtoKey.SHUTDOWN:
reply_processor.process_shutdown(ctx, item[ProtoKey.DATA])
break
else:
reply_processor.protocol_error(ctx, "Invalid item type: " + it)
break
meta = resp.get(ProtoKey.META)
if meta:
ctx.set_meta(meta)
else:
reply_processor.protocol_error(ctx, "Protocol Error")
reply_processor.reply_done(ctx)
class _DefaultReplyProcessor(ReplyProcessor):
def process_shutdown(self, ctx: CommandContext, msg: str):
api = ctx.get_prop(CommandCtxKey.API)
api.shutdown_received = True
api.shutdown_msg = msg
class _LoginReplyProcessor(ReplyProcessor):
"""Reply processor for handling login and setting the token for the admin client."""
def process_string(self, ctx: CommandContext, item: str):
api = ctx.get_api()
api.login_result = item
def process_token(self, ctx: CommandContext, token: str):
api = ctx.get_api()
api.token = token
class _CmdListReplyProcessor(ReplyProcessor):
"""Reply processor to register available commands after getting back a table of commands from the server."""
def process_table(self, ctx: CommandContext, table: Table):
api = ctx.get_api()
for i in range(len(table.rows)):
if i == 0:
# this is header
continue
row = table.rows[i]
if len(row) < 5:
return
scope = row[0]
cmd_name = row[1]
desc = row[2]
usage = row[3]
confirm = row[4]
client_cmd = None
visible = True
if len(row) > 5:
client_cmd = row[5]
if len(row) > 6:
visible = row[6].lower() in ["true", "yes"]
# if confirm == 'auth' and not client.require_login:
# the user is not authenticated - skip this command
# continue
api.server_cmd_reg.add_command(
scope_name=scope,
cmd_name=cmd_name,
desc=desc,
usage=usage,
handler=None,
authz_func=None,
visible=visible,
confirm=confirm,
client_cmd=client_cmd,
map_client_cmd=True,
)
api.server_cmd_received = True
_STATE_NAME_WAIT_FOR_SERVER_ADDR = "wait_for_server_addr"
_STATE_NAME_LOGIN = "login"
_STATE_NAME_OPERATE = "operate"
_SESSION_LOGGING_OUT = "session is logging out"
class _WaitForServerAddress(State):
def __init__(self, api):
State.__init__(self, _STATE_NAME_WAIT_FOR_SERVER_ADDR)
self.api = api
def execute(self, **kwargs):
api = self.api
api.fire_session_event(EventType.WAIT_FOR_SERVER_ADDR, "Trying to obtain server address")
with api.new_addr_lock:
if api.new_host and api.new_port and api.new_ssid:
api.fire_session_event(
EventType.SERVER_ADDR_OBTAINED, f"Obtained server address: {api.new_host}:{api.new_port}"
)
return _STATE_NAME_LOGIN
else:
# stay here
return ""
class _TryLogin(State):
def __init__(self, api):
State.__init__(self, _STATE_NAME_LOGIN)
self.api = api
def enter(self):
api = self.api
api.server_sess_active = False
# use lock here since the service finder (in another thread) could change the
# address at this moment
with api.new_addr_lock:
new_host = api.new_host
new_port = api.new_port
new_ssid = api.new_ssid
# set the address for login
with api.addr_lock:
api.host = new_host
api.port = new_port
api.ssid = new_ssid
def execute(self, **kwargs):
api = self.api
api.fire_session_event(EventType.BEFORE_LOGIN, "")
result = api.auto_login()
if result[ResultKey.STATUS] == APIStatus.SUCCESS:
api.server_sess_active = True
api.fire_session_event(
EventType.LOGIN_SUCCESS, f"Logged into server at {api.host}:{api.port} with SSID: {api.ssid}"
)
return _STATE_NAME_OPERATE
details = result.get(ResultKey.DETAILS, "")
if details != _SESSION_LOGGING_OUT:
api.fire_session_event(EventType.LOGIN_FAILURE, details)
return FSM.STATE_NAME_EXIT
class _Operate(State):
def __init__(self, api, sess_check_interval):
State.__init__(self, _STATE_NAME_OPERATE)
self.api = api
self.last_sess_check_time = None
self.sess_check_interval = sess_check_interval
def enter(self):
self.api.server_sess_active = True
def execute(self, **kwargs):
# check whether server addr has changed
api = self.api
with api.new_addr_lock:
new_host = api.new_host
new_port = api.new_port
new_ssid = api.new_ssid
with api.addr_lock:
cur_host = api.host
cur_port = api.port
cur_ssid = api.ssid
if new_host != cur_host or new_port != cur_port or cur_ssid != new_ssid:
# need to re-login
api.fire_session_event(EventType.SP_ADDR_CHANGED, f"Server address changed to {new_host}:{new_port}")
return _STATE_NAME_LOGIN
# check server session status
if not self.sess_check_interval:
return ""
if not self.last_sess_check_time or time.time() - self.last_sess_check_time >= self.sess_check_interval:
self.last_sess_check_time = time.time()
result = api.check_session_status_on_server()
details = result.get(ResultKey.DETAILS, "")
status = result[ResultKey.STATUS]
            if status == APIStatus.ERROR_INACTIVE_SESSION:
if details != _SESSION_LOGGING_OUT:
api.fire_session_event(EventType.SESSION_TIMEOUT, details)
# end the session
return FSM.STATE_NAME_EXIT
return ""
class AdminAPI(AdminAPISpec):
def __init__(
self,
user_name: str,
service_finder: ServiceFinder,
ca_cert: str = "",
client_cert: str = "",
client_key: str = "",
upload_dir: str = "",
download_dir: str = "",
cmd_modules: Optional[List] = None,
insecure: bool = False,
debug: bool = False,
session_timeout_interval=None,
session_status_check_interval=None,
auto_login_max_tries: int = 5,
event_handlers=None,
):
"""API to keep certs, keys and connection information and to execute admin commands through do_command.
Args:
ca_cert: path to CA Cert file, by default provisioned rootCA.pem
client_cert: path to admin client Cert file, by default provisioned as client.crt
client_key: path to admin client Key file, by default provisioned as client.key
upload_dir: File transfer upload directory. Folders uploaded to the server to be deployed must be here. Folder must already exist and be accessible.
download_dir: File transfer download directory. Can be same as upload_dir. Folder must already exist and be accessible.
cmd_modules: command modules to load and register. Note that FileTransferModule is initialized here with upload_dir and download_dir if cmd_modules is None.
service_finder: used to obtain the primary service provider to set the host and port of the active server
user_name: Username to authenticate with FL server
            insecure: Whether to run in insecure mode, without secure communication. False by default.
debug: Whether to print debug messages, which can help with diagnosing problems. False by default.
            session_timeout_interval: if specified, automatically close the session after it has been inactive for this long, in seconds
            session_status_check_interval: how often to check session status with the server, in seconds
auto_login_max_tries: maximum number of tries to auto-login.
"""
super().__init__()
if cmd_modules is None:
from .file_transfer import FileTransferModule
cmd_modules = [FileTransferModule(upload_dir=upload_dir, download_dir=download_dir)]
elif not isinstance(cmd_modules, list):
raise TypeError("cmd_modules must be a list, but got {}".format(type(cmd_modules)))
else:
for m in cmd_modules:
if not isinstance(m, CommandModule):
raise TypeError(
"cmd_modules must be a list of CommandModule, but got element of type {}".format(type(m))
)
if not isinstance(service_finder, ServiceFinder):
raise TypeError("service_finder should be ServiceFinder but got {}".format(type(service_finder)))
cmd_module = service_finder.get_command_module()
if cmd_module:
cmd_modules.append(cmd_module)
if event_handlers:
if not isinstance(event_handlers, list):
raise TypeError(f"event_handlers must be a list but got {type(event_handlers)}")
for h in event_handlers:
if not isinstance(h, EventHandler):
raise TypeError(f"item in event_handlers must be EventHandler but got {type(h)}")
self.event_handlers = event_handlers
self.service_finder = service_finder
self.host = None
self.port = None
self.ssid = None
self.addr_lock = threading.Lock()
self.new_host = None
self.new_port = None
self.new_ssid = None
self.new_addr_lock = threading.Lock()
self.poc_key = None
self.insecure = insecure
if self.insecure:
self.poc_key = ApiPocValue.ADMIN
else:
if len(ca_cert) <= 0:
raise Exception("missing CA Cert file name")
self.ca_cert = ca_cert
if len(client_cert) <= 0:
raise Exception("missing Client Cert file name")
self.client_cert = client_cert
if len(client_key) <= 0:
raise Exception("missing Client Key file name")
self.client_key = client_key
self.service_finder.set_secure_context(
ca_cert_path=self.ca_cert, cert_path=self.client_cert, private_key_path=self.client_key
)
self._debug = debug
self.cmd_timeout = None
# for login
self.token = None
self.login_result = None
if not user_name:
raise Exception("user_name is required.")
self.user_name = user_name
self.server_cmd_reg = CommandRegister(app_ctx=self)
self.client_cmd_reg = CommandRegister(app_ctx=self)
self.server_cmd_received = False
self.all_cmds = []
self.cmd_modules = cmd_modules
# for shutdown
self.shutdown_received = False
self.shutdown_msg = None
self.server_sess_active = False
self.shutdown_asked = False
self.sess_monitor_thread = None
self.sess_monitor_active = False
# create the FSM for session monitoring
if auto_login_max_tries < 0 or auto_login_max_tries > MAX_AUTO_LOGIN_TRIES:
raise ValueError(f"auto_login_max_tries is out of range: [0, {MAX_AUTO_LOGIN_TRIES}]")
self.auto_login_max_tries = auto_login_max_tries
fsm = FSM("session monitor")
fsm.add_state(_WaitForServerAddress(self))
fsm.add_state(_TryLogin(self))
fsm.add_state(_Operate(self, session_status_check_interval))
self.fsm = fsm
self.session_timeout_interval = session_timeout_interval
self.last_sess_activity_time = time.time()
self.closed = False
self.in_logout = False
self.service_finder.start(self._handle_sp_address_change)
self._start_session_monitor()
def debug(self, msg):
if self._debug:
print(f"DEBUG: {msg}")
def fire_event(self, event_type: str, ctx: EventContext):
self.debug(f"firing event {event_type}")
if self.event_handlers:
for h in self.event_handlers:
h.handle_event(event_type, ctx)
def set_command_timeout(self, timeout: float):
if not isinstance(timeout, (int, float)):
raise TypeError(f"timeout must be a number but got {type(timeout)}")
timeout = float(timeout)
if timeout <= 0.0:
raise ValueError(f"invalid timeout value {timeout} - must be > 0.0")
self.cmd_timeout = timeout
def unset_command_timeout(self):
self.cmd_timeout = None
def _new_event_context(self):
ctx = EventContext()
ctx.set_prop(EventPropKey.USER_NAME, self.user_name)
return ctx
def fire_session_event(self, event_type: str, msg: str = ""):
ctx = self._new_event_context()
if msg:
ctx.set_prop(EventPropKey.MSG, msg)
self.fire_event(event_type, ctx)
def _handle_sp_address_change(self, host: str, port: int, ssid: str):
with self.addr_lock:
if host == self.host and port == self.port and ssid == self.ssid:
# no change
return
with self.new_addr_lock:
self.new_host = host
self.new_port = port
self.new_ssid = ssid
def _try_auto_login(self):
resp = None
for i in range(self.auto_login_max_tries):
try:
self.fire_session_event(EventType.TRYING_LOGIN, "Trying to login, please wait ...")
except Exception as ex:
print(f"exception handling event {EventType.TRYING_LOGIN}: {secure_format_exception(ex)}")
return {
ResultKey.STATUS: APIStatus.ERROR_RUNTIME,
ResultKey.DETAILS: f"exception handling event {EventType.TRYING_LOGIN}",
}
if self.insecure:
resp = self.login_with_insecure(username=self.user_name, poc_key=self.poc_key)
else:
resp = self.login(username=self.user_name)
if resp[ResultKey.STATUS] in [APIStatus.SUCCESS, APIStatus.ERROR_AUTHENTICATION, APIStatus.ERROR_CERT]:
return resp
time.sleep(AUTO_LOGIN_INTERVAL)
if resp is None:
resp = {
ResultKey.STATUS: APIStatus.ERROR_RUNTIME,
ResultKey.DETAILS: f"Auto login failed after {self.auto_login_max_tries} tries",
}
return resp
def auto_login(self):
try:
result = self._try_auto_login()
self.debug(f"login result is {result}")
except Exception as e:
result = {
ResultKey.STATUS: APIStatus.ERROR_RUNTIME,
ResultKey.DETAILS: f"Exception occurred ({secure_format_exception(e)}) when trying to login - please try later",
}
return result
def _load_client_cmds_from_modules(self, cmd_modules):
if cmd_modules:
for m in cmd_modules:
self.client_cmd_reg.register_module(m, include_invisible=False)
def _load_client_cmds_from_module_specs(self, cmd_module_specs):
if cmd_module_specs:
for m in cmd_module_specs:
self.client_cmd_reg.register_module_spec(m, include_invisible=False)
def register_command(self, cmd_entry):
self.all_cmds.append(cmd_entry.name)
def _start_session_monitor(self, interval=0.2):
        self.sess_monitor_thread = threading.Thread(target=self._monitor_session, args=(interval,), daemon=True)
        self.sess_monitor_active = True
        self.sess_monitor_thread.start()
def _close_session_monitor(self):
self.sess_monitor_active = False
if self.sess_monitor_thread:
self.sess_monitor_thread = None
self.debug("session monitor closed!")
def check_session_status_on_server(self):
return self.server_execute("_check_session")
def _do_monitor_session(self, interval):
self.fsm.set_current_state(_STATE_NAME_WAIT_FOR_SERVER_ADDR)
while True:
time.sleep(interval)
if not self.sess_monitor_active:
return ""
if self.shutdown_asked:
return ""
if self.shutdown_received:
return ""
# see whether the session should be timed out for inactivity
if (
self.last_sess_activity_time
and self.session_timeout_interval
and time.time() - self.last_sess_activity_time > self.session_timeout_interval
):
return "Your session is ended due to inactivity"
next_state = self.fsm.execute()
if next_state is None:
if self.fsm.error:
return self.fsm.error
else:
return ""
def _monitor_session(self, interval):
try:
msg = self._do_monitor_session(interval)
except Exception as e:
msg = f"exception occurred: {secure_format_exception(e)}"
self.server_sess_active = False
try:
self.fire_session_event(EventType.SESSION_CLOSED, msg)
except Exception as ex:
self.debug(f"exception occurred handling event {EventType.SESSION_CLOSED}: {secure_format_exception(ex)}")
# this is in the session_monitor thread - do not close the monitor, or we'll run into
# "cannot join current thread" error!
self.close(close_session_monitor=False)
def logout(self):
"""Send logout command to server."""
self.in_logout = True
resp = self.server_execute(InternalCommands.LOGOUT)
self.close()
return resp
def close(self, close_session_monitor: bool = True):
# this method can be called multiple times
if self.closed:
return
self.closed = True
self.service_finder.stop()
self.server_sess_active = False
self.shutdown_asked = True
if close_session_monitor:
self._close_session_monitor()
def _get_command_list_from_server(self) -> bool:
self.server_cmd_received = False
self.server_execute(InternalCommands.GET_CMD_LIST, _CmdListReplyProcessor())
self.server_cmd_reg.finalize(self.register_command)
        return self.server_cmd_received
def _login(self) -> dict:
result = self._get_command_list_from_server()
if not result:
return {
ResultKey.STATUS: APIStatus.ERROR_RUNTIME,
ResultKey.DETAILS: "Can't fetch command list from server.",
}
# prepare client modules
# we may have additional dynamically created cmd modules based on server commands
extra_module_specs = []
if self.server_cmd_reg.mapped_cmds:
for c in self.server_cmd_reg.mapped_cmds:
for m in self.cmd_modules:
new_module_spec = m.generate_module_spec(c)
if new_module_spec is not None:
extra_module_specs.append(new_module_spec)
self._load_client_cmds_from_modules(self.cmd_modules)
if extra_module_specs:
self._load_client_cmds_from_module_specs(extra_module_specs)
self.client_cmd_reg.finalize(self.register_command)
self.server_sess_active = True
return {ResultKey.STATUS: APIStatus.SUCCESS, ResultKey.DETAILS: "Login success"}
def is_ready(self) -> bool:
"""Whether the API is ready for executing commands."""
return self.server_sess_active
def login(self, username: str):
"""Login using certification files and retrieve server side commands.
Args:
username: Username
Returns:
A dict of status and details
"""
self.login_result = None
self.server_execute(f"{InternalCommands.CERT_LOGIN} {username}", _LoginReplyProcessor())
if self.login_result is None:
return {
ResultKey.STATUS: APIStatus.ERROR_RUNTIME,
ResultKey.DETAILS: "Communication Error - please try later",
}
elif self.login_result == "REJECT":
return {ResultKey.STATUS: APIStatus.ERROR_CERT, ResultKey.DETAILS: "Incorrect user name or certificate"}
return self._login()
def login_with_insecure(self, username: str, poc_key: str):
"""Login using key without certificates (POC has been updated so this should not be used for POC anymore).
Args:
username: Username
poc_key: key used for insecure admin login
Returns:
A dict of login status and details
"""
self.login_result = None
self.server_execute(f"{InternalCommands.PWD_LOGIN} {username} {poc_key}", _LoginReplyProcessor())
if self.login_result is None:
return {
ResultKey.STATUS: APIStatus.ERROR_RUNTIME,
ResultKey.DETAILS: "Communication Error - please try later",
}
elif self.login_result == "REJECT":
return {
ResultKey.STATUS: APIStatus.ERROR_AUTHENTICATION,
ResultKey.DETAILS: "Incorrect user name or password",
}
return self._login()
def _send_to_sock(self, sock, ctx: CommandContext):
command = ctx.get_command()
json_processor = ctx.get_json_processor()
process_json_func = json_processor.process_server_reply
conn = Connection(sock, self)
conn.append_command(command)
if self.token:
conn.append_token(self.token)
if self.cmd_timeout:
conn.update_meta({MetaKey.CMD_TIMEOUT: self.cmd_timeout})
custom_props = ctx.get_custom_props()
if custom_props:
conn.update_meta({MetaKey.CUSTOM_PROPS: custom_props})
conn.close()
receive_bytes_func = ctx.get_bytes_receiver()
if receive_bytes_func is not None:
self.debug("receive_bytes_and_process ...")
ok = receive_bytes_and_process(sock, receive_bytes_func)
if ok:
ctx.set_command_result({"status": APIStatus.SUCCESS, "details": "OK"})
else:
ctx.set_command_result({"status": APIStatus.ERROR_RUNTIME, "details": "error receive_bytes"})
else:
self.debug("receive_and_process ...")
ok = receive_and_process(sock, process_json_func)
if not ok:
process_json_func(
make_error("Failed to communicate with Admin Server {} on {}".format(self.host, self.port))
)
else:
self.debug("reply received!")
def _try_command(self, cmd_ctx: CommandContext):
"""Try to execute a command on server side.
Args:
cmd_ctx: The command to execute.
"""
        # process_json_func can't return data because of how "receive_and_process" is written.
self.debug(f"sending command '{cmd_ctx.get_command()}'")
json_processor = _ServerReplyJsonProcessor(cmd_ctx)
process_json_func = json_processor.process_server_reply
cmd_ctx.set_json_processor(json_processor)
event_ctx = self._new_event_context()
event_ctx.set_prop(EventPropKey.CMD_NAME, cmd_ctx.get_command_name())
event_ctx.set_prop(EventPropKey.CMD_CTX, cmd_ctx)
try:
self.fire_event(EventType.BEFORE_EXECUTE_CMD, event_ctx)
except Exception as ex:
secure_log_traceback()
process_json_func(
make_error(f"exception handling event {EventType.BEFORE_EXECUTE_CMD}: {secure_format_exception(ex)}")
)
return
# see whether any event handler has set "custom_props"
custom_props = event_ctx.get_prop(EventPropKey.CUSTOM_PROPS)
if custom_props:
cmd_ctx.set_custom_props(custom_props)
with self.addr_lock:
sp_host = self.host
sp_port = self.port
self.debug(f"use server address {sp_host}:{sp_port}")
try:
if not self.insecure:
# SSL communication
ssl_ctx = ssl.create_default_context()
ssl_ctx.minimum_version = ssl.TLSVersion.TLSv1_2
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
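                # require and verify the peer certificate against the provisioned CA, but skip hostname matching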
ssl_ctx.check_hostname = False
ssl_ctx.load_verify_locations(self.ca_cert)
ssl_ctx.load_cert_chain(certfile=self.client_cert, keyfile=self.client_key)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
with ssl_ctx.wrap_socket(sock, server_hostname=sp_host) as ssock:
ssock.connect((sp_host, sp_port))
self._send_to_sock(ssock, cmd_ctx)
else:
# without certs
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((sp_host, sp_port))
self._send_to_sock(sock, cmd_ctx)
except Exception as e:
if self._debug:
secure_log_traceback()
process_json_func(
make_error(
"Failed to communicate with Admin Server {} on {}: {}".format(
sp_host, sp_port, secure_format_exception(e)
)
)
)
def _get_command_detail(self, command):
"""Get command details
Args:
command (str): command
Returns: tuple of (cmd_type, cmd_name, args, entries)
"""
args = split_to_args(command)
cmd_name = args[0]
# check client side commands
entries = self.client_cmd_reg.get_command_entries(cmd_name)
if len(entries) > 0:
return _CMD_TYPE_CLIENT, cmd_name, args, entries
# check server side commands
entries = self.server_cmd_reg.get_command_entries(cmd_name)
if len(entries) > 0:
return _CMD_TYPE_SERVER, cmd_name, args, entries
return _CMD_TYPE_UNKNOWN, cmd_name, args, None
def check_command(self, command: str) -> CommandInfo:
"""Checks the specified command for processing info
Args:
command: command to be checked
Returns: command processing info
"""
cmd_type, cmd_name, args, entries = self._get_command_detail(command)
if cmd_type == _CMD_TYPE_UNKNOWN:
return CommandInfo.UNKNOWN
if len(entries) > 1:
return CommandInfo.AMBIGUOUS
ent = entries[0]
assert isinstance(ent, CommandEntry)
if ent.confirm == ConfirmMethod.AUTH:
return CommandInfo.CONFIRM_AUTH
elif ent.confirm == ConfirmMethod.PASSWORD:
return CommandInfo.CONFIRM_PWD
elif ent.confirm == ConfirmMethod.USER_NAME:
return CommandInfo.CONFIRM_USER_NAME
elif ent.confirm == ConfirmMethod.YESNO:
return CommandInfo.CONFIRM_YN
else:
return CommandInfo.OK
def _new_command_context(self, command, args, ent: CommandEntry):
ctx = CommandContext()
ctx.set_api(self)
ctx.set_command(command)
ctx.set_command_args(args)
ctx.set_command_entry(ent)
return ctx
def _do_client_command(self, command, args, ent: CommandEntry):
ctx = self._new_command_context(command, args, ent)
return_result = ent.handler(args, ctx)
result = ctx.get_command_result()
if return_result:
return return_result
if result is None:
return {ResultKey.STATUS: APIStatus.ERROR_RUNTIME, ResultKey.DETAILS: "Client did not respond"}
return result
def do_command(self, command):
"""A convenient method to call commands using string.
Args:
command (str): command
Returns:
            Object containing status and details (or the direct response from the server, which originally contained only time and data)
"""
self.last_sess_activity_time = time.time()
cmd_type, cmd_name, args, entries = self._get_command_detail(command)
if cmd_type == _CMD_TYPE_UNKNOWN:
return {
ResultKey.STATUS: APIStatus.ERROR_SYNTAX,
ResultKey.DETAILS: f"Command {cmd_name} not found",
}
if len(entries) > 1:
return {
ResultKey.STATUS: APIStatus.ERROR_SYNTAX,
ResultKey.DETAILS: f"Ambiguous command {cmd_name} - qualify with scope",
}
ent = entries[0]
if cmd_type == _CMD_TYPE_CLIENT:
return self._do_client_command(command=command, args=args, ent=ent)
# server command
if not self.server_sess_active:
return {
ResultKey.STATUS: APIStatus.ERROR_INACTIVE_SESSION,
ResultKey.DETAILS: "Session is inactive, please try later",
}
return self.server_execute(command, cmd_entry=ent)
def server_execute(self, command, reply_processor=None, cmd_entry=None, cmd_ctx=None):
if self.in_logout:
return {ResultKey.STATUS: APIStatus.SUCCESS, ResultKey.DETAILS: "session is logging out"}
args = split_to_args(command)
if cmd_ctx:
ctx = cmd_ctx
else:
ctx = self._new_command_context(command, args, cmd_entry)
start = time.time()
ctx.set_reply_processor(reply_processor)
self._try_command(ctx)
secs = time.time() - start
usecs = int(secs * 1000000)
self.debug(f"server_execute Done [{usecs} usecs] {datetime.now()}")
result = ctx.get_command_result()
meta = ctx.get_meta()
if result is None:
return {ResultKey.STATUS: APIStatus.ERROR_SERVER_CONNECTION, ResultKey.DETAILS: "Server did not respond"}
if meta:
result[ResultKey.META] = meta
if ResultKey.STATUS not in result:
result[ResultKey.STATUS] = self._determine_api_status(result)
return result
def _determine_api_status(self, result):
status = result.get(ResultKey.STATUS)
if status:
return status
data = result.get(ProtoKey.DATA)
if not data:
return APIStatus.ERROR_RUNTIME
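        # no explicit status in the reply - infer one from known marker strings in the reply text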
reply_data_list = []
for d in data:
if isinstance(d, dict):
t = d.get(ProtoKey.TYPE)
if t == ProtoKey.SUCCESS:
return APIStatus.SUCCESS
if t == ProtoKey.STRING or t == ProtoKey.ERROR:
reply_data_list.append(d[ProtoKey.DATA])
reply_data_full_response = "\n".join(reply_data_list)
if "session_inactive" in reply_data_full_response:
return APIStatus.ERROR_INACTIVE_SESSION
if "wrong server" in reply_data_full_response:
return APIStatus.ERROR_SERVER_CONNECTION
if "Failed to communicate" in reply_data_full_response:
return APIStatus.ERROR_SERVER_CONNECTION
if "invalid client" in reply_data_full_response:
return APIStatus.ERROR_INVALID_CLIENT
if "unknown site" in reply_data_full_response:
return APIStatus.ERROR_INVALID_CLIENT
if "not authorized" in reply_data_full_response:
return APIStatus.ERROR_AUTHORIZATION
return APIStatus.SUCCESS
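# A minimal, hedged sketch of driving AdminAPI directly (run via
# "python -m nvflare.fuel.hci.client.api"). _FixedServiceFinder, the address,
# and the user name below are illustrative assumptions, not part of this module.
if __name__ == "__main__":

    class _FixedServiceFinder(ServiceFinder):
        """Reports a single fixed server address instead of discovering one."""

        def __init__(self, host, port):
            self.host = host
            self.port = port

        def start(self, sp_address_changed_cb):
            # report the fixed address once; the ssid is arbitrary for a single server
            sp_address_changed_cb(self.host, self.port, "ssid-demo")

        def stop(self):
            pass

    demo_api = AdminAPI(
        user_name="admin",  # assumed user name
        service_finder=_FixedServiceFinder("localhost", 8003),  # assumed admin address
        insecure=True,
    )
    while not demo_api.is_ready():
        time.sleep(0.5)
    print(demo_api.do_command("list_jobs"))
    demo_api.logout()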
| NVFlare-main | nvflare/fuel/hci/client/api.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmd
import getpass
import json
import os
import signal
import time
from datetime import datetime
from pathlib import Path
from typing import List, Optional
try:
import readline
except ImportError:
readline = None
from nvflare.fuel.hci.cmd_arg_utils import join_args, split_to_args
from nvflare.fuel.hci.proto import CredentialType, ProtoKey
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandRegister, CommandSpec
from nvflare.fuel.hci.security import hash_password, verify_password
from nvflare.fuel.hci.table import Table
from nvflare.security.logging import secure_format_exception, secure_log_traceback
from .api import AdminAPI, CommandInfo
from .api_spec import ServiceFinder
from .api_status import APIStatus
from .event import EventContext, EventHandler, EventPropKey, EventType
class _BuiltInCmdModule(CommandModule):
def get_spec(self):
return CommandModuleSpec(
name="",
cmd_specs=[
CommandSpec(name="bye", description="exit from the client", usage="bye", handler_func=None),
CommandSpec(name="help", description="get command help information", usage="help", handler_func=None),
CommandSpec(
name="lpwd", description="print local work dir of the admin client", usage="lpwd", handler_func=None
),
CommandSpec(
name="timeout", description="set/show command timeout", usage="timeout [value]", handler_func=None
),
],
)
class AdminClient(cmd.Cmd, EventHandler):
"""Admin command prompt for submitting admin commands to the server through the CLI.
Args:
prompt: prompt to use for the command prompt
ca_cert: path to CA Cert file, by default provisioned rootCA.pem
client_cert: path to admin client Cert file, by default provisioned as client.crt
client_key: path to admin client Key file, by default provisioned as client.key
credential_type: what type of credential to use
cmd_modules: command modules to load and register
service_finder: used to obtain the primary service provider to set the host and port of the active server
        upload_dir: File transfer upload directory
        download_dir: File transfer download directory
        session_timeout_interval: close the client after this many seconds of inactivity. Defaults to 900.
        debug: whether to print debug messages. False by default.
        username: username to log in with when credential_type is CredentialType.LOCAL_CERT
        handlers: additional EventHandlers to register with the underlying AdminAPI
        cli_history_dir: directory in which to save the command history file
        cli_history_size: the maximum number of commands to save in the cli history file. Defaults to 1000.
"""
def __init__(
self,
prompt: str = "> ",
credential_type: CredentialType = CredentialType.PASSWORD,
ca_cert=None,
client_cert=None,
client_key=None,
upload_dir="",
download_dir="",
cmd_modules: Optional[List] = None,
service_finder: ServiceFinder = None,
session_timeout_interval=900, # close the client after 15 minutes of inactivity
debug: bool = False,
username: str = "",
handlers=None,
cli_history_dir: str = str(Path.home() / ".nvflare"),
cli_history_size: int = 1000,
):
super().__init__()
self.intro = "Type help or ? to list commands.\n"
self.prompt = prompt
self.user_name = "admin"
self.pwd = None
self.credential_type = credential_type
self.service_finder = service_finder
self.debug = debug
self.out_file = None
self.no_stdout = False
self.stopped = False # use this flag to prevent unnecessary signal exception
self.username = username
if not isinstance(service_finder, ServiceFinder):
raise TypeError("service_finder must be ServiceProvider but got {}.".format(type(service_finder)))
if not isinstance(credential_type, CredentialType):
raise TypeError("invalid credential_type {}".format(credential_type))
if not cli_history_dir:
raise Exception("missing cli_history_dir")
modules = [_BuiltInCmdModule()]
if cmd_modules:
if not isinstance(cmd_modules, list):
raise TypeError("cmd_modules must be a list.")
for m in cmd_modules:
if not isinstance(m, CommandModule):
raise TypeError("cmd_modules must be a list of CommandModule")
modules.append(m)
        insecure = self.credential_type == CredentialType.PASSWORD
self._get_login_creds()
event_handlers = [self]
if handlers:
event_handlers.extend(handlers)
self.api = AdminAPI(
ca_cert=ca_cert,
client_cert=client_cert,
client_key=client_key,
upload_dir=upload_dir,
download_dir=download_dir,
cmd_modules=modules,
service_finder=self.service_finder,
user_name=self.user_name,
debug=self.debug,
insecure=insecure,
session_timeout_interval=session_timeout_interval,
session_status_check_interval=1800, # check server for session status every 30 minutes
event_handlers=event_handlers,
)
if not os.path.isdir(cli_history_dir):
os.mkdir(cli_history_dir)
self.cli_history_file = os.path.join(cli_history_dir, ".admin_cli_history")
if readline:
readline.set_history_length(cli_history_size)
# signal.signal(signal.SIGUSR1, partial(self.session_signal_handler))
signal.signal(signal.SIGUSR1, self.session_signal_handler)
def handle_event(self, event_type: str, ctx: EventContext):
if self.debug:
print(f"DEBUG: received session event: {event_type}")
msg = ctx.get_prop(EventPropKey.MSG)
if msg:
self.write_string(msg)
if event_type == EventType.SESSION_CLOSED:
os.kill(os.getpid(), signal.SIGUSR1)
def session_signal_handler(self, signum, frame):
if self.stopped:
return
# the signal is only for the main thread
# the session monitor thread signals the main thread to stop
if self.debug:
print("DEBUG: signal received to close session")
self.api.close()
# use exception to interrupt the main cmd loop
raise RuntimeError("Session Closed")
def _set_output_file(self, file, no_stdout):
self._close_output_file()
self.out_file = file
self.no_stdout = no_stdout
def _close_output_file(self):
if self.out_file:
self.out_file.close()
self.out_file = None
self.no_stdout = False
def do_bye(self, arg):
"""Exit from the client.
        If the arg is not "logout" (that is, the user issued the bye command directly or entered the EOF
        character to shut down the client), display a message that the admin client is shutting down."""
if arg != "logout":
print("Shutting down admin client, please wait...")
self.api.logout()
return True
def do_lpwd(self, arg):
"""print local current work dir"""
self.write_string(os.getcwd())
def do_timeout(self, arg):
if not arg:
# display current setting
t = self.api.cmd_timeout
if t:
self.write_string(str(t))
else:
self.write_string("not set")
return
        try:
            t = float(arg)
            if t == 0:
                # 0 means "unset"; set_command_timeout() rejects non-positive values
                self.api.unset_command_timeout()
                self.write_string("command timeout is unset")
            else:
                self.api.set_command_timeout(t)
                self.write_string(f"command timeout is set to {t}")
        except Exception:
            self.write_string("invalid timeout value - must be float number >= 0.0")
def emptyline(self):
return
def _show_one_command(self, cmd_name, reg, show_invisible=False):
entries = reg.get_command_entries(cmd_name)
if len(entries) <= 0:
self.write_string("Undefined command {}\n".format(cmd_name))
return
for e in entries:
if not e.visible and not show_invisible:
continue
if len(e.scope.name) > 0:
self.write_string("Command: {}.{}".format(e.scope.name, cmd_name))
else:
self.write_string("Command: {}".format(cmd_name))
self.write_string("Description: {}".format(e.desc))
self.write_string("Usage: {}\n".format(e.usage))
def _show_commands(self, reg: CommandRegister):
table = Table(["Command", "Description"])
for scope_name in sorted(reg.scopes):
scope = reg.scopes[scope_name]
for cmd_name in sorted(scope.entries):
e = scope.entries[cmd_name]
if e.visible:
table.add_row([cmd_name, e.desc])
self.write_table(table)
def do_help(self, arg):
if len(arg) <= 0:
self.write_string("Client Initiated / Overseer Commands")
self._show_commands(self.api.client_cmd_reg)
self.write_string("\nServer Commands")
self._show_commands(self.api.server_cmd_reg)
else:
server_cmds = []
local_cmds = []
parts = arg.split()
for p in parts:
entries = self.api.client_cmd_reg.get_command_entries(p)
if len(entries) > 0:
local_cmds.append(p)
entries = self.api.server_cmd_reg.get_command_entries(p)
if len(entries) > 0:
server_cmds.append(p)
if len(local_cmds) > 0:
self.write_string("Client Commands")
self.write_string("---------------")
for cmd_name in local_cmds:
self._show_one_command(cmd_name, self.api.client_cmd_reg)
if len(server_cmds) > 0:
self.write_string("Server Commands")
self.write_string("---------------")
for cmd_name in server_cmds:
self._show_one_command(cmd_name, self.api.server_cmd_reg, show_invisible=True)
def complete(self, text, state):
results = [x + " " for x in self.api.all_cmds if x.startswith(text)] + [None]
return results[state]
def default(self, line):
self._close_output_file()
try:
return self._do_default(line)
except KeyboardInterrupt:
self.write_stdout("\n")
except Exception as e:
if self.debug:
secure_log_traceback()
self.write_stdout(f"exception occurred: {secure_format_exception(e)}")
self._close_output_file()
def _do_default(self, line):
args = split_to_args(line)
cmd_name = args[0]
# check for file output
out_file_name = None
no_stdout = False
out_arg_idx = 0
for i in range(len(args)):
arg = args[i]
if arg.startswith(">") and out_file_name is not None:
self.write_error("only one output file is supported")
return
if arg.startswith(">>"):
# only output to file
out_file_name = arg[2:]
no_stdout = True
out_arg_idx = i
elif arg.startswith(">"):
                # output to both file and stdout
out_file_name = arg[1:]
no_stdout = False
out_arg_idx = i
if out_file_name is not None:
if len(out_file_name) <= 0:
self.write_error("output file name must not be empty")
return
args.pop(out_arg_idx)
line = join_args(args)
try:
out_file = open(out_file_name, "w")
except Exception as e:
self.write_error(f"cannot open file {out_file_name}: {secure_format_exception(e)}")
return
self._set_output_file(out_file, no_stdout)
# check client command first
info = self.api.check_command(line)
if info == CommandInfo.UNKNOWN:
self.write_string("Undefined command {}".format(cmd_name))
return
elif info == CommandInfo.AMBIGUOUS:
self.write_string("Ambiguous command {} - qualify with scope".format(cmd_name))
return
elif info == CommandInfo.CONFIRM_AUTH:
if self.credential_type == CredentialType.PASSWORD:
info = CommandInfo.CONFIRM_PWD
elif self.user_name:
info = CommandInfo.CONFIRM_USER_NAME
else:
info = CommandInfo.CONFIRM_YN
if info == CommandInfo.CONFIRM_YN:
answer = input("Are you sure (y/N): ")
answer = answer.lower()
if answer != "y" and answer != "yes":
return
elif info == CommandInfo.CONFIRM_USER_NAME:
answer = input("Confirm with User Name: ")
if answer != self.user_name:
self.write_string("user name mismatch")
return
elif info == CommandInfo.CONFIRM_PWD:
pwd = getpass.getpass("Enter password to confirm: ")
if not verify_password(self.pwd, pwd):
self.write_string("Not authenticated")
return
# execute the command!
start = time.time()
resp = self.api.do_command(line)
secs = time.time() - start
usecs = int(secs * 1000000)
done = "Done [{} usecs] {}".format(usecs, datetime.now())
self.print_resp(resp)
if resp["status"] == APIStatus.ERROR_INACTIVE_SESSION:
return True
self.write_stdout(done)
if self.api.shutdown_received:
# exit the client
self.write_string(self.api.shutdown_msg)
return True
def preloop(self):
if readline and os.path.exists(self.cli_history_file):
readline.read_history_file(self.cli_history_file)
def postcmd(self, stop, line):
if readline:
readline.write_history_file(self.cli_history_file)
return stop
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
Overriding what is in cmd.Cmd to handle exiting client on Ctrl+D (EOF).
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey + ": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro) + "\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = input(self.prompt)
except (EOFError, ConnectionError):
line = "bye"
except KeyboardInterrupt:
self.stdout.write("\n")
line = "\n"
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = "EOF"
else:
line = line.rstrip("\r\n")
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
def run(self):
try:
while not self.api.is_ready():
time.sleep(1.0)
if self.api.shutdown_received:
return False
self.cmdloop(intro='Type ? to list commands; type "? cmdName" to show usage of a command.')
except RuntimeError as e:
if self.debug:
print(f"DEBUG: Exception {secure_format_exception(e)}")
finally:
self.stopped = True
self.api.close()
def _get_login_creds(self):
if self.credential_type == CredentialType.PASSWORD:
self.user_name = "admin"
self.pwd = hash_password("admin")
elif self.credential_type == CredentialType.LOCAL_CERT:
self.user_name = self.username
else:
self.user_name = input("User Name: ")
def print_resp(self, resp: dict):
"""Prints the server response
Args:
resp (dict): The server response.
"""
if ProtoKey.DETAILS in resp:
details = resp[ProtoKey.DETAILS]
if isinstance(details, str):
self.write_string(details)
elif isinstance(details, Table):
self.write_table(details)
if ProtoKey.DATA in resp:
for item in resp[ProtoKey.DATA]:
if not isinstance(item, dict):
continue
item_type = item.get(ProtoKey.TYPE)
item_data = item.get(ProtoKey.DATA)
if item_type == ProtoKey.STRING:
self.write_string(item_data)
elif item_type == ProtoKey.TABLE:
table = Table(None)
table.set_rows(item[ProtoKey.ROWS])
self.write_table(table)
elif item_type == ProtoKey.ERROR:
self.write_error(item_data)
elif item_type == ProtoKey.DICT:
self.write_dict(item_data)
if ProtoKey.DETAILS not in resp and ProtoKey.DATA not in resp:
self.write_string("Response is not correct.")
def write_stdout(self, data: str):
self.stdout.write(data + "\n")
def _write(self, content: str):
if not self.no_stdout:
self.stdout.write(content)
if self.out_file:
self.out_file.write(content)
def write_string(self, data: str):
content = data + "\n"
self._write(content)
def write_table(self, table: Table):
if not self.no_stdout:
table.write(self.stdout)
if self.out_file:
table.write(self.out_file)
def write_dict(self, data: dict):
content = json.dumps(data, indent=2) + "\n"
self._write(content)
def write_error(self, err: str):
content = "Error: " + err + "\n"
self._write(content)
def flush(self):
if not self.no_stdout:
self.stdout.flush()
if self.out_file:
self.out_file.flush()
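# A minimal, hedged sketch of starting the interactive prompt in password
# (insecure) mode. _DemoServiceFinder, the address, and the directories are
# illustrative assumptions, not part of this module.
if __name__ == "__main__":

    class _DemoServiceFinder(ServiceFinder):
        def start(self, sp_address_changed_cb):
            sp_address_changed_cb("localhost", 8003, "ssid-demo")  # assumed admin address

        def stop(self):
            pass

    client = AdminClient(
        credential_type=CredentialType.PASSWORD,
        upload_dir="/tmp/nvflare/upload",  # assumed directories
        download_dir="/tmp/nvflare/download",
        service_finder=_DemoServiceFinder(),
    )
    client.run()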
| NVFlare-main | nvflare/fuel/hci/client/cli.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from enum import Enum
from typing import Callable, List, Optional
from nvflare.fuel.hci.client.api_status import APIStatus
class FLAdminAPIResponse(dict):
def __init__(self, status: APIStatus, details: dict = None, raw: dict = None):
"""Structure containing the response of calls to the api as key value pairs.
The status key is the primary indicator of the success of a call and can contain APIStatus.SUCCESS or another
APIStatus. Most calls will return additional information in the details key, which is also a dictionary of key
value pairs. The raw key can optionally have the underlying response from AdminAPI when relevant, particularly
when data is received from the server and the status of a call is APIStatus.ERROR_RUNTIME to provide additional
information.
Note that the status in this response primarily indicates that the command submitted successfully. Depending on
the command and especially for calls to multiple clients, the contents of details or the raw response should be
examined to determine if the execution of the command was successful for each specific client.
Args:
status: APIStatus for primary indicator of the success of a call
details: response details
raw: raw response from server
"""
super().__init__()
self["status"] = status # todo: status.value but it may break existing code
if details is not None:
self["details"] = details
if raw is not None:
self["raw"] = raw
class APISyntaxError(Exception):
pass
class TargetType(str, Enum):
ALL = "all"
SERVER = "server"
CLIENT = "client"
class FLAdminAPISpec(ABC):
@abstractmethod
def check_status(self, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
"""Checks and returns the FL status.
If target_type is server, the call does not wait for the server to retrieve
information on the clients but returns the last information the server had at the time this call is made.
If target_type is client, specific clients can be specified in targets, and this call generally takes longer
than the function to just check the FL server status because this one waits for communication from the server to
client then back.
Note that this is still the previous training check_status, and there will be a new call to get status through
InfoCollector, which will be able to get information from components.
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def submit_job(self, job_folder: str) -> FLAdminAPIResponse:
"""Submit a job.
Assumes job folder is in the upload_dir set in API init.
Args:
job_folder (str): name of the job folder in upload_dir to submit
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def clone_job(self, job_id: str) -> FLAdminAPIResponse:
"""Clone a job that exists by copying the job contents and providing a new job_id.
Args:
job_id (str): job id of the job to clone
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def list_jobs(self, options: str = None) -> FLAdminAPIResponse:
"""List the jobs in the system.
Args:
options (str): the options string as provided to the list_jobs command for admin client.
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def download_job(self, job_id: str) -> FLAdminAPIResponse:
"""Download the specified job in the system.
Args:
job_id (str): Job id for the job to download
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def abort_job(self, job_id: str) -> FLAdminAPIResponse:
"""Abort a job that is running.
Args:
job_id (str): the job id to abort
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def delete_job(self, job_id: str) -> FLAdminAPIResponse:
"""Delete the specified job and workspace from the permanent store.
Args:
job_id (str): the job id to delete
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def abort(self, job_id: str, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
"""Issue a command to abort training.
Args:
job_id (str): job id
target_type: server | client
targets: if target_type is client, targets can optionally be a list of client names
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def restart(self, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
"""Issue a command to restart the specified target.
If the target is server, all FL clients will be restarted as well.
Args:
target_type: server | client
targets: if target_type is client, targets can optionally be a list of client names
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def shutdown(self, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
"""Issue a command to stop FL entirely for a specific FL client or specific FL clients.
Note that the targets will not be able to start with an API command after shutting down.
Args:
target_type: server | client
targets: if target_type is client, targets can optionally be a list of client names
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def remove_client(self, targets: List[str]) -> FLAdminAPIResponse:
"""Issue a command to remove a specific FL client or FL clients.
Note that the targets will not be able to start with an API command after shutting down. Also, you will not be
able to issue admin commands through the server to that client until the client is restarted (this includes
being able to issue the restart command through the API).
Args:
targets: a list of client names
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def set_timeout(self, timeout: float) -> FLAdminAPIResponse:
"""Sets the timeout for admin commands on the server in seconds.
This timeout is the maximum amount of time the server will wait for replies from clients. If the timeout is too
short, the server may not receive a response because clients may not have a chance to reply.
Args:
timeout: timeout in seconds of admin commands to set on the server
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def list_sp(self) -> FLAdminAPIResponse:
"""Gets the information on the available servers (service providers).
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def get_active_sp(self) -> FLAdminAPIResponse:
"""Gets the active server (service provider).
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def promote_sp(self, sp_end_point: str) -> FLAdminAPIResponse:
"""Sends command through overseer_agent to promote the specified sp_end_point to become the active server.
Args:
sp_end_point: service provider end point to promote to active in the form of server:fl_port:admin_port like example.com:8002:8003
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def get_available_apps_to_upload(self):
pass
@abstractmethod
def ls_target(self, target: str, options: str = None, path: str = None) -> FLAdminAPIResponse:
"""Issue ls command to retrieve the contents of the path.
Sends the shell command to get the directory listing of the target allowing for options that the ls command
of admin client allows. If no path is specified, the contents of the working directory are returned. The target
can be "server" or a specific client name for example "site2". The allowed options are: "-a" for all, "-l" to
use a long listing format, "-t" to sort by modification time newest first, "-S" to sort by file size largest
first, "-R" to list subdirectories recursively, "-u" with -l to show access time otherwise sort by access time.
Args:
target (str): either server or single client's client name.
options (str): the options string as provided to the ls command for admin client.
path (str): optionally, the path to specify (relative to the working directory of the specified target)
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def cat_target(self, target: str, options: str = None, file: str = None) -> FLAdminAPIResponse:
"""Issue cat command.
Sends the shell command to get the contents of the target's specified file allowing for options that the cat
command of admin client allows. The target can be "server" or a specific client name for example "site2". The
file is required and should contain the relative path to the file from the working directory of the target. The
allowed options are "-n" to number all output lines, "-b" to number nonempty output lines, "-s" to suppress
repeated empty output lines, and "-T" to display TAB characters as ^I.
Args:
target (str): either server or single client's client name.
options (str): the options string as provided to the ls command for admin client.
file (str): the path to the file to return the contents of
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def tail_target_log(self, target: str, options: str = None) -> FLAdminAPIResponse:
"""Returns the end of target's log allowing for options that the tail of admin client allows.
The option "-n" can be used to specify the number of lines for example "-n 100", or "-c" can specify the
number of bytes.
Args:
target (str): either server or single client's client name.
options (str): the options string as provided to the tail command for admin client. For this command, "-n" can be
used to specify the number of lines for example "-n 100", or "-c" can specify the number of bytes.
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def get_working_directory(self, target: str) -> FLAdminAPIResponse:
"""Gets the workspace root directory of the specified target.
Args:
target (str): either server or single client's client name.
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def grep_target(
self, target: str, options: str = None, pattern: str = None, file: str = None
) -> FLAdminAPIResponse:
"""Issue grep command.
Sends the shell command to grep the contents of the target's specified file allowing for options that the grep
command of admin client allows. The target can be "server" or a specific client name for example "site2". The
file is required and should contain the relative path to the file from the working directory of the target. The
pattern is also required. The allowed options are "-n" to print line number with output lines, "-i" to ignore
case distinctions, and "-b" to print the byte offset with output lines.
Args:
target (str): either server or single client's client name.
options (str): the options string as provided to the grep command for admin client.
pattern (str): the pattern to search for
file (str): the path to the file to grep
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def show_stats(
self, job_id: str, target_type: TargetType, targets: Optional[List[str]] = None
) -> FLAdminAPIResponse:
"""Gets and shows stats from the Info Collector.
Args:
job_id (str): job id
target_type: server | client
targets: if target_type is client, targets can optionally be a list of client names
Returns: FLAdminAPIResponse
"""
@abstractmethod
def show_errors(
self, job_id: str, target_type: TargetType, targets: Optional[List[str]] = None
) -> FLAdminAPIResponse:
"""Gets and shows errors from the Info Collector.
Args:
job_id (str): job id
target_type: server | client
targets: if target_type is client, targets can optionally be a list of client names
Returns: FLAdminAPIResponse
"""
@abstractmethod
def reset_errors(self, job_id: str) -> FLAdminAPIResponse:
"""Resets the collector errors.
Args:
job_id (str): job id
Returns: FLAdminAPIResponse
"""
@abstractmethod
def get_connected_client_list(self) -> FLAdminAPIResponse:
"""A convenience function to get a list of the clients currently connected to the FL server.
Operates through the check status server call. Note that this returns the client list based on the last known
        statuses on the server, so it is possible for a client to be disconnected and not yet removed from the list
of connected clients.
Returns: FLAdminAPIResponse
"""
pass
@abstractmethod
def wait_until_server_status(
self,
interval: int = 20,
timeout: int = None,
callback: Callable[[FLAdminAPIResponse], bool] = None,
fail_attempts: int = 3,
) -> FLAdminAPIResponse:
"""Wait until provided callback returns True.
There is the option to specify a timeout and interval to check the server status. If no callback function is
provided, the default callback returns True when the server
status is "training stopped". A custom callback can be provided to add logic to handle checking for other
conditions. A timeout should be set in case there are any error conditions that result in the system being stuck
in a state where the callback never returns True.
Args:
interval (int): in seconds, the time between consecutive checks of the server
timeout (int): if set, the amount of time this function will run until before returning a response message
            callback: called with the reply from check_status_server(), along with any additional kwargs; it can
                perform additional logic and should return a bool
fail_attempts (int): number of consecutive failed attempts of getting the server status before returning with ERROR_RUNTIME.
Returns: FLAdminAPIResponse
"""
pass
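# A hedged sketch of a custom callback for wait_until_server_status(). The
# "server_engine_status" detail key and the "stopped" value are assumptions
# about the reply layout, shown for illustration only.
def example_server_stopped_callback(reply: FLAdminAPIResponse, **kwargs) -> bool:
    # return True to stop waiting, False to keep polling
    details = reply.get("details", {})
    return details.get("server_engine_status") == "stopped"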
| NVFlare-main | nvflare/fuel/hci/client/fl_admin_api_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class FLDetailKey(str, Enum):
"""Constants for FL details that can be returned in the FLAdminAPI."""
APP_NAME = "app_name"
REGISTERED_CLIENTS = "registered_clients"
CONNECTED_CLIENTS = "connected_clients"
SERVER_ENGINE_STATUS = "server_engine_status"
SERVER_LOG = "server_log"
CLIENT_LOG = "client_log"
STATUS_TABLE = "status_table"
RESPONSES = "responses"
SUBMITTED_MODELS = "submitted_models"
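# Because FLDetailKey subclasses str, its members compare equal to plain strings
# and can index response dicts directly. The details dict below is a made-up example.
if __name__ == "__main__":
    details = {"server_engine_status": "started", "connected_clients": ["site-1", "site-2"]}
    assert FLDetailKey.SERVER_ENGINE_STATUS == "server_engine_status"
    print(details[FLDetailKey.CONNECTED_CLIENTS])  # -> ['site-1', 'site-2']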
| NVFlare-main | nvflare/fuel/hci/client/fl_admin_api_constants.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.hci.client.fl_admin_api import FLAdminAPI
from nvflare.fuel.hci.client.fl_admin_api_spec import TargetType
from nvflare.private.fed.app.fl_conf import FLAdminClientStarterConfigurator
from nvflare.security.logging import secure_format_exception
def api_command_wrapper(api_command_result):
"""Prints the result of the command and raises RuntimeError to interrupt command sequence if there is an error.
Args:
api_command_result: result of the api command
"""
print(api_command_result)
    if api_command_result["status"] != "SUCCESS":
        raise RuntimeError("command was not successful!")
return api_command_result
class FLAdminAPIRunner:
def __init__(
self,
username,
admin_dir,
poc=False,
debug=False,
):
"""Initializes and logs into an FLAdminAPI instance.
The default locations for certs, keys, and directories are used.
Args:
username: string of username to log in with
admin_dir: string of root admin dir containing the startup dir
poc: whether to run in poc mode without SSL certs
debug: whether to turn on debug mode
"""
assert isinstance(username, str), "username must be str"
self.username = username
assert isinstance(admin_dir, str), "admin_dir must be str"
        self.poc = bool(poc)
        debug = bool(debug)
try:
os.chdir(admin_dir)
workspace = Workspace(root_dir=admin_dir)
conf = FLAdminClientStarterConfigurator(workspace)
conf.configure()
except ConfigError as e:
print(f"ConfigError: {secure_format_exception(e)}")
return
try:
admin_config = conf.config_data["admin"]
except KeyError:
print("Missing admin section in fed_admin configuration.")
return
ca_cert = admin_config.get("ca_cert", "")
client_cert = admin_config.get("client_cert", "")
client_key = admin_config.get("client_key", "")
if admin_config.get("with_ssl"):
if len(ca_cert) <= 0:
print("missing CA Cert file name field ca_cert in fed_admin configuration")
return
if len(client_cert) <= 0:
print("missing Client Cert file name field client_cert in fed_admin configuration")
return
if len(client_key) <= 0:
print("missing Client Key file name field client_key in fed_admin configuration")
return
else:
ca_cert = None
client_key = None
client_cert = None
upload_dir = admin_config.get("upload_dir")
download_dir = admin_config.get("download_dir")
if not os.path.isdir(download_dir):
os.makedirs(download_dir)
assert os.path.isdir(admin_dir), f"admin directory does not exist at {admin_dir}"
if not self.poc:
assert os.path.isfile(ca_cert), f"rootCA.pem does not exist at {ca_cert}"
assert os.path.isfile(client_cert), f"client.crt does not exist at {client_cert}"
assert os.path.isfile(client_key), f"client.key does not exist at {client_key}"
# Connect with admin client
self.api = FLAdminAPI(
ca_cert=ca_cert,
client_cert=client_cert,
client_key=client_key,
upload_dir=upload_dir,
download_dir=download_dir,
overseer_agent=conf.overseer_agent,
user_name=username,
insecure=self.poc,
debug=debug,
)
# wait for admin to login
_t_warning_start = time.time()
while not self.api.server_sess_active:
time.sleep(0.5)
if time.time() - _t_warning_start > 10:
print("Admin is taking a long time to log in to the server...")
print("Make sure the server is up and available, and all configurations are correct.")
_t_warning_start = time.time()
def run(
self,
job_folder_name,
):
"""An example script to upload, deploy, and start a specified app.
Note that the app folder must be in upload_dir already. Prints the command to be executed first so it is easy
to follow along as the commands run.
Args:
job_folder_name: name of job folder to submit, either relative to the upload_dir specified in the fed_admin.json config, or absolute path
"""
try:
print("api.check_status(TargetType.SERVER)")
api_command_wrapper(self.api.check_status(TargetType.SERVER))
print(f'api.submit_job("{job_folder_name}")')
api_command_wrapper(self.api.submit_job(job_folder_name))
time.sleep(1)
print("api.check_status(TargetType.SERVER)")
api_command_wrapper(self.api.check_status(TargetType.SERVER))
# The following wait_until can be put into a loop that has other behavior other than waiting until clients
# are in a status of stopped. For this code, the app is expected to stop, or this may not end.
print("api.wait_until_client_status()")
wait_result = api_command_wrapper(self.api.wait_until_client_status())
print(wait_result)
print("api.check_status(TargetType.SERVER)")
api_command_wrapper(self.api.check_status(TargetType.SERVER))
# now server engine status should be stopped
time.sleep(10) # wait for clients to stop in case they take longer than server to stop
print("api.check_status(TargetType.CLIENT)")
api_command_wrapper(self.api.check_status(TargetType.CLIENT))
except RuntimeError as e:
print(f"There was an exception: {secure_format_exception(e)}")
| NVFlare-main | nvflare/fuel/hci/client/fl_admin_api_runner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.apis.overseer_spec import OverseerAgent
from nvflare.fuel.hci.reg import CommandModule
from nvflare.ha.ha_admin_cmds import HACommandModule
from .api_spec import ServiceFinder
class ServiceFinderByOverseer(ServiceFinder):
def __init__(self, overseer_agent: OverseerAgent):
if not isinstance(overseer_agent, OverseerAgent):
raise TypeError(f"overseer_agent must be OverseerAgent but got {type(overseer_agent)}")
self.overseer_agent = overseer_agent
self.sp_address_changed_cb = None
self.host = ""
self.port = 0
self.ssid = ""
def set_secure_context(self, ca_cert_path: str, cert_path: str, private_key_path: str):
self.overseer_agent.set_secure_context(ca_path=ca_cert_path, cert_path=cert_path, prv_key_path=private_key_path)
def get_command_module(self) -> CommandModule:
return HACommandModule(self.overseer_agent)
def start(self, sp_address_changed_cb):
if not callable(sp_address_changed_cb):
raise TypeError("sp_address_changed_cb must be callable but got {}".format(type(sp_address_changed_cb)))
self.sp_address_changed_cb = sp_address_changed_cb
self.overseer_agent.start(self._overseer_callback)
def _overseer_callback(self, overseer_agent):
sp = overseer_agent.get_primary_sp()
if not sp or not sp.primary:
return
port_num = int(sp.admin_port)
if self.host != sp.name or self.port != port_num or self.ssid != sp.service_session_id:
# SP changed!
self.host = sp.name
self.port = port_num
self.ssid = sp.service_session_id
if self.sp_address_changed_cb is not None:
self.sp_address_changed_cb(self.host, self.port, self.ssid)
def stop(self):
self.overseer_agent.end()
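# A hedged sketch of consuming ServiceFinderByOverseer. Constructing the
# OverseerAgent is elided here (any OverseerAgent implementation works); the
# callback signature matches what start() expects.
def example_sp_changed_cb(host: str, port: int, ssid: str):
    print(f"active SP is now {host}:{port} (ssid={ssid})")

# Typical wiring (overseer_agent assumed constructed elsewhere):
#   finder = ServiceFinderByOverseer(overseer_agent)
#   finder.start(example_sp_changed_cb)
#   ...
#   finder.stop()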
| NVFlare-main | nvflare/fuel/hci/client/overseer_service_finder.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import enum
from abc import ABC, abstractmethod
from nvflare.fuel.common.ctx import SimpleContext
from nvflare.fuel.hci.reg import CommandModule
from nvflare.fuel.hci.table import Table
class CommandCtxKey(object):
API = "api"
CMD = "cmd"
CMD_ENTRY = "cmd_entry"
CMD_ARGS = "cmd_args"
REPLY_PROCESSOR = "reply_processor"
RESULT = "result"
JSON_PROCESSOR = "json_processor"
META = "meta"
CUSTOM_PROPS = "custom_props"
BYTES_RECEIVER = "bytes_receiver"
class CommandContext(SimpleContext):
def set_bytes_receiver(self, r):
self.set_prop(CommandCtxKey.BYTES_RECEIVER, r)
def get_bytes_receiver(self):
return self.get_prop(CommandCtxKey.BYTES_RECEIVER)
def set_command_result(self, result):
self.set_prop(CommandCtxKey.RESULT, result)
def get_command_result(self):
return self.get_prop(CommandCtxKey.RESULT)
def set_api(self, api):
self.set_prop(CommandCtxKey.API, api)
def get_api(self):
return self.get_prop(CommandCtxKey.API)
def set_command(self, command):
self.set_prop(CommandCtxKey.CMD, command)
def get_command(self):
return self.get_prop(CommandCtxKey.CMD)
def get_command_name(self):
args = self.get_command_args()
full_name = args[0]
parts = full_name.split(".")
return parts[-1]
def set_command_args(self, cmd_args):
self.set_prop(CommandCtxKey.CMD_ARGS, cmd_args)
def get_command_args(self):
return self.get_prop(CommandCtxKey.CMD_ARGS)
def set_command_entry(self, entry):
self.set_prop(CommandCtxKey.CMD_ENTRY, entry)
def get_command_entry(self):
return self.get_prop(CommandCtxKey.CMD_ENTRY)
def set_reply_processor(self, processor):
self.set_prop(CommandCtxKey.REPLY_PROCESSOR, processor)
def get_reply_processor(self):
return self.get_prop(CommandCtxKey.REPLY_PROCESSOR)
def set_json_processor(self, processor):
self.set_prop(CommandCtxKey.JSON_PROCESSOR, processor)
def get_json_processor(self):
return self.get_prop(CommandCtxKey.JSON_PROCESSOR)
def set_meta(self, meta):
self.set_prop(CommandCtxKey.META, meta)
def get_meta(self):
return self.get_prop(CommandCtxKey.META)
def set_custom_props(self, value):
self.set_prop(CommandCtxKey.CUSTOM_PROPS, value)
def get_custom_props(self):
return self.get_prop(CommandCtxKey.CUSTOM_PROPS)
class ApiPocValue(object):
ADMIN = "admin"
class CommandInfo(enum.Enum):
OK = 0
UNKNOWN = 1
AMBIGUOUS = 2
CONFIRM_PWD = 3
CONFIRM_YN = 4
CONFIRM_USER_NAME = 5
CONFIRM_AUTH = 6
class ReplyProcessor:
"""A base class for parsing server's response."""
def reply_start(self, ctx: CommandContext, reply_json):
pass
def process_string(self, ctx: CommandContext, item: str):
pass
def process_success(self, ctx: CommandContext, item: str):
pass
def process_error(self, ctx: CommandContext, err: str):
pass
def process_table(self, ctx: CommandContext, table: Table):
pass
def process_dict(self, ctx: CommandContext, data: dict):
pass
def process_shutdown(self, ctx: CommandContext, msg: str):
pass
def process_token(self, ctx: CommandContext, token: str):
pass
def protocol_error(self, ctx: CommandContext, err: str):
pass
def reply_done(self, ctx: CommandContext):
pass
def process_bytes(self, ctx: CommandContext):
pass
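# A hedged illustration, not part of the original module: a minimal ReplyProcessor
# subclass that accumulates string items from the server reply and stores the
# joined text as the command result on the context.
class _StringCollectingReplyProcessor(ReplyProcessor):
    def reply_start(self, ctx: CommandContext, reply_json):
        self._lines = []
    def process_string(self, ctx: CommandContext, item: str):
        self._lines.append(item)
    def reply_done(self, ctx: CommandContext):
        ctx.set_command_result("\n".join(self._lines))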
class AdminAPISpec(ABC):
@abstractmethod
def is_ready(self) -> bool:
"""Whether the API is ready for executing commands."""
pass
@abstractmethod
def do_command(self, command: str):
"""Executes a command.
The command could be a client command or a server command.
Args:
command: The command to be executed.
"""
pass
@abstractmethod
def server_execute(self, command: str, reply_processor=None, cmd_ctx=None):
"""Executes a command on server side.
Args:
command: The command to be executed.
reply_processor: processor to process reply from server
cmd_ctx: command context
"""
pass
@abstractmethod
def check_command(self, command: str) -> CommandInfo:
"""Checks the specified command for processing info.
The command could be a client command or a server command.
Args:
command: command to be checked
Returns: command processing info
"""
pass
def service_address_changed_cb_signature(host: str, port: int, ssid: str):
pass
class ServiceFinder(ABC):
@abstractmethod
def start(self, service_address_changed_cb):
pass
@abstractmethod
def stop(self):
pass
def set_secure_context(self, ca_cert_path: str, cert_path: str, private_key_path: str):
pass
def get_command_module(self) -> CommandModule:
pass
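# A hedged sketch, not part of the original module: the smallest possible
# ServiceFinder, for a deployment with a single fixed server and no overseer.
# The host/port values are supplied by the caller; ssid is reported as an empty
# string since there is no HA service session.
class _FixedAddressServiceFinder(ServiceFinder):
    def __init__(self, host: str, port: int):
        self.host = host
        self.port = port
    def start(self, service_address_changed_cb):
        # report the static address once; it never changes afterwards
        service_address_changed_cb(self.host, self.port, "")
    def stop(self):
        pass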
| NVFlare-main | nvflare/fuel/hci/client/api_spec.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import time
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple
from nvflare.apis.fl_constant import AdminCommandNames
from nvflare.apis.overseer_spec import OverseerAgent
from nvflare.apis.utils.format_check import type_pattern_mapping
from nvflare.fuel.hci.client.api import AdminAPI
from nvflare.fuel.hci.client.api_status import APIStatus
from nvflare.fuel.hci.client.fl_admin_api_constants import FLDetailKey
from nvflare.fuel.hci.client.fl_admin_api_spec import APISyntaxError, FLAdminAPIResponse, FLAdminAPISpec, TargetType
from nvflare.security.logging import secure_format_exception
from .overseer_service_finder import ServiceFinderByOverseer
def wrap_with_return_exception_responses(func):
"""Decorator on all FLAdminAPI calls to handle any raised exceptions and return the fitting error status."""
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
reply = func(self, *args, **kwargs)
if reply:
return reply
else:
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not generate reply."}
)
except ConnectionRefusedError as e:
return FLAdminAPIResponse(
APIStatus.ERROR_AUTHENTICATION, {"message": f"Error: {secure_format_exception(e)}"}
)
except PermissionError as e:
return FLAdminAPIResponse(
APIStatus.ERROR_AUTHORIZATION, {"message": f"Error: {secure_format_exception(e)}"}
)
except LookupError as e:
return FLAdminAPIResponse(
APIStatus.ERROR_INVALID_CLIENT, {"message": f"Error: {secure_format_exception(e)}"}
)
except APISyntaxError as e:
return FLAdminAPIResponse(APIStatus.ERROR_SYNTAX, {"message": f"Error: {secure_format_exception(e)}"})
except TimeoutError as e:
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME,
{"message": f"TimeoutError: possibly unable to communicate with server: {secure_format_exception(e)}"},
)
except Exception as e:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": f"Exception: {secure_format_exception(e)}"})
return wrapper
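# A hedged illustration, not part of the original module: the decorator's contract
# is that a decorated method never lets exceptions escape; they are converted into
# an FLAdminAPIResponse carrying the matching APIStatus. The _Demo class is
# hypothetical and exists only to show the mapping.
def _example_decorator_contract() -> FLAdminAPIResponse:
    class _Demo:
        @wrap_with_return_exception_responses
        def fail(self):
            raise PermissionError("not authorized")
    resp = _Demo().fail()
    assert resp["status"] == APIStatus.ERROR_AUTHORIZATION
    return resp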
def default_server_status_handling_cb(reply: FLAdminAPIResponse, **kwargs) -> bool:
    return reply["details"][FLDetailKey.SERVER_ENGINE_STATUS] == "stopped"
def default_client_status_handling_cb(reply: FLAdminAPIResponse) -> bool:
    client_statuses = reply.get("details").get("client_statuses")
    stopped_client_count = 0
    # the first row of the table is the header, so start counting from row 1
    for i in range(1, len(client_statuses)):
        if client_statuses[i][3] == "No Jobs":
            stopped_client_count += 1
    return stopped_client_count == len(client_statuses) - 1
def default_stats_handling_cb(reply: FLAdminAPIResponse) -> bool:
    return reply.get("details").get("message").get("ServerRunner").get("status") == "done"
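# A hedged example of a custom wait callback, not part of the original module: it
# treats the wait as satisfied once at least min_clients clients have registered,
# e.g. api.wait_until_server_status(callback=_example_min_clients_cb, min_clients=2).
def _example_min_clients_cb(reply: FLAdminAPIResponse, min_clients: int = 1, **kwargs) -> bool:
    registered = reply["details"].get(FLDetailKey.REGISTERED_CLIENTS, 0)
    return registered >= min_clients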
class FLAdminAPI(AdminAPI, FLAdminAPISpec):
def __init__(
self,
overseer_agent: OverseerAgent,
ca_cert: str = "",
client_cert: str = "",
client_key: str = "",
upload_dir: str = "",
download_dir: str = "",
cmd_modules: Optional[List] = None,
user_name: str = None,
insecure=False,
debug=False,
session_timeout_interval=None,
session_status_check_interval=None,
auto_login_max_tries: int = 5,
):
"""FLAdminAPI serves as foundation for communications to FL server through the AdminAPI.
Upon initialization, FLAdminAPI will start the overseer agent to get the active server and then try to log in.
        This happens in a thread, so code that executes afterwards should check that the FLAdminAPI has successfully logged in.
Args:
ca_cert: path to CA Cert file, by default provisioned rootCA.pem
client_cert: path to admin client Cert file, by default provisioned as client.crt
client_key: path to admin client Key file, by default provisioned as client.key
upload_dir: File transfer upload directory. Folders uploaded to the server to be deployed must be here. Folder must already exist and be accessible.
download_dir: File transfer download directory. Can be same as upload_dir. Folder must already exist and be accessible.
cmd_modules: command modules to load and register. Note that FileTransferModule is initialized here with upload_dir and download_dir if cmd_modules is None.
overseer_agent: initialized OverseerAgent to obtain the primary service provider to set the host and port of the active server
user_name: Username to authenticate with FL server
            insecure: Whether to use insecure (non-TLS) communication; this arg was named "poc" before version 2.4.
debug: Whether to print debug messages. False by default.
session_timeout_interval: if specified, automatically close the session after inactive for this long
session_status_check_interval: how often to check session status with server
auto_login_max_tries: maximum number of tries to auto-login.
"""
service_finder = ServiceFinderByOverseer(overseer_agent)
AdminAPI.__init__(
self,
ca_cert=ca_cert,
client_cert=client_cert,
client_key=client_key,
upload_dir=upload_dir,
download_dir=download_dir,
cmd_modules=cmd_modules,
service_finder=service_finder,
user_name=user_name,
insecure=insecure,
debug=debug,
session_timeout_interval=session_timeout_interval,
session_status_check_interval=session_status_check_interval,
auto_login_max_tries=auto_login_max_tries,
)
self.upload_dir = upload_dir
self.download_dir = download_dir
self._error_buffer = None
def _process_targets_into_str(self, targets: List[str]) -> str:
if not isinstance(targets, list):
raise APISyntaxError("targets is not a list.")
if not all(isinstance(t, str) for t in targets):
raise APISyntaxError("all targets in the list of targets must be strings.")
for t in targets:
try:
self._validate_required_target_string(t)
except APISyntaxError:
raise APISyntaxError("each target in targets must be a string of only valid characters and no spaces.")
return " ".join(targets)
def _validate_required_target_string(self, target: str) -> str:
"""Returns the target string if it exists and is valid."""
if not target:
raise APISyntaxError("target is required but not specified.")
if not isinstance(target, str):
raise APISyntaxError("target is not str.")
if not re.match("^[A-Za-z0-9._-]*$", target):
raise APISyntaxError("target must be a string of only valid characters and no spaces.")
return target
def _validate_options_string(self, options: str) -> str:
"""Returns the options string if it is valid."""
if not isinstance(options, str):
raise APISyntaxError("options is not str.")
if not re.match("^[A-Za-z0-9- ]*$", options):
raise APISyntaxError("options must be a string of only valid characters.")
return options
def _validate_path_string(self, path: str) -> str:
"""Returns the path string if it is valid."""
if not isinstance(path, str):
raise APISyntaxError("path is not str.")
if not re.match("^[A-Za-z0-9-._/]*$", path):
raise APISyntaxError("unsupported characters in path {}".format(path))
if path.startswith("/"):
raise APISyntaxError("absolute path is not allowed")
paths = path.split("/")
for p in paths:
if p == "..":
raise APISyntaxError(".. in path name is not allowed")
return path
def _validate_file_string(self, file: str) -> str:
"""Returns the file string if it is valid."""
if not isinstance(file, str):
raise APISyntaxError("file is not str.")
if not re.match("^[A-Za-z0-9-._/]*$", file):
raise APISyntaxError("unsupported characters in file {}".format(file))
if file.startswith("/"):
raise APISyntaxError("absolute path for file is not allowed")
paths = file.split("/")
for p in paths:
if p == "..":
raise APISyntaxError(".. in file path is not allowed")
basename, file_extension = os.path.splitext(file)
if file_extension not in [".txt", ".log", ".json", ".csv", ".sh", ".config", ".py"]:
raise APISyntaxError(
"this command cannot be applied to file {}. Only files with the following extensions are "
"permitted: .txt, .log, .json, .csv, .sh, .config, .py".format(file)
)
return file
def _validate_sp_string(self, sp_string) -> str:
if re.match(
type_pattern_mapping.get("sp_end_point"),
sp_string,
):
return sp_string
else:
raise APISyntaxError("sp_string must be of the format example.com:8002:8003")
def _get_processed_cmd_reply_data(self, command) -> Tuple[bool, str, Dict[str, Any]]:
"""Executes the specified command through the underlying AdminAPI's do_command() and checks the response to
raise common errors.
Returns:
Tuple of bool to indicate if success is in reply data, str with full response of the reply data, and the raw
reply.
"""
success_in_data = False
reply = self.do_command(command)
# handle errors from write_error (these can be from FileTransferModule)
if self._error_buffer:
err = self._error_buffer
self._error_buffer = None
if "not authorized" in err:
raise PermissionError(err)
raise RuntimeError(err)
if reply.get("status") == APIStatus.SUCCESS:
success_in_data = True
reply_data_list = []
reply_data_full_response = ""
if reply.get("data"):
for data in reply["data"]:
if isinstance(data, dict):
if data.get("type") == "success":
success_in_data = True
if data.get("type") == "string" or data.get("type") == "error":
reply_data_list.append(data["data"])
reply_data_full_response = "\n".join(reply_data_list)
if "session_inactive" in reply_data_full_response:
raise ConnectionRefusedError(reply_data_full_response)
if "Failed to communicate" in reply_data_full_response:
raise ConnectionError(reply_data_full_response)
if "invalid client" in reply_data_full_response:
raise LookupError(reply_data_full_response)
if "unknown site" in reply_data_full_response:
raise LookupError(reply_data_full_response)
if "not authorized" in reply_data_full_response:
raise PermissionError(reply_data_full_response)
if reply.get("status") != APIStatus.SUCCESS:
if reply.get("details") and ("not authorized" in reply.get("details")):
raise PermissionError(reply.get("details"))
raise RuntimeError(reply.get("details"))
return success_in_data, reply_data_full_response, reply
def _parse_section_of_response_text(
self, data, start_string: str, offset: int = None, end_string: str = None, end_index=None
) -> str:
"""Convenience method to get portion of string based on parameters."""
if not offset:
offset = len(start_string) + 1
if end_string:
return data[data.find(start_string) + offset : data.find(end_string)]
if end_index:
return data[data.find(start_string) + offset : end_index]
return data[data.find(start_string) + offset :]
def _parse_section_of_response_text_as_int(
self, data, start_string: str, offset: int = None, end_string: str = None, end_index=None
) -> int:
try:
return int(
self._parse_section_of_response_text(
data=data, start_string=start_string, offset=offset, end_string=end_string, end_index=end_index
)
)
except ValueError:
return -1
def write_error(self, error: str) -> None:
"""Internally used to handle errors from FileTransferModule"""
self._error_buffer = error
@wrap_with_return_exception_responses
def check_status(self, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
if target_type == TargetType.SERVER:
return self._check_status_server()
elif target_type == TargetType.CLIENT:
return self._check_status_client(targets)
else:
raise APISyntaxError("target_type must be server or client.")
def _check_status_server(self) -> FLAdminAPIResponse:
"""
Checks the server status and returns the details. This call does not wait for the server to retrieve information
on the clients but returns the last information the server had at the time this call is made.
"""
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(
AdminCommandNames.CHECK_STATUS + " server"
)
details = {}
if reply.get("data"):
for data in reply["data"]:
if data["type"] == "string":
if data["data"].find("Engine status:") != -1:
details[FLDetailKey.SERVER_ENGINE_STATUS] = self._parse_section_of_response_text(
data=data["data"], start_string="Engine status:"
)
if data["data"].find("Registered clients:") != -1:
details[FLDetailKey.REGISTERED_CLIENTS] = self._parse_section_of_response_text_as_int(
data=data["data"], start_string="Registered clients:"
)
if data["type"] == "table":
details[FLDetailKey.STATUS_TABLE] = data["rows"]
return FLAdminAPIResponse(APIStatus.SUCCESS, details, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
def _check_status_client(self, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
if targets:
processed_targets_str = self._process_targets_into_str(targets)
command = AdminCommandNames.CHECK_STATUS + " client " + processed_targets_str
else:
command = AdminCommandNames.CHECK_STATUS + " client"
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
details = {}
if reply.get("data"):
for data in reply["data"]:
if data["type"] == "table":
details["client_statuses"] = data["rows"]
return FLAdminAPIResponse(APIStatus.SUCCESS, details, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def submit_job(self, job_folder: str) -> FLAdminAPIResponse:
if not job_folder:
raise APISyntaxError("job_folder is required but not specified.")
if not isinstance(job_folder, str):
raise APISyntaxError("job_folder must be str but got {}.".format(type(job_folder)))
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(
AdminCommandNames.SUBMIT_JOB + " " + job_folder
)
if reply_data_full_response:
if "Submitted job" in reply_data_full_response:
                # TODO: this is a hack to extract the job id from the reply message
return FLAdminAPIResponse(
APIStatus.SUCCESS,
{"message": reply_data_full_response, "job_id": reply_data_full_response.split(":")[-1].strip()},
reply,
)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def clone_job(self, job_id: str) -> FLAdminAPIResponse:
if not job_id:
raise APISyntaxError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(
AdminCommandNames.CLONE_JOB + " " + job_id
)
if reply_data_full_response:
if "Cloned job" in reply_data_full_response:
return FLAdminAPIResponse(
APIStatus.SUCCESS,
{"message": reply_data_full_response, "job_id": reply_data_full_response.split(":")[-1].strip()},
reply,
)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def list_jobs(self, options: str = None) -> FLAdminAPIResponse:
command = AdminCommandNames.LIST_JOBS
if options:
options = self._validate_options_string(options)
command = command + " " + options
success, _, reply = self._get_processed_cmd_reply_data(command)
if success:
meta = reply.get("meta")
if meta:
jobs_list = meta.get("jobs", [])
return FLAdminAPIResponse(APIStatus.SUCCESS, jobs_list, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def download_job(self, job_id: str) -> FLAdminAPIResponse:
if not job_id:
raise APISyntaxError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(
AdminCommandNames.DOWNLOAD_JOB + " " + job_id
)
if success:
return FLAdminAPIResponse(
APIStatus.SUCCESS,
{"message": reply.get("details")},
reply,
)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def abort_job(self, job_id: str) -> FLAdminAPIResponse:
if not job_id:
raise APISyntaxError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(
AdminCommandNames.ABORT_JOB + " " + job_id
)
if reply:
meta = reply.get("meta", None)
if isinstance(meta, dict):
status = meta.get("status")
info = meta.get("info", "")
if status == "ok":
return FLAdminAPIResponse(
APIStatus.SUCCESS,
{"message": info},
reply,
)
else:
msg = f"{status}: {info}"
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": msg}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def delete_job(self, job_id: str) -> FLAdminAPIResponse:
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(
AdminCommandNames.DELETE_JOB + " " + str(job_id)
)
if reply_data_full_response:
if ("can not be deleted" in reply_data_full_response) or (
"could not be deleted" in reply_data_full_response
):
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response})
if success:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def abort(self, job_id: str, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
if not job_id:
raise APISyntaxError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
if target_type == TargetType.ALL:
command = AdminCommandNames.ABORT + " " + job_id + " all"
elif target_type == TargetType.SERVER:
command = AdminCommandNames.ABORT + " " + job_id + " server"
elif target_type == TargetType.CLIENT:
if targets:
processed_targets_str = self._process_targets_into_str(targets)
command = AdminCommandNames.ABORT + " " + job_id + " client " + processed_targets_str
else:
command = AdminCommandNames.ABORT + " " + job_id + " client"
else:
raise APISyntaxError("target_type must be server, client, or all.")
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
if "Server app has not started" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response}, reply)
if "No clients to abort" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response}, reply)
if "please wait for started before abort" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response}, reply)
if success:
return_details = {}
if reply_data_full_response:
return_details["message"] = reply_data_full_response
if reply.get("data"):
for data in reply["data"]:
if data["type"] == "table":
return_details[FLDetailKey.RESPONSES] = data["rows"]
return FLAdminAPIResponse(APIStatus.SUCCESS, return_details, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def restart(self, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
if target_type == TargetType.ALL:
command = AdminCommandNames.RESTART + " " + "all"
elif target_type == TargetType.SERVER:
command = AdminCommandNames.RESTART + " " + "server"
elif target_type == TargetType.CLIENT:
if targets:
processed_targets_str = self._process_targets_into_str(targets)
command = AdminCommandNames.RESTART + " client " + processed_targets_str
else:
command = AdminCommandNames.RESTART + " " + "client"
else:
raise APISyntaxError("target_type must be server, client, or all.")
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
if "no clients available" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response})
if "Server is starting, please wait for started before restart" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response})
if success:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def shutdown(self, target_type: TargetType, targets: Optional[List[str]] = None) -> FLAdminAPIResponse:
if target_type == TargetType.ALL:
command = AdminCommandNames.SHUTDOWN + " " + "all"
elif target_type == TargetType.SERVER:
command = AdminCommandNames.SHUTDOWN + " " + "server"
elif target_type == TargetType.CLIENT:
if targets:
processed_targets_str = self._process_targets_into_str(targets)
command = AdminCommandNames.SHUTDOWN + " client " + processed_targets_str
else:
command = AdminCommandNames.SHUTDOWN + " " + "client"
else:
raise APISyntaxError("target_type must be server, client, or all.")
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
if "There are still active clients. Shutdown all clients first." in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response})
if "no clients to shutdown" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response})
if "Server is starting, please wait for started before shutdown" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response})
if success:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def remove_client(self, targets: List[str]) -> FLAdminAPIResponse:
if not targets:
raise APISyntaxError("targets needs to be provided as a list of client names.")
processed_targets_str = self._process_targets_into_str(targets)
command = AdminCommandNames.REMOVE_CLIENT + " " + processed_targets_str
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if success:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def set_timeout(self, timeout: float) -> FLAdminAPIResponse:
if not isinstance(timeout, (float, int)):
raise APISyntaxError("timeout is not float.")
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data("set_timeout " + str(timeout))
if success:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def list_sp(self) -> FLAdminAPIResponse:
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data("list_sp")
if reply.get("data"):
return FLAdminAPIResponse(APIStatus.SUCCESS, reply.get("data"), reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def get_active_sp(self) -> FLAdminAPIResponse:
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data("get_active_sp")
if reply.get("details"):
return FLAdminAPIResponse(APIStatus.SUCCESS, reply.get("details"), reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def promote_sp(self, sp_end_point: str) -> FLAdminAPIResponse:
sp_end_point = self._validate_sp_string(sp_end_point)
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data("promote_sp " + sp_end_point)
if success:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply.get("details")}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def shutdown_system(self) -> FLAdminAPIResponse:
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data("shutdown_system")
if success:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply.get("details")}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def get_available_apps_to_upload(self):
dir_list = []
for item in os.listdir(self.upload_dir):
if os.path.isdir(os.path.join(self.upload_dir, item)):
dir_list.append(item)
return FLAdminAPIResponse(APIStatus.SUCCESS, {"app_list": dir_list})
@wrap_with_return_exception_responses
def ls_target(self, target: str, options: str = None, path: str = None) -> FLAdminAPIResponse:
target = self._validate_required_target_string(target)
command = "ls " + target
if options:
options = self._validate_options_string(options)
command = command + " " + options
if path:
path = self._validate_path_string(path)
command = command + " " + path
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response})
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def cat_target(self, target: str, options: str = None, file: str = None) -> FLAdminAPIResponse:
if not file:
raise APISyntaxError("file is required but not specified.")
file = self._validate_file_string(file)
target = self._validate_required_target_string(target)
command = "cat " + target
if options:
options = self._validate_options_string(options)
command = command + " " + options
if file:
command = command + " " + file
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response})
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def tail_target_log(self, target: str, options: str = None) -> FLAdminAPIResponse:
target = self._validate_required_target_string(target)
command = "tail " + target
if options:
options = self._validate_options_string(options)
command = command + " " + options
command = command + " log.txt"
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response})
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def get_working_directory(self, target: str) -> FLAdminAPIResponse:
target = self._validate_required_target_string(target)
command = "pwd " + target
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response})
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def grep_target(
self, target: str, options: str = None, pattern: str = None, file: str = None
) -> FLAdminAPIResponse:
if not file:
raise APISyntaxError("file is required but not specified.")
file = self._validate_file_string(file)
if not pattern:
raise APISyntaxError("pattern is required but not specified.")
if not isinstance(pattern, str):
raise APISyntaxError("pattern is not str.")
target = self._validate_required_target_string(target)
command = "grep " + target
if options:
options = self._validate_options_string(options)
command = command + " " + options
command = command + ' "' + pattern + '" ' + file
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply_data_full_response:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response})
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def show_stats(
self, job_id: str, target_type: TargetType, targets: Optional[List[str]] = None
) -> FLAdminAPIResponse:
if not job_id:
raise APISyntaxError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
if target_type == TargetType.SERVER:
command = AdminCommandNames.SHOW_STATS + " " + job_id + " server"
elif target_type == TargetType.CLIENT:
if targets:
processed_targets_str = self._process_targets_into_str(targets)
command = AdminCommandNames.SHOW_STATS + " " + job_id + " client " + processed_targets_str
else:
command = AdminCommandNames.SHOW_STATS + " " + job_id + " client"
else:
raise APISyntaxError("target_type must be server or client.")
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply.get("data"):
for data in reply["data"]:
if data["type"] == "dict":
stats_result = data["data"]
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": stats_result}, reply)
if reply_data_full_response:
if "App is not running" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def show_errors(
self, job_id: str, target_type: TargetType, targets: Optional[List[str]] = None
) -> FLAdminAPIResponse:
if not job_id:
raise APISyntaxError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
if target_type == TargetType.SERVER:
command = AdminCommandNames.SHOW_ERRORS + " " + job_id + " server"
elif target_type == TargetType.CLIENT:
if targets:
processed_targets_str = self._process_targets_into_str(targets)
command = AdminCommandNames.SHOW_ERRORS + " " + job_id + " client " + processed_targets_str
else:
command = AdminCommandNames.SHOW_ERRORS + " " + job_id + " client"
else:
raise APISyntaxError("target_type must be server or client.")
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(command)
if reply.get("data"):
for data in reply["data"]:
if data["type"] == "dict":
errors_result = data["data"]
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": errors_result}, reply)
if reply_data_full_response:
if "App is not running" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": "No errors."}, reply)
@wrap_with_return_exception_responses
def reset_errors(self, job_id: str) -> FLAdminAPIResponse:
if not job_id:
raise APISyntaxError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise APISyntaxError("job_id must be str but got {}.".format(type(job_id)))
success, reply_data_full_response, reply = self._get_processed_cmd_reply_data(
AdminCommandNames.RESET_ERRORS + " " + job_id
)
if reply_data_full_response:
if "App is not running" in reply_data_full_response:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": reply_data_full_response}, reply)
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME, {"message": "Runtime error: could not handle server reply."}, reply
)
@wrap_with_return_exception_responses
def get_connected_client_list(self) -> FLAdminAPIResponse:
reply = self._check_status_server()
if reply["status"] == APIStatus.SUCCESS:
status_table = reply["details"][FLDetailKey.STATUS_TABLE]
list_of_connected_clients = []
# first line is the header of table
for row in status_table[1:]:
list_of_connected_clients.append(row[0])
return FLAdminAPIResponse(APIStatus.SUCCESS, {FLDetailKey.CONNECTED_CLIENTS: list_of_connected_clients})
else:
return FLAdminAPIResponse(APIStatus.ERROR_RUNTIME, {"message": "runtime error"}, reply)
@wrap_with_return_exception_responses
def wait_until_server_status(
self,
interval: int = 20,
timeout: int = None,
callback: Callable[[FLAdminAPIResponse, Optional[List]], bool] = default_server_status_handling_cb,
fail_attempts: int = 3,
**kwargs,
) -> FLAdminAPIResponse:
failed_attempts = 0
start = time.time()
while True:
reply = self._check_status_server()
if reply["details"].get(FLDetailKey.SERVER_ENGINE_STATUS):
met = callback(reply, **kwargs)
if met:
return FLAdminAPIResponse(APIStatus.SUCCESS, {}, None)
                failed_attempts = 0  # reset the consecutive-failure counter after a good reply
else:
print("Could not get reply from check status server, trying again later")
failed_attempts += 1
now = time.time()
if timeout is not None:
if now - start >= timeout:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": "Waited until timeout."}, None)
if failed_attempts > fail_attempts:
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME,
{
"message": "FL server status was not obtainable for more than the specified number of "
"fail_attempts. "
},
None,
)
time.sleep(interval)
@wrap_with_return_exception_responses
def wait_until_client_status(
self,
interval: int = 10,
timeout: int = None,
callback: Callable[[FLAdminAPIResponse, Optional[List]], bool] = default_client_status_handling_cb,
fail_attempts: int = 6,
**kwargs,
) -> FLAdminAPIResponse:
"""This is similar to wait_until_server_status() and is an example for using other information from a repeated
call, in this case check_status(TargetType.CLIENT). Custom code can be written to use any data available from
any call to make decisions for how to proceed. Take caution that the conditions will be met at some point, or
timeout should be set with logic outside this function to handle checks for potential errors or this may loop
indefinitely.
Args:
interval: in seconds, the time between consecutive checks of the server
timeout: if set, the amount of time this function will run until before returning a response message
callback: the reply from show_stats(TargetType.SERVER) will be passed to the callback, along with any additional kwargs
which can go on to perform additional logic.
fail_attempts: number of consecutive failed attempts of getting the server status before returning with ERROR_RUNTIME.
Returns: FLAdminAPIResponse
"""
failed_attempts = 0
start = time.time()
while True:
try:
reply = self.check_status(TargetType.CLIENT)
if reply:
met = callback(reply, **kwargs)
if met:
return FLAdminAPIResponse(APIStatus.SUCCESS, {}, None)
                    failed_attempts = 0  # reset the consecutive-failure counter after a good reply
else:
print("Could not get reply from check status client, trying again later")
failed_attempts += 1
except Exception as e:
print(f"Could not get clients stats, trying again later. Exception: {secure_format_exception(e)}")
failed_attempts += 1
now = time.time()
if timeout is not None:
if now - start >= timeout:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": "Waited until timeout."}, None)
if failed_attempts > fail_attempts:
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME,
{
"message": "FL client status was not obtainable for more than the specified number of "
"fail_attempts. "
},
None,
)
time.sleep(interval)
@wrap_with_return_exception_responses
def wait_until_server_stats(
self,
interval: int = 10,
timeout: int = None,
callback: Callable[[FLAdminAPIResponse, Optional[List]], bool] = default_stats_handling_cb,
fail_attempts: int = 6,
**kwargs,
) -> FLAdminAPIResponse:
"""This is similar to wait_until_server_status() and is an example for using other information from a repeated
call, in this case show_stats(TargetType.SERVER). Custom code can be written to use any data available from any
call to make decisions for how to proceed. Take caution that the conditions will be met at some point, or
timeout should be set with logic outside this function to handle checks for potential errors or this may loop
indefinitely.
Args:
interval: in seconds, the time between consecutive checks of the server
timeout: if set, the amount of time this function will run until before returning a response message
callback: the reply from show_stats(TargetType.SERVER) will be passed to the callback, along with any additional kwargs
which can go on to perform additional logic.
fail_attempts: number of consecutive failed attempts of getting the server status before returning with ERROR_RUNTIME.
Returns: FLAdminAPIResponse
"""
failed_attempts = 0
start = time.time()
while True:
try:
reply = self.show_stats(TargetType.SERVER)
try:
if reply:
met = callback(reply, **kwargs)
if met:
return FLAdminAPIResponse(APIStatus.SUCCESS, {}, None)
                        failed_attempts = 0  # reset the consecutive-failure counter after a good reply
else:
print("Could not get reply from show stats server, trying again later")
failed_attempts += 1
except AttributeError:
# if attribute cannot be found, check if app is no longer running to return APIStatus.SUCCESS
if reply.get("details").get("message") == "App is not running":
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": "Waited until app not running."}, None)
except Exception as e:
print(f"Could not get server stats, trying again later. Exception: {secure_format_exception(e)}")
failed_attempts += 1
now = time.time()
if timeout is not None:
if now - start >= timeout:
return FLAdminAPIResponse(APIStatus.SUCCESS, {"message": "Waited until timeout."}, None)
if failed_attempts > fail_attempts:
return FLAdminAPIResponse(
APIStatus.ERROR_RUNTIME,
{
"message": "FL server stats was not obtainable for more than the specified number of "
"fail_attempts. "
},
None,
)
time.sleep(interval)
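# A hedged end-to-end sketch, not part of the original module: the file locations,
# user name, and job folder name are placeholders that would normally come from a
# provisioned admin startup kit and from an upload_dir containing the job folder.
def _example_admin_workflow(overseer_agent: OverseerAgent):
    api = FLAdminAPI(
        overseer_agent=overseer_agent,
        ca_cert="startup/rootCA.pem",
        client_cert="startup/client.crt",
        client_key="startup/client.key",
        upload_dir="transfer",
        download_dir="transfer",
        user_name="admin@nvidia.com",
    )
    resp = api.submit_job("hello-world")
    if resp["status"] == APIStatus.SUCCESS:
        print("submitted job:", resp["details"]["job_id"])
        # block until the default callback sees the server engine "stopped"
        api.wait_until_server_status(timeout=600)
    api.logout()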
| NVFlare-main | nvflare/fuel/hci/client/fl_admin_api.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from datetime import datetime
EXCLUDED_ACTIONS = {
"scheduler.check_resource",
"_check_session",
"_commands",
"__aux_command__",
}
class Auditor(object):
def __init__(self, audit_file_name: str):
"""Manages the audit file to log events.
Args:
audit_file_name (str): the location to save audit log file
"""
assert isinstance(audit_file_name, str), "audit_file_name must be str"
if os.path.exists(audit_file_name):
assert os.path.isfile(audit_file_name), "audit_file_name is not a valid file"
# create/open the file
self.audit_file = open(audit_file_name, "a")
def add_event(self, user: str, action: str, ref: str = "", msg: str = "") -> str:
if action in EXCLUDED_ACTIONS:
return ""
        # the server might have already shut down, in which case audit_file is None
if self.audit_file is None:
return ""
event_id = uuid.uuid4()
parts = [
f"[E:{event_id}]",
f"[R:{ref}]" if ref else "",
f"[T:{datetime.now()}]",
f"[U:{user}]",
f"[A:{action}]",
msg if msg else "",
]
line = "".join(parts)
self.audit_file.write(line + "\n")
self.audit_file.flush()
return str(event_id)
def add_job_event(
self, job_id: str, scope_name: str = "", task_name: str = "", task_id: str = "", ref: str = "", msg: str = ""
) -> str:
event_id = uuid.uuid4()
parts = [
f"[E:{event_id}]",
f"[R:{ref}]" if ref else "",
f"[T:{datetime.now()}]",
f"[S:{scope_name}]" if scope_name else "",
f"[J:{job_id}]",
f"[A:{task_name}#{task_id}]" if task_name else "",
msg if msg else "",
]
line = "".join(parts)
self.audit_file.write(line + "\n")
self.audit_file.flush()
return str(event_id)
def close(self):
if self.audit_file is not None:
self.audit_file.close()
self.audit_file = None
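# A hedged usage sketch, not part of the original module: each add_event() call
# appends one line of bracketed fields to the audit file, in the form
# [E:<uuid>][T:<timestamp>][U:<user>][A:<action>]<msg>. The path is a placeholder.
def _example_audit(path: str = "/tmp/audit.log") -> str:
    auditor = Auditor(path)
    event_id = auditor.add_event(user="admin", action="submit_job", msg="job_1234")
    auditor.close()
    return event_id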
class AuditService(object):
"""Service for interacting with Auditor to add events to log."""
the_auditor = None
@staticmethod
def initialize(audit_file_name: str):
if not AuditService.the_auditor:
AuditService.the_auditor = Auditor(audit_file_name)
return AuditService.the_auditor
@staticmethod
def get_auditor():
return AuditService.the_auditor
@staticmethod
def add_event(user: str, action: str, ref: str = "", msg: str = "") -> str:
if not AuditService.the_auditor:
return ""
return AuditService.the_auditor.add_event(user, action, ref, msg)
@staticmethod
def add_job_event(
job_id: str, scope_name: str = "", task_name: str = "", task_id: str = "", ref: str = "", msg: str = ""
) -> str:
if not AuditService.the_auditor:
return ""
return AuditService.the_auditor.add_job_event(
scope_name=scope_name, job_id=job_id, task_name=task_name, task_id=task_id, ref=ref, msg=msg
)
@staticmethod
def close():
if AuditService.the_auditor:
AuditService.the_auditor.close()
| NVFlare-main | nvflare/fuel/sec/audit.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/sec/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from base64 import b64decode
from enum import Enum
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
class LoadResult(Enum):
"""Constants for different results when loading secure content."""
OK = "ok"
NOT_MANAGED = "notManaged"
NO_SUCH_CONTENT = "noSuchContent"
NOT_SIGNED = "notSigned"
INVALID_SIGNATURE = "invalidSignature"
INVALID_CONTENT = "invalidContent"
class SecurityContentManager(object):
def __init__(self, content_folder, signature_filename="signature.json", root_cert="rootCA.pem"):
"""Content manager used by SecurityContentService to load secure content.
Args:
content_folder (str): the folder path that includes signature file
signature_filename (str, optional): the signature file (signed dictionary). Defaults to "signature.json".
root_cert (str, optional): root CA certificate filename. Defaults to "rootCA.pem".
"""
self.content_folder = content_folder
signature_path = os.path.join(self.content_folder, signature_filename)
rootCA_cert_path = os.path.join(self.content_folder, root_cert)
if os.path.exists(signature_path) and os.path.exists(rootCA_cert_path):
            with open(signature_path, "rt") as f:
                self.signature = json.load(f)
            for k in self.signature:
                self.signature[k] = b64decode(self.signature[k].encode("utf-8"))
            with open(rootCA_cert_path, "rb") as f:
                cert = x509.load_pem_x509_certificate(f.read(), default_backend())
self.public_key = cert.public_key()
self.valid_config = True
else:
self.signature = dict()
self.valid_config = False
def load_content(self, file_under_verification):
"""Loads the data of the file under verification and verifies that the signature is valid.
Args:
file_under_verification: file to load and verify
Returns:
A tuple of the file data and the LoadResult. File data may be None if the data cannot be loaded.
"""
full_path = os.path.join(self.content_folder, file_under_verification)
data = None
if not os.path.exists(full_path):
return data, LoadResult.NO_SUCH_CONTENT
with open(full_path, "rb") as f:
data = f.read()
if not data:
return data, LoadResult.NO_SUCH_CONTENT
if self.valid_config and file_under_verification in self.signature:
signature = self.signature[file_under_verification]
try:
self.public_key.verify(
signature=signature,
data=data,
padding=padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),
algorithm=hashes.SHA256(),
)
result = LoadResult.OK
except InvalidSignature:
result = LoadResult.INVALID_SIGNATURE
else:
result = LoadResult.NOT_SIGNED
return data, result
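# A hedged sketch of the signing side, not part of this module: NVFlare's
# provisioning normally produces signature.json, but conceptually each entry is
# the base64 of an RSA-PSS/SHA256 signature over the file bytes, keyed by file
# name, which load_content() above verifies against the root CA public key.
# The private_key argument is assumed to be an RSA private key object from the
# cryptography package.
def _example_sign_file(private_key, file_path: str) -> str:
    from base64 import b64encode
    with open(file_path, "rb") as f:
        data = f.read()
    signature = private_key.sign(
        data,
        padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),
        hashes.SHA256(),
    )
    return b64encode(signature).decode("utf-8")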
class SecurityContentService(object):
"""Uses SecurityContentManager to load secure content."""
security_content_manager = None
@staticmethod
def initialize(content_folder: str, signature_filename="signature.json", root_cert="rootCA.pem"):
if SecurityContentService.security_content_manager is None:
SecurityContentService.security_content_manager = SecurityContentManager(
content_folder, signature_filename, root_cert
)
@staticmethod
def load_content(file_under_verification):
if not SecurityContentService.security_content_manager:
return None, LoadResult.NOT_MANAGED
return SecurityContentService.security_content_manager.load_content(file_under_verification)
@staticmethod
def load_json(file_under_verification):
if not SecurityContentService.security_content_manager:
return None, LoadResult.NOT_MANAGED
json_data = None
data_bytes, result = SecurityContentService.security_content_manager.load_content(file_under_verification)
if data_bytes:
try:
data_text = data_bytes.decode("ascii")
json_data = json.loads(data_text)
            except (UnicodeDecodeError, json.JSONDecodeError):
return None, LoadResult.INVALID_CONTENT
return json_data, result
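# A hedged usage sketch, not part of the original module: initialize the service
# once at startup, then load and verify managed files anywhere in the process.
# The folder and file names are placeholders from a typical provisioned kit.
def _example_load_verified_json(startup_dir: str = "startup") -> dict:
    SecurityContentService.initialize(content_folder=startup_dir)
    data, result = SecurityContentService.load_json("fed_server.json")
    if result != LoadResult.OK:
        raise RuntimeError(f"cannot trust fed_server.json: {result.value}")
    return data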
| NVFlare-main | nvflare/fuel/sec/security_content_service.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Tuple
_KEY_PERMISSIONS = "permissions"
_KEY_FORMAT_VERSION = "format_version"
_TARGET_SITE = "site"
_TARGET_SUBMITTER = "submitter"
_ANY_RIGHT = "*"
class FieldNames(str, Enum):
USER_NAME = "User name"
USER_ORG = "User org"
USER_ROLE = "User role"
EXP = "Expression"
TARGET_TYPE = "Target type"
TARGET_VALUE = "Target value"
SITE_ORG = "Site org"
ROLE_NAME = "Role name"
RIGHT = "Right"
CATEGORY_RIGHT = "Right for Category"
class Person(object):
def __init__(self, name: str, org: str, role: str):
self.name = _normalize_str(name, FieldNames.USER_NAME)
self.org = _normalize_str(org, FieldNames.USER_ORG)
self.role = _normalize_str(role, FieldNames.USER_ROLE)
    def __str__(self):
        # check the raw attributes first, so the all-empty case actually triggers
        if (not self.name) and (not self.org) and (not self.role):
            return "None"
        name = self.name if self.name else "None"
        org = self.org if self.org else "None"
        role = self.role if self.role else "None"
        return f"{name}:{org}:{role}"
class AuthzContext(object):
def __init__(self, right: str, user: Person, submitter: Person = None):
"""Base class to contain context data for authorization."""
if not isinstance(user, Person):
raise ValueError(f"user needs to be of type Person but got {type(user)}")
if submitter and not isinstance(submitter, Person):
raise ValueError(f"submitter needs to be of type Person but got {type(submitter)}")
self.right = right
self.user = user
self.submitter = submitter
self.attrs = {}
if submitter is None:
self.submitter = Person("", "", "")
def set_attr(self, key: str, value):
self.attrs[key] = value
def get_attr(self, key: str, default=None):
return self.attrs.get(key, default)
class ConditionEvaluator(ABC):
@abstractmethod
def evaluate(self, site_org: str, ctx: AuthzContext) -> bool:
pass
class UserOrgEvaluator(ConditionEvaluator):
def __init__(self, target):
self.target = target
def evaluate(self, site_org: str, ctx: AuthzContext):
if self.target == _TARGET_SITE:
return ctx.user.org == site_org
elif self.target == _TARGET_SUBMITTER:
return ctx.user.org == ctx.submitter.org
else:
return ctx.user.org == self.target
class UserNameEvaluator(ConditionEvaluator):
def __init__(self, target: str):
self.target = target
def evaluate(self, site_org: str, ctx: AuthzContext):
if self.target == _TARGET_SUBMITTER:
return ctx.user.name == ctx.submitter.name
else:
return ctx.user.name == self.target
class TrueEvaluator(ConditionEvaluator):
def evaluate(self, site_org: str, ctx: AuthzContext) -> bool:
return True
class FalseEvaluator(ConditionEvaluator):
def evaluate(self, site_org: str, ctx: AuthzContext) -> bool:
return False
class _RoleRightConditions(object):
def __init__(self):
self.allowed_conditions = []
self.blocked_conditions = []
self.exp = None
    def _any_condition_matched(self, conds: List[ConditionEvaluator], site_org: str, ctx: AuthzContext):
# if any condition is met, return True
# only when all conditions fail to match, return False
for e in conds:
matched = e.evaluate(site_org, ctx)
if matched:
return True
return False
def evaluate(self, site_org: str, ctx: AuthzContext):
# first evaluate blocked list
if self.blocked_conditions:
if self._any_condition_matched(self.blocked_conditions, site_org, ctx):
# if any block condition is met, return False
return False
# evaluate allowed list
if self.allowed_conditions:
if self._any_condition_matched(self.allowed_conditions, site_org, ctx):
return True
else:
# all allowed conditions failed
return False
# no allowed list specified - only blocked list specified
# we got here since no blocked condition matched
return True
def _parse_one_expression(self, exp) -> str:
v = _normalize_str(exp, FieldNames.EXP)
blocked = False
parts = v.split()
if len(parts) == 2 and parts[0] == "not":
blocked = True
v = parts[1]
if v in ["all", "any"]:
ev = TrueEvaluator()
elif v in ["none", "no"]:
ev = FalseEvaluator()
else:
parts = v.split(":")
if len(parts) == 2:
target_type = _normalize_str(parts[0], FieldNames.TARGET_TYPE)
target_value = _normalize_str(parts[1], FieldNames.TARGET_VALUE)
if target_type in ["o", "org"]:
ev = UserOrgEvaluator(target_value)
elif target_type in ["n", "name"]:
ev = UserNameEvaluator(target_value)
else:
return f'bad condition expression "{exp}": invalid type "{target_type}"'
else:
return f'bad condition expression "{exp}"'
if blocked:
self.blocked_conditions.append(ev)
else:
self.allowed_conditions.append(ev)
return ""
def parse_expression(self, exp):
"""Parses the value expression into a list of condition(s).
Args:
exp: expression to be parsed
Returns:
An error string if value is invalid.
"""
self.exp = exp
if isinstance(exp, str):
return self._parse_one_expression(exp)
if isinstance(exp, list):
            # the list is expected to contain only str elements
if not exp:
# empty list
return "bad condition expression - no conditions specified"
for ex in exp:
err = self._parse_one_expression(ex)
if err:
# this is an error
return err
else:
return f"bad condition expression type - expect str or list but got {type(exp)}"
return ""
class Policy(object):
def __init__(self, config: dict, role_right_map: dict, roles: list, rights: list, role_rights: dict):
self.config = config
self.role_right_map = role_right_map
self.roles = roles
self.rights = rights
self.roles.sort()
self.rights.sort()
self.role_rights = role_rights
def get_rights(self):
return self.rights
def get_roles(self):
return self.roles
def _eval_for_role(self, role: str, site_org: str, ctx: AuthzContext):
conds = self.role_right_map.get(_role_right_key(role, _ANY_RIGHT))
if not conds:
conds = self.role_right_map.get(_role_right_key(role, ctx.right))
if not conds:
return False
return conds.evaluate(site_org, ctx)
    def evaluate(self, site_org: str, ctx: AuthzContext) -> Tuple[bool, str]:
        """Evaluates the right in ctx for the user's role against this policy.
        Args:
            site_org: org of the site on whose behalf the evaluation is done
            ctx: authorization context carrying the right, user, and submitter
        Returns:
            A tuple of (result, error)
        """
site_org = _normalize_str(site_org, FieldNames.SITE_ORG)
permitted = self._eval_for_role(role=ctx.user.role, site_org=site_org, ctx=ctx)
if permitted:
            # the user's role grants this right under the matched conditions
return True, ""
return False, ""
def _normalize_str(s: str, field_name: FieldNames) -> str:
if not isinstance(s, str):
raise TypeError(f"{field_name.value} must be a str but got {type(s)}")
return " ".join(s.lower().split())
def _role_right_key(role_name: str, right_name: str):
return role_name + ":" + right_name
def _add_role_right_conds(role, right, conds, rr_map: dict, rights, right_conds):
right_conds[right] = conds.exp
rr_map[_role_right_key(role, right)] = conds
if right not in rights:
rights.append(right)
def parse_policy_config(config: dict, right_categories: dict):
    """Validates that an authorization policy configuration has the right syntax.
    Args:
        config: configuration dictionary to validate
        right_categories: a dict of right => category mapping
    Returns:
        A tuple of (Policy, "") if the config is valid, or (None, error string) otherwise.
    """
if not isinstance(config, dict):
return None, f"policy definition must be a dict but got {type(config)}"
if not config:
# empty policy
return None, "policy definition is empty"
role_right_map = {}
role_rights = {}
roles = []
rights = []
# Compute category => right list
cat_to_rights = {}
if right_categories:
for r, c in right_categories.items():
right_list = cat_to_rights.get(c)
if not right_list:
right_list = []
right_list.append(r)
cat_to_rights[c] = right_list
# check version
format_version = config.get(_KEY_FORMAT_VERSION)
if not format_version or format_version != "1.0":
return None, "missing or invalid policy format_version: must be 1.0"
permissions = config.get(_KEY_PERMISSIONS)
if not permissions:
return None, "missing permissions"
if not isinstance(permissions, dict):
return None, f"invalid permissions: expect a dict but got {type(permissions)}"
# permissions is a dict of role => rights;
for role_name, right_conf in permissions.items():
if not isinstance(role_name, str):
return None, f"bad role name: expect a str but got {type(role_name)}"
role_name = _normalize_str(role_name, FieldNames.ROLE_NAME)
roles.append(role_name)
right_conds = {} # rights of this role
role_rights[role_name] = right_conds
if isinstance(right_conf, str) or isinstance(right_conf, list):
conds = _RoleRightConditions()
err = conds.parse_expression(right_conf)
if err:
return None, err
_add_role_right_conds(role_name, _ANY_RIGHT, conds, role_right_map, rights, right_conds)
continue
if not isinstance(right_conf, dict):
return None, f"bad right config: expect a dict but got {type(right_conf)}"
# process right categories
for right, exp in right_conf.items():
if not isinstance(right, str):
return None, f"bad right name: expect a str but got {type(right)}"
right = _normalize_str(right, FieldNames.CATEGORY_RIGHT)
# see whether this is a right category
right_list = cat_to_rights.get(right)
if not right_list:
# this is a regular right - skip it
continue
conds = _RoleRightConditions()
err = conds.parse_expression(exp)
if err:
return None, err
# all rights in the category share the same conditions
_add_role_right_conds(role_name, right, conds, role_right_map, rights, right_conds)
for r in right_list:
_add_role_right_conds(role_name, r, conds, role_right_map, rights, right_conds)
# process regular rights, which may override the rights from categories
for right, exp in right_conf.items():
right = _normalize_str(right, FieldNames.RIGHT)
# see whether this is a right category
right_list = cat_to_rights.get(right)
if right_list:
# this is category - already processed
continue
conds = _RoleRightConditions()
err = conds.parse_expression(exp)
if err:
return None, err
# this may cause the same right to be overwritten in the map
_add_role_right_conds(role_name, right, conds, role_right_map, rights, right_conds)
return Policy(config=config, role_right_map=role_right_map, role_rights=role_rights, roles=roles, rights=rights), ""
class Authorizer(object):
def __init__(self, site_org: str, right_categories: dict = None):
"""Base class containing the authorization policy."""
self.site_org = _normalize_str(site_org, FieldNames.SITE_ORG)
self.right_categories = right_categories
self.policy = None
self.last_load_time = None
def get_policy(self) -> Policy:
return self.policy
def authorize(self, ctx: AuthzContext) -> (bool, str):
if not ctx:
return True, ""
if not isinstance(ctx, AuthzContext):
return False, f"ctx must be AuthzContext but got {type(ctx)}"
if "super" == ctx.user.role:
# use this for testing purpose
return True, ""
authorized, err = self.evaluate(ctx)
if not authorized:
if err:
return False, err
else:
return (
False,
f"user '{ctx.user.name}' is not authorized for '{ctx.right}'",
)
return True, ""
def evaluate(self, ctx: AuthzContext) -> (bool, str):
if not self.policy:
return False, "policy not defined"
return self.policy.evaluate(ctx=ctx, site_org=self.site_org)
def load_policy(self, policy_config: dict) -> str:
policy, err = parse_policy_config(policy_config, self.right_categories)
if err:
return err
self.policy = policy
self.last_load_time = time.time()
return ""
class AuthorizationService(object):
the_authorizer = None
@staticmethod
def initialize(authorizer: Authorizer) -> (Authorizer, str):
if not isinstance(authorizer, Authorizer):
raise ValueError(f"authorizer must be Authorizer but got {type(authorizer)}")
if not AuthorizationService.the_authorizer:
# authorizer is not loaded
AuthorizationService.the_authorizer = authorizer
return AuthorizationService.the_authorizer, ""
@staticmethod
def get_authorizer():
return AuthorizationService.the_authorizer
@staticmethod
def authorize(ctx: AuthzContext):
if not AuthorizationService.the_authorizer:
# no authorizer - assume that authorization is not required
return True, ""
return AuthorizationService.the_authorizer.authorize(ctx)
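if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library).
    # The top-level key names "format_version" and "permissions" are inferred
    # from the validation messages in parse_policy_config above.
    sample_policy = {
        "format_version": "1.0",
        "permissions": {
            # condition grammar: "all"/"any", "none"/"no", "o:<org>", "n:<name>",
            # each optionally prefixed with "not"
            "project_admin": "any",
            "org_admin": {"train": "o:acme", "view": "not n:bob"},
        },
    }
    policy, err = parse_policy_config(sample_policy, right_categories={})
    print("parse error:", err or "none")
    if policy:
        print("roles:", policy.get_roles())
        print("rights:", policy.get_rights())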
| NVFlare-main | nvflare/fuel/sec/authz.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommError(Exception):
# Error codes
ERROR = "ERROR"
NOT_READY = "NOT_READY"
BAD_DATA = "BAD_DATA"
BAD_CONFIG = "BAD_CONFIG"
CLOSED = "CLOSED"
NOT_SUPPORTED = "NOT_SUPPORTED"
TIMEOUT = "TIMEOUT"
def __init__(self, code: str, message=None):
self.code = code
self.message = message
def __str__(self):
if self.message:
return f"Code: {self.code} Error: {self.message}"
else:
return f"Code: {self.code}"
| NVFlare-main | nvflare/fuel/f3/comm_error.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nvflare.fuel.f3.drivers.net_utils import MAX_PAYLOAD_SIZE
from nvflare.fuel.utils.config import Config
from nvflare.fuel.utils.config_service import ConfigService
_comm_config_files = ["comm_config.json", "comm_config.json.default"]
DEFAULT_MAX_MSG_SIZE = MAX_PAYLOAD_SIZE
class VarName:
MAX_MESSAGE_SIZE = "max_message_size"
ALLOW_ADHOC_CONNS = "allow_adhoc_conns"
ADHOC_CONN_SCHEME = "adhoc_conn_scheme"
INTERNAL_CONN_SCHEME = "internal_conn_scheme"
BACKBONE_CONN_GEN = "backbone_conn_gen"
SUBNET_HEARTBEAT_INTERVAL = "subnet_heartbeat_interval"
SUBNET_TROUBLE_THRESHOLD = "subnet_trouble_threshold"
COMM_DRIVER_PATH = "comm_driver_path"
HEARTBEAT_INTERVAL = "heartbeat_interval"
class CommConfigurator:
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
        # try each candidate in order so that comm_config.json.default
        # serves as a fallback when comm_config.json is absent
        config: Config = None
        for basename in _comm_config_files:
            config = ConfigService.load_configuration(file_basename=basename)
            if config is not None:
                break
        self.config = None if config is None else config.to_dict()
def get_config(self):
return self.config
def get_max_message_size(self):
return ConfigService.get_int_var(VarName.MAX_MESSAGE_SIZE, self.config, default=DEFAULT_MAX_MSG_SIZE)
def allow_adhoc_connections(self, default):
return ConfigService.get_bool_var(VarName.ALLOW_ADHOC_CONNS, self.config, default=default)
def get_adhoc_connection_scheme(self, default):
return ConfigService.get_str_var(VarName.ADHOC_CONN_SCHEME, self.config, default=default)
def get_internal_connection_scheme(self, default):
return ConfigService.get_str_var(VarName.INTERNAL_CONN_SCHEME, self.config, default=default)
def get_backbone_connection_generation(self, default):
return ConfigService.get_int_var(VarName.BACKBONE_CONN_GEN, self.config, default=default)
def get_subnet_heartbeat_interval(self, default):
return ConfigService.get_int_var(VarName.SUBNET_HEARTBEAT_INTERVAL, self.config, default)
def get_subnet_trouble_threshold(self, default):
return ConfigService.get_int_var(VarName.SUBNET_TROUBLE_THRESHOLD, self.config, default)
def get_comm_driver_path(self, default):
return ConfigService.get_str_var(VarName.COMM_DRIVER_PATH, self.config, default=default)
def get_heartbeat_interval(self, default):
return ConfigService.get_int_var(VarName.HEARTBEAT_INTERVAL, self.config, default)
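if __name__ == "__main__":
    # Illustrative shape of a comm_config.json file (keys taken from VarName
    # above; the values are arbitrary examples, not NVFlare defaults).
    # ConfigService must be initialized with a config search path before
    # CommConfigurator() can actually load such a file.
    import json

    example = {
        "max_message_size": 100 * 1024 * 1024,  # bytes
        "allow_adhoc_conns": False,
        "internal_conn_scheme": "tcp",
        "subnet_heartbeat_interval": 5,  # seconds
        "subnet_trouble_threshold": 15,  # seconds
    }
    print(json.dumps(example, indent=2))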
| NVFlare-main | nvflare/fuel/f3/comm_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import sys
import threading
import time
from typing import List, Tuple, Union
_KEY_MAX = "max"
_KEY_MIN = "min"
_KEY_NAME = "name"
_KEY_DESC = "description"
_KEY_TOTAL = "total"
_KEY_COUNT = "count"
_KEY_UNIT = "unit"
_KEY_MARKS = "marks"
_KEY_COUNTER_NAMES = "counter_names"
_KEY_CAT_DATA = "cat_data"
class StatsMode:
COUNT = "count"
PERCENT = "percent"
AVERAGE = "avg"
MIN = "min"
MAX = "max"
VALID_HIST_MODES = [StatsMode.COUNT, StatsMode.PERCENT, StatsMode.AVERAGE, StatsMode.MAX, StatsMode.MIN]
def format_value(v: float, n=3):
if v is None:
return "n/a"
fmt = "{:." + str(n) + "e}"
return fmt.format(v)
class _Bin:
def __init__(self, count=0, total_value=0.0, min_value=None, max_value=None):
self.count = count
self.total = total_value
self.min = min_value
self.max = max_value
def record_value(self, value: float):
self.count += 1
self.total += value
if self.min is None or self.min > value:
self.min = value
if self.max is None or self.max < value:
self.max = value
def get_content(self, mode=StatsMode.COUNT, total_count=0):
if self.count == 0:
return ""
if mode == StatsMode.COUNT:
return str(self.count)
if mode == StatsMode.PERCENT:
return str(round(self.count / total_count, 2))
if mode == StatsMode.AVERAGE:
avg = self.total / self.count
return format_value(avg)
if mode == StatsMode.MIN:
return format_value(self.min)
if mode == StatsMode.MAX:
return format_value(self.max)
return "n/a"
def to_dict(self) -> dict:
return {
_KEY_COUNT: self.count,
_KEY_TOTAL: self.total,
_KEY_MIN: self.min if self.min is not None else "",
_KEY_MAX: self.max if self.max is not None else "",
}
@staticmethod
def from_dict(d: dict):
if not isinstance(d, dict):
raise ValueError(f"d must be dict but got {type(d)}")
b = _Bin()
b.count = d.get(_KEY_COUNT, 0)
b.total = d.get(_KEY_TOTAL, 0)
m = d.get(_KEY_MIN)
if isinstance(m, str):
b.min = None
else:
b.min = m
x = d.get(_KEY_MAX)
if isinstance(x, str):
b.max = None
else:
b.max = x
return b
class StatsPool:
def __init__(self, name: str, description: str):
self.name = name
self.description = description
def to_dict(self) -> dict:
pass
def get_table(self, mode):
pass
@staticmethod
def from_dict(d: dict):
pass
class RecordWriter:
def write(self, pool_name: str, category: str, value: float, report_time: float):
pass
def close(self):
pass
class HistPool(StatsPool):
def __init__(self, name: str, description: str, marks: Union[List[float], Tuple], unit: str, record_writer=None):
if record_writer:
if not isinstance(record_writer, RecordWriter):
raise TypeError(f"record_writer must be RecordWriter but got {type(record_writer)}")
StatsPool.__init__(self, name, description)
self.update_lock = threading.Lock()
self.unit = unit
self.marks = marks
self.record_writer = record_writer # used for writing raw records
self.cat_bins = {} # category name => list of bins
if not marks:
raise ValueError("marks not specified")
if len(marks) < 2:
raise ValueError(f"marks must have at least two numbers but got {len(marks)}")
for i in range(1, len(marks)):
if marks[i] <= marks[i - 1]:
raise ValueError(f"marks must contain increasing values, but got {marks}")
# A range is defined: left <= N < right [...)
# [..., M1) [M1, M2) [M2, M3) [M3, ...)
m = sys.float_info.max
self.ranges = [(-m, marks[0])]
self.range_names = [f"<{marks[0]}"]
for i in range(len(marks) - 1):
self.ranges.append((marks[i], marks[i + 1]))
self.range_names.append(f"{marks[i]}-{marks[i+1]}")
self.ranges.append((marks[-1], m))
self.range_names.append(f">={marks[-1]}")
def record_value(self, category: str, value: float):
with self.update_lock:
bins = self.cat_bins.get(category)
if bins is None:
bins = [None for _ in range(len(self.ranges))]
self.cat_bins[category] = bins
for i in range(len(self.ranges)):
r = self.ranges[i]
if r[0] <= value < r[1]:
b = bins[i]
if not b:
b = _Bin()
bins[i] = b
b.record_value(value)
if self.record_writer:
self.record_writer.write(pool_name=self.name, category=category, value=value, report_time=time.time())
def get_table(self, mode=StatsMode.COUNT):
with self.update_lock:
headers = ["category"]
has_values = [False for _ in range(len(self.ranges))]
# determine bins that have values in any category
for _, bins in self.cat_bins.items():
for i in range(len(self.ranges)):
if bins[i]:
has_values[i] = True
for i in range(len(self.ranges)):
if has_values[i]:
headers.append(self.range_names[i])
headers.append("overall")
rows = []
for cat_name in sorted(self.cat_bins.keys()):
bins = self.cat_bins[cat_name]
total_count = 0
total_value = 0.0
overall_min = None
overall_max = None
for b in bins:
if b:
total_count += b.count
total_value += b.total
if b.max is not None:
if overall_max is None or overall_max < b.max:
overall_max = b.max
if b.min is not None:
if overall_min is None or overall_min > b.min:
overall_min = b.min
r = [cat_name]
for i in range(len(bins)):
if not has_values[i]:
continue
b = bins[i]
if not b:
r.append("")
else:
r.append(b.get_content(mode, total_count))
# compute overall values
overall_bin = _Bin(
count=total_count, total_value=total_value, max_value=overall_max, min_value=overall_min
)
r.append(overall_bin.get_content(mode, total_count))
rows.append(r)
return headers, rows
def to_dict(self):
with self.update_lock:
cat_bins = {}
for cat, bins in self.cat_bins.items():
exp_bins = []
for b in bins:
if not b:
exp_bins.append("")
else:
exp_bins.append(b.to_dict())
cat_bins[cat] = exp_bins
return {
_KEY_NAME: self.name,
_KEY_DESC: self.description,
_KEY_MARKS: list(self.marks),
_KEY_UNIT: self.unit,
_KEY_CAT_DATA: cat_bins,
}
@staticmethod
def from_dict(d: dict):
p = HistPool(
name=d.get(_KEY_NAME, ""),
description=d.get(_KEY_DESC, ""),
unit=d.get(_KEY_UNIT, ""),
marks=d.get(_KEY_MARKS),
)
cat_bins = d.get(_KEY_CAT_DATA)
if not cat_bins:
return p
for cat, bins in cat_bins.items():
in_bins = []
for b in bins:
if not b:
in_bins.append(None)
else:
assert isinstance(b, dict)
in_bins.append(_Bin.from_dict(b))
p.cat_bins[cat] = in_bins
return p
class CounterPool(StatsPool):
def __init__(self, name: str, description: str, counter_names: List[str], dynamic_counter_name=True):
if not counter_names and not dynamic_counter_name:
raise ValueError("counter_names cannot be empty")
StatsPool.__init__(self, name, description)
self.counter_names = counter_names
self.cat_counters = {} # dict of cat_name => counter dict (counter_name => int)
self.dynamic_counter_name = dynamic_counter_name
self.update_lock = threading.Lock()
def increment(self, category: str, counter_name: str, amount=1):
with self.update_lock:
if counter_name not in self.counter_names:
if self.dynamic_counter_name:
self.counter_names.append(counter_name)
else:
raise ValueError(f"'{counter_name}' is not defined in pool '{self.name}'")
counters = self.cat_counters.get(category)
if not counters:
counters = {}
self.cat_counters[category] = counters
c = counters.get(counter_name, 0)
c += amount
counters[counter_name] = c
def get_table(self, mode=""):
with self.update_lock:
headers = ["category"]
eff_counter_names = []
for cn in self.counter_names:
for _, counters in self.cat_counters.items():
v = counters.get(cn, 0)
if v > 0:
eff_counter_names.append(cn)
break
headers.extend(eff_counter_names)
rows = []
for cat_name in sorted(self.cat_counters.keys()):
counters = self.cat_counters[cat_name]
r = [cat_name]
for cn in eff_counter_names:
value = counters.get(cn, 0)
r.append(str(value))
rows.append(r)
return headers, rows
def to_dict(self):
with self.update_lock:
return {
_KEY_NAME: self.name,
_KEY_DESC: self.description,
_KEY_COUNTER_NAMES: list(self.counter_names),
_KEY_CAT_DATA: self.cat_counters,
}
@staticmethod
def from_dict(d: dict):
p = CounterPool(
name=d.get(_KEY_NAME, ""), description=d.get(_KEY_DESC, ""), counter_names=d.get(_KEY_COUNTER_NAMES)
)
p.cat_counters = d.get(_KEY_CAT_DATA)
return p
def new_time_pool(name: str, description="", marks=None, record_writer=None) -> HistPool:
if not marks:
marks = (0.0001, 0.0005, 0.001, 0.002, 0.004, 0.008, 0.01, 0.02, 0.04, 0.08, 0.1, 0.2, 0.4, 0.8, 1.0, 2.0)
return HistPool(name=name, description=description, marks=marks, unit="second", record_writer=record_writer)
def new_message_size_pool(name: str, description="", marks=None, record_writer=None) -> HistPool:
if not marks:
marks = (0.01, 0.1, 1, 10, 50, 100, 200, 500, 800, 1000)
return HistPool(name=name, description=description, marks=marks, unit="MB", record_writer=record_writer)
def parse_hist_mode(mode: str) -> str:
if not mode:
return StatsMode.COUNT
if mode.startswith("p"):
return StatsMode.PERCENT
elif mode.startswith("c"):
return StatsMode.COUNT
elif mode.startswith("a"):
return StatsMode.AVERAGE
if mode not in VALID_HIST_MODES:
return ""
else:
return mode
class StatsPoolManager:
_CONFIG_KEY_SAVE_POOLS = "save_pools"
lock = threading.Lock()
pools = {} # name => pool
pool_config = {}
record_writer = None
@classmethod
def _check_name(cls, name, scope):
name = name.lower()
if name not in cls.pools:
return name
if scope:
name = f"{name}@{scope}"
if name not in cls.pools:
return name
raise ValueError(f"pool '{name}' is already defined")
@classmethod
def set_pool_config(cls, config: dict):
if not isinstance(config, dict):
raise ValueError(f"config data must be dict but got {type(config)}")
for k, v in config.items():
cls.pool_config[k.lower()] = v
@classmethod
def set_record_writer(cls, record_writer: RecordWriter):
if not isinstance(record_writer, RecordWriter):
raise TypeError(f"record_writer must be RecordWriter but got {type(record_writer)}")
cls.record_writer = record_writer
@classmethod
def _keep_hist_records(cls, name):
name = name.lower()
save_pools_list = cls.pool_config.get(cls._CONFIG_KEY_SAVE_POOLS, None)
if not save_pools_list:
return False
return ("*" in save_pools_list) or (name in save_pools_list)
@classmethod
def add_time_hist_pool(cls, name: str, description: str, marks=None, scope=None):
# check pool config
keep_records = cls._keep_hist_records(name)
name = cls._check_name(name, scope)
record_writer = cls.record_writer if keep_records else None
p = new_time_pool(name, description, marks, record_writer=record_writer)
cls.pools[name] = p
return p
@classmethod
def add_msg_size_pool(cls, name: str, description: str, marks=None, scope=None):
keep_records = cls._keep_hist_records(name)
name = cls._check_name(name, scope)
record_writer = cls.record_writer if keep_records else None
p = new_message_size_pool(name, description, marks, record_writer=record_writer)
cls.pools[name] = p
return p
@classmethod
def add_counter_pool(cls, name: str, description: str, counter_names: list, scope=None):
name = cls._check_name(name, scope)
p = CounterPool(name, description, counter_names)
cls.pools[name] = p
return p
@classmethod
def get_pool(cls, name: str):
name = name.lower()
return cls.pools.get(name)
@classmethod
def delete_pool(cls, name: str):
with cls.lock:
name = name.lower()
return cls.pools.pop(name, None)
@classmethod
def get_table(cls):
with cls.lock:
headers = ["pool", "type", "description"]
rows = []
for k in sorted(cls.pools.keys()):
v = cls.pools[k]
r = [v.name]
if isinstance(v, HistPool):
t = "hist"
elif isinstance(v, CounterPool):
t = "counter"
else:
t = "?"
r.append(t)
r.append(v.description)
rows.append(r)
return headers, rows
@classmethod
def to_dict(cls):
with cls.lock:
result = {}
for k in sorted(cls.pools.keys()):
v = cls.pools[k]
if isinstance(v, HistPool):
t = "hist"
elif isinstance(v, CounterPool):
t = "counter"
else:
raise ValueError(f"unknown type of pool '{k}'")
result[k] = {"type": t, "pool": v.to_dict()}
return result
@classmethod
def from_dict(cls, d: dict):
cls.pools = {}
for k, v in d.items():
t = v.get("type")
if not t:
raise ValueError("missing pool type")
pd = v.get("pool")
if not pd:
raise ValueError("missing pool data")
if t == "hist":
p = HistPool.from_dict(pd)
elif t == "counter":
p = CounterPool.from_dict(pd)
else:
raise ValueError(f"invalid pool type {t}")
cls.pools[k] = p
@classmethod
def dump_summary(cls, file_name: str):
stats_dict = cls.to_dict()
json_string = json.dumps(stats_dict, indent=4)
with open(file_name, "w") as f:
f.write(json_string)
@classmethod
def close(cls):
if cls.record_writer:
cls.record_writer.close()
class CsvRecordHandler(RecordWriter):
def __init__(self, file_name):
self.file = open(file_name, "w")
self.writer = csv.writer(self.file)
self.lock = threading.Lock()
def write(self, pool_name: str, category: str, value: float, report_time: float):
if not pool_name.isascii():
raise ValueError(f"pool_name {pool_name} contains non-ascii chars")
if not category.isascii():
raise ValueError(f"category {category} contains non-ascii chars")
row = [pool_name, category, report_time, value]
with self.lock:
self.writer.writerow(row)
def close(self):
self.file.close()
@staticmethod
def read_records(csv_file_name: str):
pools = {}
reader = CsvRecordReader(csv_file_name)
for rec in reader:
pool_name = rec.pool_name
cat_name = rec.category
report_time = rec.report_time
value = rec.value
cats = pools.get(pool_name)
if not cats:
cats = {}
pools[pool_name] = cats
recs = cats.get(cat_name)
if not recs:
recs = []
cats[cat_name] = recs
recs.append((report_time, value))
return pools
class StatsRecord:
def __init__(self, pool_name, category, report_time, value):
self.pool_name = pool_name
self.category = category
self.report_time = report_time
self.value = value
class CsvRecordReader:
def __init__(self, csv_file_name: str):
self.csv_file_name = csv_file_name
self.file = open(csv_file_name)
self.reader = csv.reader(self.file)
def __iter__(self):
return self
def __next__(self):
row = next(self.reader)
if len(row) != 4:
raise ValueError(f"'{self.csv_file_name}' is not a valid stats pool record file: bad row length {len(row)}")
pool_name = row[0]
cat_name = row[1]
report_time = float(row[2])
value = float(row[3])
return StatsRecord(pool_name, cat_name, report_time, value)
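if __name__ == "__main__":
    # Minimal sketch of the pool APIs above (illustrative names and values).
    timings = StatsPoolManager.add_time_hist_pool("demo_time", "request latency")
    for v in (0.0042, 0.0179, 0.31):
        timings.record_value(category="GET /status", value=v)
    counters = StatsPoolManager.add_counter_pool("demo_count", "message counters", ["sent", "received"])
    counters.increment(category="client_a", counter_name="sent", amount=3)
    counters.increment(category="client_a", counter_name="received")
    for headers, rows in (timings.get_table(StatsMode.AVERAGE), counters.get_table()):
        print(headers)
        for row in rows:
            print(row)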
| NVFlare-main | nvflare/fuel/f3/stats_pool.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/f3/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Optional
from nvflare.fuel.f3.connection import Connection
from nvflare.fuel.f3.endpoint import Endpoint
class AppIds:
"""Reserved application IDs"""
ALL = 0
DEFAULT = 1
CELL_NET = 2
PUB_SUB = 3
class Message:
def __init__(self, headers: Optional[dict] = None, payload: Any = None):
"""Construct an FCI message"""
self.headers = headers
self.payload = payload
def set_header(self, key: str, value):
if self.headers is None:
self.headers = {}
self.headers[key] = value
def add_headers(self, headers: dict):
if self.headers is None:
self.headers = {}
self.headers.update(headers)
    def get_header(self, key: str, default=None):
        if self.headers is None:
            # honor the caller's fallback even when no headers were ever set
            return default
        return self.headers.get(key, default)
def remove_header(self, key: str):
if self.headers:
self.headers.pop(key, None)
def set_prop(self, key: str, value):
setattr(self, key, value)
def get_prop(self, key: str, default=None):
try:
return getattr(self, key)
except AttributeError:
return default
class MessageReceiver(ABC):
@abstractmethod
def process_message(self, endpoint: Endpoint, connection: Connection, app_id: int, message: Message):
pass
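if __name__ == "__main__":
    # Quick sketch of the Message API above.
    msg = Message(payload=b"hello")
    msg.set_header("app", "demo")
    msg.add_headers({"seq": 1})
    print(msg.get_header("app"), msg.get_header("missing", default="n/a"))
    msg.set_prop("route", "a.b.c")
    print(msg.get_prop("route"), msg.get_prop("absent", default=0))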
| NVFlare-main | nvflare/fuel/f3/message.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import signal
import threading
import time
from nvflare.fuel.common.excepts import ComponentNotAuthorized, ConfigError
from nvflare.fuel.common.exit_codes import ProcessExitCode
from nvflare.fuel.f3.drivers.aio_context import AioContext
from nvflare.security.logging import secure_format_exception, secure_format_traceback
class MainProcessMonitor:
"""MPM (Main Process Monitor). It's used to run main thread and to handle graceful shutdown"""
name = "MPM"
_cleanup_cbs = []
_stopping = False
_logger = None
_aio_ctx = None
@classmethod
def set_name(cls, name: str):
if not name:
raise ValueError("name must be specified")
if not isinstance(name, str):
raise ValueError(f"name must be str but got {type(name)}")
cls.name = name
@classmethod
def is_stopping(cls):
return cls._stopping
@classmethod
def get_aio_context(cls):
if not cls._aio_ctx:
cls._aio_ctx = AioContext.get_global_context()
return cls._aio_ctx
@classmethod
def logger(cls):
if not cls._logger:
cls._logger = logging.getLogger(cls.name)
return cls._logger
@classmethod
def add_cleanup_cb(cls, cb, *args, **kwargs):
if not callable(cb):
raise ValueError(f"specified cleanup_cb {type(cb)} is not callable")
for _cb in cls._cleanup_cbs:
if cb == _cb[0]:
raise RuntimeError(f"cleanup CB {cb.__name__} is already registered")
cls._cleanup_cbs.append((cb, args, kwargs))
@classmethod
def _call_cb(cls, t: tuple):
cb, args, kwargs = t[0], t[1], t[2]
try:
return cb(*args, **kwargs)
except Exception as ex:
cls.logger().error(f"exception from CB {cb.__name__}: {type(secure_format_exception(ex))}")
@classmethod
def _start_shutdown(cls, shutdown_grace_time, cleanup_grace_time):
logger = cls.logger()
if not cls._cleanup_cbs:
logger.debug(f"=========== {cls.name}: Nothing to cleanup ...")
return
logger.debug(f"=========== {cls.name}: Shutting down. Starting cleanup ...")
time.sleep(shutdown_grace_time) # let pending activities finish
cleanup_waiter = threading.Event()
t = threading.Thread(target=cls._do_cleanup, args=(cleanup_waiter,))
t.daemon = True
t.start()
if not cleanup_waiter.wait(timeout=cleanup_grace_time):
logger.warning(f"======== {cls.name}: Cleanup did not complete within {cleanup_grace_time} secs")
@classmethod
def _cleanup_one_round(cls, cbs):
logger = cls.logger()
for _cb in cbs:
cb_name = ""
try:
cb_name = _cb[0].__name__
logger.debug(f"{cls.name}: calling cleanup CB {cb_name}")
cls._call_cb(_cb)
logger.debug(f"{cls.name}: finished cleanup CB {cb_name}")
except Exception as ex:
logger.warning(f"{cls.name}: exception {secure_format_exception(ex)} from cleanup CB {cb_name}")
@classmethod
def _do_cleanup(cls, waiter: threading.Event):
max_cleanup_rounds = 10
logger = cls.logger()
# during cleanup, a cleanup CB can add another cleanup CB
# we will call cleanup multiple rounds until no more CBs are added or tried max number of rounds
for i in range(max_cleanup_rounds):
cbs = cls._cleanup_cbs
cls._cleanup_cbs = []
if cbs:
logger.debug(f"{cls.name}: cleanup round {i + 1}")
cls._cleanup_one_round(cbs)
logger.debug(f"{cls.name}: finished cleanup round {i + 1}")
else:
break
if cls._cleanup_cbs:
logger.warning(f"{cls.name}: there are still cleanup CBs after {max_cleanup_rounds} rounds")
logger.debug(f"{cls.name}: Cleanup Finished!")
waiter.set()
@classmethod
def run(cls, main_func, shutdown_grace_time=1.5, cleanup_grace_time=1.5):
if not callable(main_func):
raise ValueError("main_func must be runnable")
# this method must be called from main method
t = threading.current_thread()
if t.name != "MainThread":
raise RuntimeError(
f"{cls.name}: the mpm.run() method is called from {t.name}: it must be called from the MainThread"
)
# call and wait for the main_func to complete
logger = cls.logger()
logger.debug(f"=========== {cls.name}: started to run forever")
try:
rc = main_func()
        except ConfigError:
            # error already reported where it was raised
            rc = ProcessExitCode.CONFIG_ERROR
            logger.error(secure_format_traceback())
        except ComponentNotAuthorized:
            rc = ProcessExitCode.UNSAFE_COMPONENT
            logger.error(secure_format_traceback())
except Exception as ex:
rc = ProcessExitCode.EXCEPTION
logger.error(f"Execute exception: {secure_format_exception(ex)}")
logger.error(secure_format_traceback())
# start shutdown process
cls._stopping = True
cls._start_shutdown(shutdown_grace_time, cleanup_grace_time)
# We can now stop the AIO loop!
AioContext.close_global_context()
logger.debug(f"=========== {cls.name}: checking running threads")
num_active_threads = 0
for thread in threading.enumerate():
if thread.name != "MainThread" and not thread.daemon:
logger.warning(f"#### {cls.name}: still running thread {thread.name}")
num_active_threads += 1
logger.info(f"{cls.name}: Good Bye!")
if num_active_threads > 0:
try:
os.kill(os.getpid(), signal.SIGKILL)
except Exception as ex:
logger.debug(f"Failed to kill process {os.getpid()}: {secure_format_exception(ex)}")
return rc
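if __name__ == "__main__":
    # Hedged sketch: run a trivial main function under MPM. run() must be
    # called from the MainThread; it sleeps through the grace periods, runs
    # the cleanup CBs, then closes the global AIO context (assumed here to be
    # a no-op when no context was ever created).
    def _main():
        print("doing work")
        return 0

    MainProcessMonitor.add_cleanup_cb(print, "cleanup called")
    print("exit code:", MainProcessMonitor.run(_main, shutdown_grace_time=0.1, cleanup_grace_time=0.5))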
| NVFlare-main | nvflare/fuel/f3/mpm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from abc import ABC, abstractmethod
from enum import Enum
from typing import Union
from nvflare.fuel.f3.drivers.connector_info import ConnectorInfo, Mode
from nvflare.fuel.f3.drivers.driver_params import DriverParams
log = logging.getLogger(__name__)
lock = threading.Lock()
conn_count = 0
BytesAlike = Union[bytes, bytearray, memoryview]
def create_connection_name():
global lock, conn_count
with lock:
conn_count += 1
return "CN%05d" % conn_count
class ConnState(Enum):
IDLE = 1 # Initial state
CONNECTED = 2 # New connection
CLOSED = 3 # Connection is closed
class FrameReceiver(ABC):
@abstractmethod
def process_frame(self, frame: BytesAlike):
"""Frame received callback
Args:
frame: The frame received
Raises:
CommError: If any error happens while processing the frame
"""
pass
class Connection(ABC):
"""FCI connection spec. A connection is used to transfer opaque frames"""
def __init__(self, connector: ConnectorInfo):
self.name = create_connection_name()
self.state = ConnState.IDLE
self.frame_receiver = None
self.connector = connector
@abstractmethod
def get_conn_properties(self) -> dict:
"""Get connection specific properties, like peer address, TLS certificate etc
Raises:
CommError: If any errors
"""
pass
@abstractmethod
def close(self):
"""Close connection
Raises:
CommError: If any errors
"""
pass
@abstractmethod
def send_frame(self, frame: BytesAlike):
"""Send a SFM frame through the connection to the remote endpoint.
Args:
frame: The frame to be sent
Raises:
CommError: If any error happens while sending the frame
"""
pass
def register_frame_receiver(self, receiver: FrameReceiver):
"""Register frame receiver
Args:
receiver: The frame receiver
"""
self.frame_receiver = receiver
def process_frame(self, frame: BytesAlike):
"""A convenience function to call frame receiver
Args:
frame: The frame to be processed
Raises:
CommError: If any error happens while processing the frame
"""
if self.frame_receiver:
self.frame_receiver.process_frame(frame)
else:
log.error(f"Frame receiver not registered for {self}")
def __str__(self):
if self.state != ConnState.CONNECTED:
return f"[{self.name} Not Connected]"
conn_props = self.get_conn_properties()
local_addr = conn_props.get(DriverParams.LOCAL_ADDR, "N/A")
peer_addr = conn_props.get(DriverParams.PEER_ADDR, "N/A")
direction = "=>" if self.connector.mode == Mode.ACTIVE else "<="
peer_cn = conn_props.get(DriverParams.PEER_CN, None)
cn = " SSL " + peer_cn if peer_cn else ""
return f"[{self.name} {local_addr} {direction} {peer_addr}{cn}]"
| NVFlare-main | nvflare/fuel/f3/connection.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from enum import IntEnum
class EndpointState(IntEnum):
IDLE = 0 # Initial state
READY = 1 # Endpoint is ready
CLOSING = 2 # Endpoint is closing, can't send
DISCONNECTED = 3 # Endpoint is disconnected
ERROR = 4 # Endpoint is in error state
class Endpoint:
"""Endpoint represents a logical party in the SFM network. For each communicator,
there is only one local endpoint. There may be multiple remote endpoints.
A remote endpoint may be reachable through multiple connections"""
def __init__(self, name: str, properties: dict = None, conn_props: dict = None):
"""Construct an endpoint
Args:
name: The endpoint name
properties: Public properties exchanged with peer
conn_props: Connection properties and local credentials like certificates
Raises:
CommError: If any error happens while sending the request
"""
self.name = name
self.state = EndpointState.IDLE
# public properties exchanged while handshake
self.properties = properties if properties else {}
# Connection properties like peer address, certificate location
self.conn_props = conn_props if conn_props else {}
def set_prop(self, key, value):
self.properties[key] = value
def get_prop(self, key):
return self.properties.get(key)
class EndpointMonitor(ABC):
"""Monitor for endpoint lifecycle changes"""
@abstractmethod
def state_change(self, endpoint: Endpoint):
pass
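if __name__ == "__main__":
    # Tiny usage sketch of the Endpoint above.
    ep = Endpoint("site-1", properties={"org": "acme"})
    ep.set_prop("weight", 1.0)
    ep.state = EndpointState.READY
    print(ep.name, ep.state.name, ep.get_prop("org"), ep.get_prop("weight"))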
| NVFlare-main | nvflare/fuel/f3/endpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import logging
import os
import weakref
from typing import Optional
from nvflare.fuel.f3 import drivers
from nvflare.fuel.f3.comm_config import CommConfigurator
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.drivers.driver import Driver
from nvflare.fuel.f3.drivers.driver_manager import DriverManager
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.drivers.net_utils import parse_url
from nvflare.fuel.f3.endpoint import Endpoint, EndpointMonitor
from nvflare.fuel.f3.message import Message, MessageReceiver
from nvflare.fuel.f3.sfm.conn_manager import ConnManager, Mode
from nvflare.security.logging import secure_format_exception
log = logging.getLogger(__name__)
_running_instances = weakref.WeakSet()
driver_mgr = DriverManager()
driver_loaded = False
def load_comm_drivers():
    global driver_loaded
    # Load all the drivers in the drivers module
    driver_mgr.search_folder(os.path.dirname(drivers.__file__), drivers.__package__)
    # Load custom drivers
    driver_path = CommConfigurator().get_comm_driver_path(None)
    if driver_path:
        for path in driver_path.split(os.pathsep):
            log.debug(f"Custom driver folder {path} is searched")
            driver_mgr.search_folder(path, None)
    # mark drivers as loaded even when no custom driver path is configured,
    # so the built-in drivers are not re-scanned on every call
    driver_loaded = True
class Communicator:
"""FCI (Flare Communication Interface) main communication API"""
def __init__(self, local_endpoint: Endpoint):
self.local_endpoint = local_endpoint
self.monitors = []
self.conn_manager = ConnManager(local_endpoint)
self.stopped = False
def start(self):
"""Start the communicator and establishing all the connections
Raises:
CommError: If any error encountered while starting up
"""
self.conn_manager.start()
log.debug(f"Communicator for local endpoint: {self.local_endpoint.name} is started")
_running_instances.add(self)
def stop(self):
"""Stop the communicator and shutdown all the connections
Raises:
CommError: If any error encountered while shutting down
"""
if self.stopped:
return
self.conn_manager.stop()
self.stopped = True
try:
_running_instances.remove(self)
except KeyError as ex:
log.error(
f"Logical error, communicator {self.local_endpoint.name} is not started: {secure_format_exception(ex)}"
)
log.debug(f"Communicator endpoint: {self.local_endpoint.name} has stopped")
def register_monitor(self, monitor: EndpointMonitor):
"""Register a monitor for endpoint lifecycle changes
This monitor is notified for any state changes of all the endpoints.
Multiple monitors can be registered.
Args:
monitor: The class that receives the endpoint state change notification
Raises:
CommError: If any error happens while sending the request
"""
self.conn_manager.add_endpoint_monitor(monitor)
def find_endpoint(self, name: str) -> Optional[Endpoint]:
"""Find endpoint by name
Args:
name: Endpoint name
Returns:
The endpoint if found. None if not found
"""
return self.conn_manager.find_endpoint(name)
def remove_endpoint(self, name: str):
"""Remove endpoint and close all the connections associated with it
Args:
name: Endpoint name
"""
return self.conn_manager.remove_endpoint(name)
def send(self, endpoint: Endpoint, app_id: int, message: Message):
"""Send a message to endpoint for app_id, no response is expected
Args:
endpoint: An endpoint to send the request to
app_id: Application ID
message: Message to send
Raises:
CommError: If any error happens while sending the data
"""
self.conn_manager.send_message(endpoint, app_id, message.headers, message.payload)
def register_message_receiver(self, app_id: int, receiver: MessageReceiver):
"""Register a receiver to process FCI message for the app
Args:
app_id: Application ID
receiver: The receiver to process the message
Raises:
CommError: If duplicate endpoint/app or receiver is of wrong type
"""
self.conn_manager.register_message_receiver(app_id, receiver)
def add_connector(self, url: str, mode: Mode, secure: bool = False) -> str:
"""Load a connector. The driver is selected based on the URL
Args:
url: The url to listen on or connect to, like "https://0:443". Use 0 for empty host
mode: Active for connecting, Passive for listening
secure: True if SSL is required.
Returns:
A handle that can be used to delete connector
Raises:
CommError: If any errors
"""
if not driver_loaded:
load_comm_drivers()
driver_class = driver_mgr.find_driver_class(url)
if not driver_class:
raise CommError(CommError.NOT_SUPPORTED, f"No driver found for URL {url}")
params = parse_url(url)
return self.add_connector_advanced(driver_class(), mode, params, secure, False)
def start_listener(self, scheme: str, resources: dict) -> (str, str):
"""Add and start a connector in passive mode on an address selected by the driver.
Args:
scheme: Connection scheme, e.g. http, https
resources: User specified resources like host and port ranges
Returns:
A tuple with connector handle and connect url
Raises:
CommError: If any errors like invalid host or port not available
"""
if not driver_loaded:
load_comm_drivers()
driver_class = driver_mgr.find_driver_class(scheme)
if not driver_class:
raise CommError(CommError.NOT_SUPPORTED, f"No driver found for scheme {scheme}")
connect_url, listening_url = driver_class.get_urls(scheme, resources)
params = parse_url(listening_url)
handle = self.add_connector_advanced(driver_class(), Mode.PASSIVE, params, False, True)
return handle, connect_url
def add_connector_advanced(
self, driver: Driver, mode: Mode, params: dict, secure: bool, start: bool = False
) -> str:
"""Add a connector using a specific driver instance.
Args:
driver: A transport driver instance
mode: Active or passive
params: Driver parameters
secure: SSL is required if true
start: Start the connector if true
Returns:
A handle that can be used to delete the connector
Raises:
CommError: If any errors
"""
if self.local_endpoint.conn_props:
params.update(self.local_endpoint.conn_props)
if secure:
params[DriverParams.SECURE] = secure
handle = self.conn_manager.add_connector(driver, params, mode)
if not start:
return handle
connector = self.conn_manager.connectors.get(handle, None)
if not connector:
log.error(f"Connector {driver.get_name()}:{handle} is not found")
raise CommError(CommError.ERROR, f"Logic error. Connector {driver.get_name()}:{handle} not found")
self.conn_manager.start_connector(connector)
return handle
def remove_connector(self, handle: str):
"""Remove the connector
Args:
handle: The connector handle
Raises:
CommError: If any errors
"""
self.conn_manager.remove_connector(handle)
def _exit_func():
while _running_instances:
c = next(iter(_running_instances))
# This call will remove the entry from the set
c.stop()
log.debug(f"Communicator {c.local_endpoint.name} was left running, stopped on exit")
atexit.register(_exit_func)
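# Typical wiring, shown as a hedged sketch only (the URL, scheme, and receiver
# below are illustrative; an actual run needs the driver modules in place):
#
#   local = Endpoint("server")
#   comm = Communicator(local)
#   comm.register_message_receiver(app_id=1, receiver=my_receiver)
#   listener_handle = comm.add_connector("tcp://0:8002", Mode.PASSIVE)  # listen
#   comm.start()
#   ...
#   comm.send(peer_endpoint, app_id=1, message=Message({"k": "v"}, b"payload"))
#   comm.stop()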
| NVFlare-main | nvflare/fuel/f3/communicator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Callable
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.blob_streamer import BlobStreamer
from nvflare.fuel.f3.streaming.byte_receiver import ByteReceiver
from nvflare.fuel.f3.streaming.byte_streamer import ByteStreamer
from nvflare.fuel.f3.streaming.file_streamer import FileStreamer
from nvflare.fuel.f3.streaming.object_streamer import ObjectStreamer
from nvflare.fuel.f3.streaming.stream_types import ObjectIterator, ObjectStreamFuture, Stream, StreamError, StreamFuture
class StreamCell:
def __init__(self, cell: CoreCell):
self.cell = cell
self.byte_streamer = ByteStreamer(cell)
self.byte_receiver = ByteReceiver(cell)
self.blob_streamer = BlobStreamer(self.byte_streamer, self.byte_receiver)
self.file_streamer = FileStreamer(self.byte_streamer, self.byte_receiver)
self.object_streamer = ObjectStreamer(self.blob_streamer)
@staticmethod
def get_chunk_size():
"""Get the default chunk size used by StreamCell
Byte stream are broken into chunks of this size before sending over Cellnet
"""
return ByteStreamer.get_chunk_size()
def send_stream(self, channel: str, topic: str, target: str, message: Message, secure=False) -> StreamFuture:
"""
Send a byte-stream over a channel/topic asynchronously. The streaming is performed in a different thread.
The streamer will read from stream and send the data in chunks till the stream reaches EOF.
Args:
channel: channel for the stream
topic: topic for the stream
target: destination cell FQCN
message: The payload is the stream to send
secure: Send the message with end-end encryption if True
Returns: StreamFuture that can be used to check status/progress, or register callbacks.
The future result is the number of bytes sent
"""
if not isinstance(message.payload, Stream):
raise StreamError(f"Message payload is not a stream: {type(message.payload)}")
return self.byte_streamer.send(channel, topic, target, message.headers, message.payload, secure)
def register_stream_cb(self, channel: str, topic: str, stream_cb: Callable, *args, **kwargs):
"""
Register a callback for reading stream. The stream_cb must have the following signature,
stream_cb(future: StreamFuture, stream: Stream, resume: bool, *args, **kwargs) -> int
future: The future represents the ongoing streaming. It's done when streaming is complete.
stream: The stream to read the receiving data from
resume: True if this is a restarted stream
It returns the offset to resume from if this is a restarted stream
The resume_cb returns the offset to resume from:
resume_cb(stream_id: str, *args, **kwargs) -> int
If None, the stream is not resumable.
Args:
channel: the channel of the request
topic: topic of the request
stream_cb: The callback to handle the stream. This is called when a stream is started. It also
provides restart offset for restarted streams. This CB is invoked in a dedicated thread,
and it can block
*args: positional args to be passed to the callbacks
**kwargs: keyword args to be passed to the callbacks
"""
self.byte_receiver.register_callback(channel, topic, stream_cb, *args, **kwargs)
def send_blob(self, channel: str, topic: str, target: str, message: Message, secure=False) -> StreamFuture:
"""
Send a BLOB (Binary Large Object) to the target. The payload of message is the BLOB. The BLOB must fit in
memory on the receiving end.
Args:
channel: channel for the message
topic: topic of the message
target: destination cell IDs
message: the headers and the blob as payload
secure: Send the message with end-end encryption if True
Returns: StreamFuture that can be used to check status/progress and get result
The future result is the total number of bytes sent
"""
if message.payload is None:
message.payload = bytes(0)
if not isinstance(message.payload, (bytes, bytearray, memoryview)):
raise StreamError(f"Message payload is not a byte array: {type(message.payload)}")
return self.blob_streamer.send(channel, topic, target, message, secure)
def register_blob_cb(self, channel: str, topic: str, blob_cb, *args, **kwargs):
"""
Register a callback for receiving the blob. This callback is invoked when the whole
blob is received. If streaming fails, the streamer will try again. The failed streaming
is ignored.
The callback must have the following signature,
blob_cb(future: StreamFuture, *args, **kwargs)
The future's result is the final BLOB received
Args:
channel: the channel of the request
topic: topic of the request
blob_cb: The callback to handle the stream
"""
self.blob_streamer.register_blob_callback(channel, topic, blob_cb, *args, **kwargs)
def send_file(self, channel: str, topic: str, target: str, message: Message, secure=False) -> StreamFuture:
"""
Send a file to target using stream API.
Args:
channel: channel for the message
topic: topic for the message
target: destination cell FQCN
message: the headers and the full path of the file to be sent as payload
secure: Send the message with end-end encryption if True
Returns: StreamFuture that can be used to check status/progress and get the total bytes sent
"""
if not isinstance(message.payload, str):
raise StreamError(f"Message payload is not a file name: {type(message.payload)}")
file_name = message.payload
if not os.path.isfile(file_name) or not os.access(file_name, os.R_OK):
raise StreamError(f"File {file_name} doesn't exist or isn't readable")
return self.file_streamer.send(channel, topic, target, message, secure)
def register_file_cb(self, channel: str, topic: str, file_cb, *args, **kwargs):
"""
Register callbacks for file receiving. The callbacks must have the following signatures,
file_cb(future: StreamFuture, file_name: str, *args, **kwargs) -> str
The future represents the file receiving task and the result is the final file path
It returns the full path where the file will be written to
Args:
channel: the channel of the request
topic: topic of the request
file_cb: This CB is called when file transfer starts
"""
self.file_streamer.register_file_callback(channel, topic, file_cb, *args, **kwargs)
def send_objects(self, channel: str, topic: str, target: str, message: Message, secure=False) -> ObjectStreamFuture:
"""
Send a list of objects to the destination. Each object is sent as BLOB, so it must fit in memory
Args:
channel: channel for the message
topic: topic of the message
target: destination cell IDs
message: Headers and the payload which is an iterator that provides next object
secure: Send the message with end-end encryption if True
Returns: ObjectStreamFuture that can be used to check status/progress, or register callbacks
"""
if not isinstance(message.payload, ObjectIterator):
raise StreamError(f"Message payload is not an object iterator: {type(message.payload)}")
return self.object_streamer.stream_objects(channel, topic, target, message.headers, message.payload, secure)
def register_objects_cb(
self, channel: str, topic: str, object_stream_cb: Callable, object_cb: Callable, *args, **kwargs
):
"""
Register callback for receiving the object. The callback signature is,
objects_stream_cb(future: ObjectStreamFuture, resume: bool, *args, **kwargs) -> int
future: It represents the streaming of all objects. An object CB can be registered with the future
to receive each object.
resume: True if this is a restarted stream
This CB returns the index to restart if this is a restarted stream
object_cb(obj_sid: str, index: int, message: Message, *args, ** kwargs)
obj_sid: Object Stream ID
index: The index of the object
message: The header and payload is the object
resume_cb(stream_id: str, *args, **kwargs) -> int
is received. The index starts from 0. The callback must have the following signature,
objects_cb(future: ObjectStreamFuture, index: int, object: Any, headers: Optional[dict], *args, **kwargs)
resume_cb(stream_id: str, *args, **kwargs) -> int
Args:
channel: the channel of the request
topic: topic of the request
object_stream_cb: The callback when an object stream is started
object_cb: The callback is invoked when each object is received
"""
self.object_streamer.register_object_callbacks(channel, topic, object_stream_cb, object_cb, args, kwargs)
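# Hedged usage sketch (illustrative names; a real setup needs a started
# CoreCell on both ends, and the StreamFuture result API is assumed):
#
#   sc = StreamCell(core_cell)
#   # receiving side
#   def on_blob(future, *args, **kwargs):
#       data = future.result()  # the complete BLOB
#   sc.register_blob_cb("models", "update", on_blob)
#   # sending side
#   fut = sc.send_blob("models", "update", "server.job1", Message(None, b"...bytes..."))
#   print(fut.result())  # total bytes sent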
| NVFlare-main | nvflare/fuel/f3/stream_cell.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import logging
import os
import random
import resource
import threading
import time
from abc import ABC
from typing import List, Union
from nvflare.fuel.f3.cellnet.cell import Cell
from nvflare.fuel.f3.cellnet.connector_manager import ConnectorData
from nvflare.fuel.f3.cellnet.core_cell import Message
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey, ReturnCode
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.cellnet.utils import make_reply
from nvflare.fuel.f3.stats_pool import StatsPoolManager
from nvflare.fuel.utils.config_service import ConfigService
_CHANNEL = "_net_manager"
_TOPIC_PEERS = "peers"
_TOPIC_CELLS = "cells"
_TOPIC_ROUTE = "route"
_TOPIC_START_ROUTE = "start_route"
_TOPIC_STOP = "stop"
_TOPIC_STOP_CELL = "stop_cell"
_TOPIC_URL_USE = "url_use"
_TOPIC_CONNS = "conns"
_TOPIC_SPEED = "speed"
_TOPIC_ECHO = "echo"
_TOPIC_STRESS = "stress"
_TOPIC_CHANGE_ROOT = "change_root"
_TOPIC_BULK_TEST = "bulk_test"
_TOPIC_BULK_ITEM = "bulk_item"
_TOPIC_MSG_STATS = "msg_stats"
_TOPIC_LIST_POOLS = "list_pools"
_TOPIC_SHOW_POOL = "show_pool"
_TOPIC_COMM_CONFIG = "comm_config"
_TOPIC_CONFIG_VARS = "config_vars"
_TOPIC_PROCESS_INFO = "process_info"
_TOPIC_HEARTBEAT = "heartbeat"
_ONE_K = bytes([1] * 1024)
class _Member:
STATE_UNKNOWN = 0
STATE_ONLINE = 1
STATE_OFFLINE = 2
def __init__(self, fqcn):
self.fqcn = fqcn
self.state = _Member.STATE_UNKNOWN
self.last_heartbeat_time = time.time()
self.lock = threading.Lock()
class SubnetMonitor(ABC):
def __init__(self, subnet_id: str, member_cells: List[str], trouble_alert_threshold: float):
if not member_cells:
raise ValueError("member cells must not be empty")
self.agent = None
self.subnet_id = subnet_id
self.trouble_alert_threshold = trouble_alert_threshold
self.lock = threading.Lock()
self.members = {}
for m in member_cells:
self.members[m] = _Member(m)
def member_online(self, member_cell_fqcn: str):
pass
def member_offline(self, member_cell_fqcn: str):
pass
def put_member_online(self, member: _Member):
with self.lock:
member.last_heartbeat_time = time.time()
current_state = member.state
member.state = member.STATE_ONLINE
if current_state in [member.STATE_UNKNOWN, member.STATE_OFFLINE]:
self.member_online(member.fqcn)
def put_member_offline(self, member: _Member):
with self.lock:
if time.time() - member.last_heartbeat_time <= self.trouble_alert_threshold:
return
if member.state in [member.STATE_ONLINE]:
self.member_offline(member.fqcn)
member.state = member.STATE_OFFLINE
def stop_subnet(self):
if not self.agent:
raise RuntimeError("No NetAgent in this monitor. Make sure the monitor is added to a NetAgent.")
return self.agent.stop_subnet(self)
class NetAgent:
def __init__(self, cell, change_root_cb=None, agent_closed_cb=None):
if isinstance(cell, Cell):
cell = cell.core_cell
self.cell = cell
self.change_root_cb = change_root_cb
self.agent_closed_cb = agent_closed_cb
self.logger = logging.getLogger(self.__class__.__name__)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_CELLS,
cb=self._do_report_cells,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_ROUTE,
cb=self._do_route,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_START_ROUTE,
cb=self._do_start_route,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_STOP,
cb=self._do_stop,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_STOP_CELL,
cb=self._do_stop_cell,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_PEERS,
cb=self._do_peers,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_CONNS,
cb=self._do_connectors,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_URL_USE,
cb=self._do_url_use,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_SPEED,
cb=self._do_speed,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_ECHO,
cb=self._do_echo,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_STRESS,
cb=self._do_stress,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_CHANGE_ROOT,
cb=self._do_change_root,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_BULK_TEST,
cb=self._do_bulk_test,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_BULK_ITEM,
cb=self._do_bulk_item,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_MSG_STATS,
cb=self._do_msg_stats,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_LIST_POOLS,
cb=self._do_list_pools,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_SHOW_POOL,
cb=self._do_show_pool,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_COMM_CONFIG,
cb=self._do_comm_config,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_CONFIG_VARS,
cb=self._do_config_vars,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_PROCESS_INFO,
cb=self._do_process_info,
)
cell.register_request_cb(
channel=_CHANNEL,
topic=_TOPIC_HEARTBEAT,
cb=self._do_heartbeat,
)
self.heartbeat_thread = None
self.monitor_thread = None
self.asked_to_close = False
self.subnets = {}
self.monitors = {}
self.hb_lock = threading.Lock()
self.monitor_lock = threading.Lock()
def add_to_subnet(self, subnet_id: str, monitor_fqcn: str = FQCN.ROOT_SERVER):
with self.hb_lock:
self.subnets[subnet_id] = monitor_fqcn
if self.heartbeat_thread is None:
self.heartbeat_thread = threading.Thread(target=self._subnet_heartbeat)
self.heartbeat_thread.start()
def add_subnet_monitor(self, monitor: SubnetMonitor):
if not isinstance(monitor, SubnetMonitor):
raise ValueError(f"monitor must be SubnetMonitor but got {type(monitor)}")
if monitor.subnet_id in self.monitors:
raise ValueError(f"monitor for subnet {monitor.subnet_id} already exists")
monitor.agent = self
with self.monitor_lock:
self.monitors[monitor.subnet_id] = monitor
if self.monitor_thread is None:
self.monitor_thread = threading.Thread(target=self._monitor_subnet)
self.monitor_thread.start()
def stop_subnet(self, monitor: SubnetMonitor):
cells_to_stop = []
for member_fqcn, member in monitor.members.items():
if member.state == member.STATE_ONLINE:
cells_to_stop.append(member_fqcn)
if cells_to_stop:
return self.cell.broadcast_request(
channel=_CHANNEL, topic=_TOPIC_STOP_CELL, request=Message(), targets=cells_to_stop, timeout=1.0
)
else:
return None
def delete_subnet_monitor(self, subnet_id: str):
with self.monitor_lock:
self.monitors.pop(subnet_id, None)
def close(self):
if self.asked_to_close:
return
self.asked_to_close = True
if self.heartbeat_thread and self.heartbeat_thread.is_alive():
self.heartbeat_thread.join()
if self.monitor_thread and self.monitor_thread.is_alive():
self.monitor_thread.join()
if self.agent_closed_cb:
self.agent_closed_cb()
def _subnet_heartbeat(self):
cc = self.cell.comm_configurator
interval = cc.get_subnet_heartbeat_interval(5.0)
if interval <= 0:
interval = 5.0
while True:
with self.hb_lock:
for subnet_id, target in self.subnets.items():
self.cell.fire_and_forget(
channel=_CHANNEL,
topic=_TOPIC_HEARTBEAT,
targets=target,
message=Message(payload={"subnet_id": subnet_id}),
)
            # wait for the full interval, but check asked_to_close every 0.1 secs so we can exit promptly
start = time.time()
while True:
time.sleep(0.1)
if self.asked_to_close:
return
if time.time() - start >= interval:
break
@staticmethod
def _check_monitor(m: SubnetMonitor):
for member_fqcn, member in m.members.items():
m.put_member_offline(member)
def _monitor_subnet(self):
while not self.asked_to_close:
with self.monitor_lock:
monitors = copy.copy(self.monitors)
for _, m in monitors.items():
self._check_monitor(m)
time.sleep(0.5)
def _do_heartbeat(self, request: Message) -> Union[None, Message]:
origin = request.get_header(MessageHeaderKey.ORIGIN, "?")
if not self.monitors:
self.logger.warning(f"got subnet heartbeat from {origin} but no monitors")
return
payload = request.payload
assert isinstance(payload, dict)
subnet_id = payload.get("subnet_id", "")
m = self.monitors.get(subnet_id)
if not m:
self.logger.warning(f"got subnet heartbeat from {origin} for subnet_id {subnet_id} but no monitor")
return
assert isinstance(m, SubnetMonitor)
member = m.members.get(origin)
if not member:
self.logger.warning(f"got subnet heartbeat from {origin} for subnet_id {subnet_id} but it's not a member")
return
m.put_member_online(member)
def _do_stop(self, request: Message) -> Union[None, Message]:
self.stop()
return None
def _do_stop_cell(self, request: Message) -> Union[None, Message]:
self.stop()
return Message()
def _do_route(self, request: Message) -> Union[None, Message]:
return Message(payload=dict(request.headers))
def _do_start_route(self, request: Message) -> Union[None, Message]:
target_fqcn = request.payload
err = FQCN.validate(target_fqcn)
if err:
return make_reply(ReturnCode.PROCESS_EXCEPTION, f"bad target fqcn {err}")
assert isinstance(target_fqcn, str)
reply_headers, req_headers = self.get_route_info(target_fqcn)
return Message(payload={"request": dict(req_headers), "reply": dict(reply_headers)})
def _do_peers(self, request: Message) -> Union[None, Message]:
return Message(payload=list(self.cell.agents.keys()))
def get_peers(self, target_fqcn: str) -> (Union[None, dict], List[str]):
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_PEERS, target=target_fqcn, timeout=1.0, request=Message()
)
err = ""
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc == ReturnCode.OK:
result = reply.payload
if not isinstance(result, list):
err = f"reply payload should be list but got {type(reply.payload)}"
result = None
else:
result = None
err = f"return code: {rc}"
if err:
return {"error": err, "reply": reply.headers}, None
else:
return None, result
@staticmethod
def _connector_info(info: ConnectorData) -> dict:
return {"url": info.connect_url, "handle": info.handle, "type": "connector" if info.active else "listener"}
def _get_connectors(self) -> dict:
cell = self.cell
result = {}
if cell.int_listener:
result["int_listener"] = self._connector_info(cell.int_listener)
if cell.ext_listeners:
listeners = [self._connector_info(x) for _, x in cell.ext_listeners.items()]
result["ext_listeners"] = listeners
if cell.bb_ext_connector:
result["bb_ext_connector"] = self._connector_info(cell.bb_ext_connector)
if cell.bb_int_connector:
result["bb_int_connector"] = self._connector_info(cell.bb_int_connector)
if cell.adhoc_connectors:
conns = {}
for k, v in cell.adhoc_connectors.items():
conns[k] = self._connector_info(v)
result["adhoc_connectors"] = conns
return result
def _do_connectors(self, request: Message) -> Union[None, Message]:
return Message(payload=self._get_connectors())
def get_connectors(self, target_fqcn: str) -> (dict, dict):
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_CONNS, target=target_fqcn, timeout=1.0, request=Message()
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc == ReturnCode.OK:
result = reply.payload
if not isinstance(result, dict):
return {
"error": f"reply payload should be dict but got {type(reply.payload)}",
"reply": reply.headers,
}, {}
if not result:
return {}, {}
else:
return {}, result
else:
return {"error": "processing error", "reply": reply.headers}, {}
def request_cells_info(self) -> (str, List[str]):
result = [self.cell.get_fqcn()]
err = ""
replies = self._broadcast_to_subs(topic=_TOPIC_CELLS)
for t, r in replies.items():
assert isinstance(r, Message)
rc = r.get_header(MessageHeaderKey.RETURN_CODE)
if rc == ReturnCode.OK:
sub_result = r.payload
result.extend(sub_result)
else:
err = f"no reply from {t}: {rc}"
result.append(err)
return err, result
def _get_url_use_of_cell(self, url: str):
cell = self.cell
if cell.int_listener and cell.int_listener.connect_url == url:
return "int_listen"
if cell.ext_listeners:
for k in cell.ext_listeners.keys():
if k == url:
return "ext_listen"
if cell.bb_ext_connector and cell.bb_ext_connector.connect_url == url:
return "bb_ext_connect"
if cell.bb_int_connector and cell.bb_int_connector.connect_url == url:
return "int_connect"
if cell.adhoc_connectors:
for _, h in cell.adhoc_connectors.items():
if h.connect_url == url:
return "adhoc_connect"
return "none"
def get_url_use(self, url) -> dict:
result = {self.cell.get_fqcn(): self._get_url_use_of_cell(url)}
replies = self._broadcast_to_subs(topic=_TOPIC_URL_USE, message=Message(payload=url))
for t, r in replies.items():
assert isinstance(r, Message)
rc = r.get_header(MessageHeaderKey.RETURN_CODE)
if rc == ReturnCode.OK:
if not isinstance(r.payload, dict):
result[t] = f"bad reply type {type(r.payload)}"
else:
result.update(r.payload)
else:
result[t] = f"error {rc}"
return result
def _do_url_use(self, request: Message) -> Union[None, Message]:
results = self.get_url_use(request.payload)
return Message(payload=results)
def get_route_info(self, target_fqcn: str) -> (dict, dict):
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_ROUTE, target=target_fqcn, timeout=1.0, request=Message()
)
reply_headers = reply.headers
rc = reply.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
if rc == ReturnCode.OK:
if not isinstance(reply.payload, dict):
return reply_headers, {"error": f"reply payload got {type(reply.payload)}"}
return reply_headers, reply.payload
else:
return reply_headers, {"error": f"Reply ReturnCode: {rc}"}
def start_route(self, from_fqcn: str, target_fqcn: str) -> (str, dict, dict):
err = ""
reply_headers = {}
req_headers = {}
reply = self.cell.send_request(
channel=_CHANNEL,
topic=_TOPIC_START_ROUTE,
target=from_fqcn,
timeout=1.0,
request=Message(payload=target_fqcn),
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc == ReturnCode.OK:
result = reply.payload
if not isinstance(result, dict):
err = f"reply payload should be dict but got {type(reply.payload)}"
else:
reply_headers = result.get("reply")
req_headers = result.get("request")
else:
err = f"error in reply {rc}"
reply_headers = reply.headers
return err, reply_headers, req_headers
def _do_report_cells(self, request: Message) -> Union[None, Message]:
_, results = self.request_cells_info()
return Message(payload=results)
def stop(self):
# ask all children to stop
self._broadcast_to_subs(topic=_TOPIC_STOP, timeout=0.0)
self.close()
def stop_cell(self, target: str) -> str:
# if self.cell.get_fqcn() == target:
# self.stop()
# return ReturnCode.OK
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_STOP_CELL, request=Message(), target=target, timeout=1.0
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
return rc
def _request_speed_test(self, target_fqcn: str, num, size) -> Message:
start = time.perf_counter()
payload = bytes(_ONE_K * size)
payload_size = len(payload)
h = hashlib.md5(payload)
dig1 = h.digest()
end = time.perf_counter()
payload_prep_time = end - start
errs = 0
timeouts = 0
comm_errs = 0
proc_errs = 0
size_errs = 0
start = time.perf_counter()
for i in range(num):
r = self.cell.send_request(
channel=_CHANNEL,
topic=_TOPIC_ECHO,
target=target_fqcn,
request=Message(payload=payload),
timeout=10.0,
)
rc = r.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
if rc == ReturnCode.OK:
if len(r.payload) != payload_size:
self.cell.logger.error(
f"{self.cell.get_fqcn()}: expect {payload_size} bytes but received {len(r.payload)}"
)
proc_errs += 1
else:
h = hashlib.md5(r.payload)
dig2 = h.digest()
if dig1 != dig2:
self.cell.logger.error(f"{self.cell.get_fqcn()}: digest mismatch!")
proc_errs += 1
elif rc == ReturnCode.TIMEOUT:
timeouts += 1
elif rc == ReturnCode.COMM_ERROR:
comm_errs += 1
elif rc == ReturnCode.MSG_TOO_BIG:
size_errs += 1
else:
errs += 1
end = time.perf_counter()
total = end - start
avg = total / num
return Message(
payload={
"test": f"{size:,}KB {num} rounds between {self.cell.get_fqcn()} and {target_fqcn}",
"prep": payload_prep_time,
"timeouts": timeouts,
"comm_errors": comm_errs,
"size_errors": size_errs,
"proc_errors": proc_errs,
"other_errors": errs,
"total": total,
"average": avg,
}
)
def _do_speed(self, request: Message) -> Union[None, Message]:
params = request.payload
if not isinstance(params, dict):
return make_reply(ReturnCode.INVALID_REQUEST, f"request body must be dict but got {type(params)}")
to_fqcn = params.get("to")
if not to_fqcn:
return make_reply(ReturnCode.INVALID_REQUEST, "missing 'to' param in request")
err = FQCN.validate(to_fqcn)
if err:
return make_reply(ReturnCode.INVALID_REQUEST, f"bad target FQCN: {err}")
num = params.get("num", 100)
size = params.get("size", 1000)
if size <= 0:
size = 1000
if num <= 0:
num = 100
return self._request_speed_test(to_fqcn, num, size)
def _do_echo(self, request: Message) -> Union[None, Message]:
return Message(payload=request.payload)
def _do_stress_test(self, params):
if not isinstance(params, dict):
return {"error": f"bad params - expect dict but got {type(params)}"}
targets = params.get("targets")
if not targets:
return {"error": "no targets specified"}
num_rounds = params.get("num")
if not num_rounds:
return {"error": "missing num of rounds"}
my_fqcn = self.cell.get_fqcn()
if my_fqcn in targets:
targets.remove(my_fqcn)
if not targets:
return {"error": "no targets to try"}
counts = {}
errors = {}
start = time.perf_counter()
for i in range(num_rounds):
payload = os.urandom(1024)
h = hashlib.md5(payload)
d1 = h.digest()
target = targets[random.randrange(len(targets))]
req = Message(payload=payload)
reply = self.cell.send_request(channel=_CHANNEL, topic=_TOPIC_ECHO, target=target, request=req, timeout=1.0)
if target not in counts:
counts[target] = 0
counts[target] += 1
rc = reply.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
if rc != ReturnCode.OK:
self.cell.logger.error(f"{self.cell.get_fqcn()}: return code from {target}: {rc}")
if target not in errors:
errors[target] = 0
errors[target] += 1
else:
h = hashlib.md5(reply.payload)
d2 = h.digest()
if d1 != d2:
self.cell.logger.error(f"{self.cell.get_fqcn()}: digest mismatch from {target}")
if target not in errors:
errors[target] = 0
errors[target] += 1
end = time.perf_counter()
return {"counts": counts, "errors": errors, "time": end - start}
def _do_stress(self, request: Message) -> Union[None, Message]:
params = request.payload
result = self._do_stress_test(params)
return Message(payload=result)
def start_stress_test(self, targets: list, num_rounds=10, timeout=5.0):
self.cell.logger.info(f"{self.cell.get_fqcn()}: starting stress test on {targets}")
result = {}
payload = {"targets": targets, "num": num_rounds}
msg_targets = [x for x in targets]
my_fqcn = self.cell.get_fqcn()
if my_fqcn in msg_targets:
msg_targets.remove(my_fqcn)
if not msg_targets:
return {"error": "no targets for stress test"}
replies = self.cell.broadcast_request(
channel=_CHANNEL,
topic=_TOPIC_STRESS,
targets=msg_targets,
request=Message(payload=payload),
timeout=timeout,
)
for t, r in replies.items():
rc = r.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
if rc != ReturnCode.OK:
result[t] = f"RC={rc}"
else:
result[t] = r.payload
return result
def speed_test(self, from_fqcn: str, to_fqcn: str, num_tries, payload_size) -> dict:
err = FQCN.validate(from_fqcn)
if err:
return {"error": f"invalid from_fqcn {from_fqcn}: {err}"}
err = FQCN.validate(to_fqcn)
if err:
return {"error": f"invalid to_fqcn {to_fqcn}: {err}"}
result = {}
start = time.perf_counter()
reply = self.cell.send_request(
channel=_CHANNEL,
topic=_TOPIC_SPEED,
request=Message(payload={"to": to_fqcn, "num": num_tries, "size": payload_size}),
target=from_fqcn,
timeout=100.0,
)
end = time.perf_counter()
result["test_time"] = end - start
rc = reply.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
if rc != ReturnCode.OK:
result.update({"error": f"return code {rc}"})
elif not isinstance(reply.payload, dict):
result.update({"error": f"bad reply: expect dict but got {type(reply.payload)}"})
else:
result.update(reply.payload)
return result
def change_root(self, new_root_url: str):
self._broadcast_to_subs(topic=_TOPIC_CHANGE_ROOT, message=Message(payload=new_root_url), timeout=0.0)
def _do_change_root(self, request: Message) -> Union[None, Message]:
new_root_url = request.payload
assert isinstance(new_root_url, str)
self.change_root(new_root_url)
if self.change_root_cb is not None:
self.change_root_cb(new_root_url)
return None
def start_bulk_test(self, targets: list, size: int):
self.cell.logger.info(f"{self.cell.get_fqcn()}: starting bulk test on {targets}")
msg_targets = [x for x in targets]
my_fqcn = self.cell.get_fqcn()
if my_fqcn in msg_targets:
msg_targets.remove(my_fqcn)
if not msg_targets:
return {"error": "no targets for bulk test"}
result = {}
replies = self.cell.broadcast_request(
channel=_CHANNEL,
topic=_TOPIC_BULK_TEST,
targets=msg_targets,
request=Message(payload=size),
timeout=1.0,
)
for t, r in replies.items():
rc = r.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
if rc != ReturnCode.OK:
result[t] = f"RC={rc}"
else:
result[t] = r.payload
return result
def _do_bulk_test(self, request: Message) -> Union[None, Message]:
size = request.payload
assert isinstance(size, int)
nums = []
for _ in range(size):
num = random.randint(0, 100)
nums.append(num)
msg = Message(payload=num)
self.cell.queue_message(
channel=_CHANNEL,
topic=_TOPIC_BULK_ITEM,
targets=FQCN.ROOT_SERVER,
message=msg,
)
return Message(payload=f"queued: {nums}")
def _do_bulk_item(self, request: Message) -> Union[None, Message]:
num = request.payload
origin = request.get_header(MessageHeaderKey.ORIGIN)
self.cell.logger.info(f"{self.cell.get_fqcn()}: got {num} from {origin}")
return None
def get_msg_stats_table(self, target: str, mode: str):
reply = self.cell.send_request(
channel=_CHANNEL,
topic=_TOPIC_MSG_STATS,
request=Message(payload={"mode": mode}),
timeout=1.0,
target=target,
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc != ReturnCode.OK:
return f"error: {rc}"
return reply.payload
def _do_msg_stats(self, request: Message) -> Union[None, Message]:
p = request.payload
assert isinstance(p, dict)
mode = p.get("mode")
headers, rows = self.cell.msg_stats_pool.get_table(mode)
reply = {"headers": headers, "rows": rows}
return Message(payload=reply)
def get_pool_list(self, target: str):
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_LIST_POOLS, request=Message(), timeout=1.0, target=target
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
err = reply.get_header(MessageHeaderKey.ERROR, "")
if rc != ReturnCode.OK:
return f"{rc}: {err}"
return reply.payload
def _do_list_pools(self, request: Message) -> Union[None, Message]:
headers, rows = StatsPoolManager.get_table()
reply = {"headers": headers, "rows": rows}
return Message(payload=reply)
def show_pool(self, target: str, pool_name: str, mode: str):
reply = self.cell.send_request(
channel=_CHANNEL,
topic=_TOPIC_SHOW_POOL,
request=Message(payload={"mode": mode, "pool": pool_name}),
timeout=1.0,
target=target,
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc != ReturnCode.OK:
err = reply.get_header(MessageHeaderKey.ERROR, "")
return f"{rc}: {err}"
return reply.payload
def _do_show_pool(self, request: Message) -> Union[None, Message]:
p = request.payload
assert isinstance(p, dict)
pool_name = p.get("pool", "")
mode = p.get("mode", "")
pool = StatsPoolManager.get_pool(pool_name)
if not pool:
return Message(
headers={
MessageHeaderKey.RETURN_CODE: ReturnCode.INVALID_REQUEST,
MessageHeaderKey.ERROR: f"unknown pool '{pool_name}'",
}
)
headers, rows = pool.get_table(mode)
reply = {"headers": headers, "rows": rows}
return Message(payload=reply)
def get_comm_config(self, target: str):
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_COMM_CONFIG, request=Message(), timeout=1.0, target=target
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc != ReturnCode.OK:
err = reply.get_header(MessageHeaderKey.ERROR, "")
return f"{rc}: {err}"
return reply.payload
def get_config_vars(self, target: str):
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_CONFIG_VARS, request=Message(), timeout=1.0, target=target
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc != ReturnCode.OK:
err = reply.get_header(MessageHeaderKey.ERROR, "")
return f"{rc}: {err}"
return reply.payload
def get_process_info(self, target: str):
reply = self.cell.send_request(
channel=_CHANNEL, topic=_TOPIC_PROCESS_INFO, request=Message(), timeout=1.0, target=target
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
if rc != ReturnCode.OK:
err = reply.get_header(MessageHeaderKey.ERROR, "")
return f"{rc}: {err}"
return reply.payload
def _do_comm_config(self, request: Message) -> Union[None, Message]:
info = self.cell.connector_manager.get_config_info()
return Message(payload=info)
def _do_config_vars(self, request: Message) -> Union[None, Message]:
info = ConfigService.get_var_values()
return Message(payload=info)
def _do_process_info(self, request: Message) -> Union[None, Message]:
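        # Note: ru_maxrss is reported in kilobytes on Linux but in bytes on macOS.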
usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
rows = [
["Process ID", str(os.getpid())],
["Memory Usage", str(usage)],
["Thread Count", str(threading.active_count())],
]
for thread in threading.enumerate():
rows.append([f"Thread:{thread.ident}", thread.name])
return Message(payload={"headers": ["Resource", "Value"], "rows": rows})
def _broadcast_to_subs(self, topic: str, message=None, timeout=1.0):
if not message:
message = Message()
children, clients = self.cell.get_sub_cell_names()
targets = []
targets.extend(children)
targets.extend(clients)
if targets:
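            # Non-root cells shrink the timeout by their generation (depth in the FQCN
            # tree), presumably so nested broadcasts still finish within the caller's budget.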
if timeout > 0.0:
if self.cell.my_info.is_root and self.cell.my_info.is_on_server:
timeout = timeout + 0.1
else:
timeout = timeout / self.cell.my_info.gen
return self.cell.broadcast_request(
channel=_CHANNEL, topic=topic, targets=targets, request=message, timeout=timeout
)
else:
self.cell.fire_and_forget(channel=_CHANNEL, topic=topic, targets=targets, message=message)
return {}
| NVFlare-main | nvflare/fuel/f3/cellnet/net_agent.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey
from cryptography.x509 import Certificate
from nvflare.fuel.f3.cellnet.cell_cipher import SimpleCellCipher
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.endpoint import Endpoint
log = logging.getLogger(__name__)
CERT_ERROR = "cert_error"
CERT_TARGET = "cert_target"
CERT_ORIGIN = "cert_origin"
CERT_CONTENT = "cert_content"
CERT_CA_CONTENT = "cert_ca_content"
CERT_REQ_TIMEOUT = 10
class CredentialManager:
"""Helper class for secure message. It holds the local credentials and certificate cache"""
def __init__(self, local_endpoint: Endpoint):
self.local_endpoint = local_endpoint
self.cert_cache = {}
self.lock = threading.Lock()
conn_props = self.local_endpoint.conn_props
ca_cert_path = conn_props.get(DriverParams.CA_CERT)
server_cert_path = conn_props.get(DriverParams.SERVER_CERT)
if server_cert_path:
local_cert_path = server_cert_path
local_key_path = conn_props.get(DriverParams.SERVER_KEY)
else:
local_cert_path = conn_props.get(DriverParams.CLIENT_CERT)
local_key_path = conn_props.get(DriverParams.CLIENT_KEY)
if not local_cert_path:
log.debug("Certificate is not configured, secure message is not supported")
self.ca_cert = None
self.local_cert = None
self.local_key = None
self.cell_cipher = None
else:
            self.ca_cert = self.read_file(ca_cert_path)
            self.local_cert = self.read_file(local_cert_path)
            self.local_key = self.read_file(local_key_path)
            if not self.local_cert:
                log.debug("Certificate is not configured, secure message is not supported")
                self.cell_cipher = None
            else:
                self.cell_cipher = SimpleCellCipher(self.get_ca_cert(), self.get_local_key(), self.get_local_cert())
def encrypt(self, target_cert: bytes, payload: bytes) -> bytes:
if not self.cell_cipher:
raise RuntimeError("Secure message not supported, Cell not running in secure mode")
return self.cell_cipher.encrypt(payload, x509.load_pem_x509_certificate(target_cert))
def decrypt(self, origin_cert: bytes, cipher: bytes) -> bytes:
if not self.cell_cipher:
raise RuntimeError("Secure message not supported, Cell not running in secure mode")
return self.cell_cipher.decrypt(cipher, x509.load_pem_x509_certificate(origin_cert))
def get_certificate(self, fqcn: str) -> bytes:
if not self.cell_cipher:
raise RuntimeError("This cell doesn't support certificate exchange, not running in secure mode")
target = FQCN.get_root(fqcn)
return self.cert_cache.get(target)
def save_certificate(self, fqcn: str, cert: bytes):
target = FQCN.get_root(fqcn)
self.cert_cache[target] = cert
def create_request(self, target: str) -> dict:
req = {
CERT_TARGET: target,
CERT_ORIGIN: FQCN.get_root(self.local_endpoint.name),
CERT_CONTENT: self.local_cert,
CERT_CA_CONTENT: self.ca_cert,
}
return req
def process_request(self, request: dict) -> dict:
target = request.get(CERT_TARGET)
origin = request.get(CERT_ORIGIN)
reply = {CERT_TARGET: target, CERT_ORIGIN: origin}
if not self.local_cert:
reply[CERT_ERROR] = f"Target {target} is not running in secure mode"
else:
cert = request.get(CERT_CONTENT)
# Save cert from requester in the cache
self.cert_cache[origin] = cert
reply[CERT_CONTENT] = self.local_cert
reply[CERT_CA_CONTENT] = self.ca_cert
return reply
@staticmethod
    def process_response(reply: dict) -> bytes:
        target = reply.get(CERT_TARGET)
        error = reply.get(CERT_ERROR)
        if error:
            raise RuntimeError(f"Request to get certificate from {target} failed: {error}")
        return reply.get(CERT_CONTENT)
def get_local_cert(self) -> Certificate:
return x509.load_pem_x509_certificate(self.local_cert)
def get_local_key(self) -> RSAPrivateKey:
return serialization.load_pem_private_key(self.local_key, password=None)
def get_ca_cert(self) -> Certificate:
return x509.load_pem_x509_certificate(self.ca_cert)
@staticmethod
def read_file(file_name: str):
if not file_name:
return None
with open(file_name, "rb") as f:
return f.read()
| NVFlare-main | nvflare/fuel/f3/cellnet/credential_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CELLNET_PREFIX = "cn__"
class ConnectorRequirementKey:
URL = "url"
HOST = "host"
SECURE = "secure" # bool: secure or not
class MessageHeaderKey:
MSG_TYPE = CELLNET_PREFIX + "msg_type"
REQ_ID = CELLNET_PREFIX + "req_id"
REPLY_EXPECTED = CELLNET_PREFIX + "reply_expected"
TOPIC = CELLNET_PREFIX + "topic"
ORIGIN = CELLNET_PREFIX + "origin"
DESTINATION = CELLNET_PREFIX + "destination"
FROM_CELL = CELLNET_PREFIX + "from"
TO_CELL = CELLNET_PREFIX + "to"
CONN_URL = CELLNET_PREFIX + "conn_url"
CHANNEL = CELLNET_PREFIX + "channel"
RETURN_CODE = CELLNET_PREFIX + "return_code"
ERROR = CELLNET_PREFIX + "error"
PAYLOAD_ENCODING = CELLNET_PREFIX + "payload_encoding"
ROUTE = CELLNET_PREFIX + "route"
ORIGINAL_HEADERS = CELLNET_PREFIX + "original_headers"
SEND_TIME = CELLNET_PREFIX + "send_time"
RETURN_REASON = CELLNET_PREFIX + "return_reason"
SECURE = CELLNET_PREFIX + "secure"
PAYLOAD_LEN = CELLNET_PREFIX + "payload_len"
ENCRYPTED = CELLNET_PREFIX + "encrypted"
OPTIONAL = CELLNET_PREFIX + "optional"
class ReturnReason:
CANT_FORWARD = "cant_forward"
INTERCEPT = "intercept"
class MessagePropKey:
ENDPOINT = CELLNET_PREFIX + "endpoint"
COMMON_NAME = CELLNET_PREFIX + "common_name"
class Encoding:
BYTES = "bytes"
FOBS = "fobs" # FOBS coded
NONE = "none"
class ReturnCode:
OK = "ok"
TIMEOUT = "timeout"
INVALID_TARGET = "invalid_target"
TARGET_UNREACHABLE = "target_unreachable"
COMM_ERROR = "comm_error"
MSG_TOO_BIG = "msg_too_big"
FILTER_ERROR = "filter_error"
INVALID_REQUEST = "invalid_request"
PROCESS_EXCEPTION = "process_exception" # receiver error processing request
AUTHENTICATION_ERROR = "authentication_error"
SERVICE_UNAVAILABLE = "service_unavailable"
INVALID_SESSION = "invalid_session"
ABORT_RUN = "abort_run"
UNAUTHENTICATED = "unauthenticated"
ALL_RETURN_CODES = [
ReturnCode.OK,
ReturnCode.TIMEOUT,
ReturnCode.INVALID_TARGET,
ReturnCode.TARGET_UNREACHABLE,
ReturnCode.COMM_ERROR,
ReturnCode.MSG_TOO_BIG,
ReturnCode.FILTER_ERROR,
ReturnCode.INVALID_REQUEST,
ReturnCode.PROCESS_EXCEPTION,
ReturnCode.AUTHENTICATION_ERROR,
ReturnCode.SERVICE_UNAVAILABLE,
ReturnCode.INVALID_SESSION,
ReturnCode.ABORT_RUN,
ReturnCode.UNAUTHENTICATED,
]
class MessageType:
REQ = "req"
REPLY = "reply"
RETURN = "return" # return to sender due to forward error
class CellPropertyKey:
FQCN = "fqcn"
class TargetCellUnreachable(Exception):
pass
class AuthenticationError(Exception):
pass
class ServiceUnavailable(Exception):
pass
class InvalidSession(Exception):
pass
class AbortRun(Exception):
pass
class InvalidRequest(Exception):
pass
| NVFlare-main | nvflare/fuel/f3/cellnet/defs.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import List
class FQCN:
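    """Utilities for Fully Qualified Cell Names: dot-separated paths such as ``server.job_1``.

    Illustrative values (derived from the methods below):

    .. code-block:: python

        FQCN.split("server.job_1")                      # ["server", "job_1"]
        FQCN.get_root("server.job_1")                   # "server"
        FQCN.get_parent("server.job_1")                 # "server"
        FQCN.is_ancestor("server", "server.job_1.r0")   # True
    """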
SEPARATOR = "."
ROOT_SERVER = "server"
@staticmethod
def normalize(fqcn: str) -> str:
return fqcn.strip()
@staticmethod
def split(fqcn: str) -> List[str]:
return fqcn.split(FQCN.SEPARATOR)
@staticmethod
def join(path: List[str]) -> str:
return FQCN.SEPARATOR.join(path)
@staticmethod
def validate(fqcn) -> str:
if not isinstance(fqcn, str):
return f"must be str but got {type(fqcn)}"
fqcn = FQCN.normalize(fqcn)
if not fqcn:
return "empty"
pattern = "^[A-Za-z0-9_.-]*$"
valid = bool(re.match(pattern, fqcn))
if not valid:
return "invalid char"
parts = FQCN.split(fqcn)
info = {}
for p in parts:
if not p:
return "empty part"
if info.get(p):
return f"dup '{p}'"
info[p] = True
return ""
@staticmethod
def get_root(fqcn: str) -> str:
parts = FQCN.split(fqcn)
return parts[0]
@staticmethod
def get_parent(fqcn: str) -> str:
parts = FQCN.split(fqcn)
if len(parts) == 1:
return ""
return FQCN.join(parts[0:-1])
@staticmethod
def is_parent(fqcn1: str, fqcn2: str) -> bool:
return fqcn1 == FQCN.get_parent(fqcn2)
@staticmethod
def is_ancestor(fqcn1: str, fqcn2: str) -> bool:
return fqcn2.startswith(fqcn1 + FQCN.SEPARATOR)
class FqcnInfo:
def __init__(self, fqcn: str):
self.fqcn = fqcn
self.path = FQCN.split(fqcn)
self.gen = len(self.path)
self.is_root = self.gen == 1
self.root = self.path[0]
self.is_on_server = self.root == FQCN.ROOT_SERVER
def same_family(info1: FqcnInfo, info2: FqcnInfo):
return info1.root == info2.root
| NVFlare-main | nvflare/fuel/f3/cellnet/fqcn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from cryptography.exceptions import InvalidKey, InvalidSignature
from cryptography.hazmat.primitives import asymmetric, ciphers, hashes, padding
from cryptography.x509 import Certificate
HASH_LENGTH = 4 # Adjustable to avoid collision
NONCE_LENGTH = 16 # For AES, this is 128 bits (i.e. block size)
KEY_LENGTH = 32 # AES 256. Choose from 16, 24, 32
HEADER_LENGTH = HASH_LENGTH + NONCE_LENGTH
PADDING_LENGTH = NONCE_LENGTH * 8 # in bits
KEY_ENC_LENGTH = 256
SIGNATURE_LENGTH = 256
SIMPLE_HEADER_LENGTH = NONCE_LENGTH + KEY_ENC_LENGTH + SIGNATURE_LENGTH
def get_hash(value):
hash = hashes.Hash(hashes.SHA256())
hash.update(value)
return hash.finalize()
class SessionKeyUnavailable(Exception):
pass
class InvalidCertChain(Exception):
pass
def _asym_enc(k, m):
return k.encrypt(
m,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None
),
)
def _asym_dec(k, m):
return k.decrypt(
m,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None
),
)
def _sign(k, m):
return k.sign(
data=m,
padding=asymmetric.padding.PSS(
mgf=asymmetric.padding.MGF1(hashes.SHA256()),
salt_length=asymmetric.padding.PSS.MAX_LENGTH,
),
algorithm=hashes.SHA256(),
)
def _verify(k, m, s):
if not isinstance(m, bytes):
m = bytes(m)
if not isinstance(s, bytes):
s = bytes(s)
k.verify(
s,
m,
asymmetric.padding.PSS(
mgf=asymmetric.padding.MGF1(hashes.SHA256()), salt_length=asymmetric.padding.PSS.MAX_LENGTH
),
hashes.SHA256(),
)
def _sym_enc(k, n, m):
cipher = ciphers.Cipher(ciphers.algorithms.AES(k), ciphers.modes.CBC(n))
encryptor = cipher.encryptor()
padder = padding.PKCS7(PADDING_LENGTH).padder()
padded_data = padder.update(m) + padder.finalize()
return encryptor.update(padded_data) + encryptor.finalize()
def _sym_dec(k, n, m):
cipher = ciphers.Cipher(ciphers.algorithms.AES(k), ciphers.modes.CBC(n))
decryptor = cipher.decryptor()
plain_text = decryptor.update(m)
plain_text = plain_text + decryptor.finalize()
unpadder = padding.PKCS7(PADDING_LENGTH).unpadder()
return unpadder.update(plain_text) + unpadder.finalize()
class SessionKeyManager:
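    """Establishes and caches AES session keys between two certificate holders.

    key_request() generates a session key, RSA-encrypts it to the remote cert, and
    signs the key with the local private key; process_key_response() decrypts and
    verifies on the other side. Keys are cached under the last HASH_LENGTH bytes of
    their SHA-256 hash.
    """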
def __init__(self, root_ca):
self.key_hash_dict = dict()
self.root_ca = root_ca
self.root_ca_pub_key = root_ca.public_key()
def validate_cert_chain(self, cert):
self.root_ca_pub_key.verify(
cert.signature, cert.tbs_certificate_bytes, asymmetric.padding.PKCS1v15(), cert.signature_hash_algorithm
)
def key_request(self, remote_cert, local_cert, local_pri_key):
session_key = os.urandom(KEY_LENGTH)
signature = _sign(local_pri_key, session_key)
try:
self.validate_cert_chain(remote_cert)
except InvalidSignature:
return False
remote_pub_key = remote_cert.public_key()
key_enc = _asym_enc(remote_pub_key, session_key)
self.key_hash_dict[get_hash(session_key)[-HASH_LENGTH:]] = session_key
key_response = key_enc + signature
return key_response
def process_key_response(self, remote_cert, local_cert, local_pri_key, key_response):
key_enc, signature = key_response[:KEY_ENC_LENGTH], key_response[KEY_ENC_LENGTH:]
try:
session_key = _asym_dec(local_pri_key, key_enc)
self.validate_cert_chain(remote_cert)
public_key = remote_cert.public_key()
_verify(public_key, session_key, signature)
self.key_hash_dict[get_hash(session_key)[-HASH_LENGTH:]] = session_key
except (InvalidKey, InvalidSignature):
return False
return True
def key_available(self):
return bool(self.key_hash_dict)
def get_key(self, key_hash):
return self.key_hash_dict.get(key_hash)
    def get_latest_key(self):
        try:
            # popitem() returns the most recently inserted (hash, key) pair;
            # put it back so it can still be looked up by hash later
            key_hash, last_value = self.key_hash_dict.popitem()
            self.key_hash_dict[key_hash] = last_value
        except KeyError as e:
            raise SessionKeyUnavailable("No session key established yet") from e
        return last_value
class CellCipher:
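    """Symmetric cipher that uses session keys held by a SessionKeyManager.

    Wire format produced by encrypt(): ``nonce (NONCE_LENGTH) + last HASH_LENGTH
    bytes of the key hash + AES-CBC ciphertext``; decrypt() uses the embedded hash
    suffix to look up the matching session key.
    """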
def __init__(self, session_key_manager: SessionKeyManager):
self.session_key_manager = session_key_manager
def encrypt(self, message):
key = self.session_key_manager.get_latest_key()
key_hash = get_hash(key)
nonce = os.urandom(NONCE_LENGTH)
return nonce + key_hash[-HASH_LENGTH:] + _sym_enc(key, nonce, message)
def decrypt(self, message):
nonce, key_hash, message = (
message[:NONCE_LENGTH],
message[NONCE_LENGTH:HEADER_LENGTH],
message[HEADER_LENGTH:],
)
key = self.session_key_manager.get_key(key_hash)
if key is None:
raise SessionKeyUnavailable("No session key found for received message")
return _sym_dec(key, nonce, message)
class SimpleCellCipher:
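    """Hybrid encryption between two cells whose certificates chain to the same root CA.

    Wire format produced by encrypt(): ``nonce (16B) + RSA-encrypted AES key (256B) +
    signature (256B) + AES-CBC ciphertext`` (lengths per the constants above; the 256B
    fields assume 2048-bit RSA keys). A round-trip sketch with illustrative names,
    assuming ``ca``, (``key_a``, ``cert_a``) and (``key_b``, ``cert_b``) were issued
    by the same root CA:

    .. code-block:: python

        alice = SimpleCellCipher(ca, key_a, cert_a)
        bob = SimpleCellCipher(ca, key_b, cert_b)
        ct = alice.encrypt(b"hello", cert_b)
        assert bob.decrypt(ct, cert_a) == b"hello"
    """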
def __init__(self, root_ca: Certificate, pri_key: asymmetric.rsa.RSAPrivateKey, cert: Certificate):
self._root_ca = root_ca
self._root_ca_pub_key = root_ca.public_key()
self._pri_key = pri_key
self._cert = cert
self._pub_key = cert.public_key()
self._validate_cert_chain(self._cert)
self._cached_enc = dict()
self._cached_dec = dict()
def _validate_cert_chain(self, cert: Certificate):
self._root_ca_pub_key.verify(
cert.signature, cert.tbs_certificate_bytes, asymmetric.padding.PKCS1v15(), cert.signature_hash_algorithm
)
def encrypt(self, message: bytes, target_cert: Certificate):
cert_hash = hash(target_cert)
secret = self._cached_enc.get(cert_hash)
if secret is None:
self._validate_cert_chain(target_cert)
key = os.urandom(KEY_LENGTH)
remote_pub_key = target_cert.public_key()
key_enc = _asym_enc(remote_pub_key, key)
signature = _sign(self._pri_key, key_enc)
self._cached_enc[cert_hash] = (key, key_enc, signature)
else:
(key, key_enc, signature) = secret
nonce = os.urandom(NONCE_LENGTH)
ct = nonce + key_enc + signature + _sym_enc(key, nonce, message)
return ct
def decrypt(self, message: bytes, origin_cert: Certificate):
nonce, key_enc, signature = (
message[:NONCE_LENGTH],
message[NONCE_LENGTH : NONCE_LENGTH + KEY_ENC_LENGTH],
message[NONCE_LENGTH + KEY_ENC_LENGTH : SIMPLE_HEADER_LENGTH],
)
if not isinstance(key_enc, bytes):
key_enc = bytes(key_enc)
key_hash = hash(key_enc)
dec = self._cached_dec.get(key_hash)
if dec is None:
self._validate_cert_chain(origin_cert)
public_key = origin_cert.public_key()
_verify(public_key, key_enc, signature)
key = _asym_dec(self._pri_key, key_enc)
self._cached_dec[key_hash] = key
else:
key = dec
return _sym_dec(key, nonce, message[SIMPLE_HEADER_LENGTH:])
| NVFlare-main | nvflare/fuel/f3/cellnet/cell_cipher.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
class Callback:
def __init__(self, cb, args, kwargs):
self.cb = cb
self.args = args
self.kwargs = kwargs
class Registry:
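    """Maps "channel:topic" keys to registered items, with ``*`` wildcard fallback.

    find() tries the exact channel/topic first, then topic ``*`` within the channel,
    then channel ``*`` topic ``*``:

    .. code-block:: python

        reg = Registry()
        reg.set("admin", "*", my_cb)        # my_cb: any registered item
        reg.find("admin", "shutdown")       # -> my_cb (falls back to topic "*")
    """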
def __init__(self):
        self.reg = {}  # "channel:topic" key => registered item(s)
@staticmethod
def _item_key(channel: str, topic: str) -> str:
return f"{channel}:{topic}"
def set(self, channel: str, topic: str, items: Any):
key = self._item_key(channel, topic)
self.reg[key] = items
def append(self, channel: str, topic: str, items: Any):
key = self._item_key(channel, topic)
item_list = self.reg.get(key)
if not item_list:
item_list = []
self.reg[key] = item_list
item_list.append(items)
def find(self, channel: str, topic: str) -> Any:
items = self.reg.get(self._item_key(channel, topic))
if not items:
# try topic * in channel
items = self.reg.get(self._item_key(channel, "*"))
if not items:
# try topic * in channel *
items = self.reg.get(self._item_key("*", "*"))
return items
| NVFlare-main | nvflare/fuel/f3/cellnet/registry.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import os
import random
import threading
import time
import uuid
from typing import Dict, List, Tuple, Union
from urllib.parse import urlparse
from nvflare.fuel.f3.cellnet.connector_manager import ConnectorManager
from nvflare.fuel.f3.cellnet.credential_manager import CredentialManager
from nvflare.fuel.f3.cellnet.defs import (
AbortRun,
AuthenticationError,
CellPropertyKey,
InvalidRequest,
InvalidSession,
MessageHeaderKey,
MessagePropKey,
MessageType,
ReturnCode,
ReturnReason,
ServiceUnavailable,
)
from nvflare.fuel.f3.cellnet.fqcn import FQCN, FqcnInfo, same_family
from nvflare.fuel.f3.cellnet.registry import Callback, Registry
from nvflare.fuel.f3.cellnet.utils import decode_payload, encode_payload, format_log_message, make_reply
from nvflare.fuel.f3.comm_config import CommConfigurator
from nvflare.fuel.f3.communicator import Communicator, MessageReceiver
from nvflare.fuel.f3.connection import Connection
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.endpoint import Endpoint, EndpointMonitor, EndpointState
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.mpm import MainProcessMonitor
from nvflare.fuel.f3.stats_pool import StatsPoolManager
from nvflare.security.logging import secure_format_exception, secure_format_traceback
_CHANNEL = "cellnet.channel"
_TOPIC_BULK = "bulk"
_TOPIC_BYE = "bye"
_SM_CHANNEL = "credential_manager"
_SM_TOPIC = "key_exchange"
_ONE_MB = 1024 * 1024
class TargetMessage:
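    """A message bundled with its routing info (target, channel, topic).

    to_dict()/from_dict() allow TargetMessages to be serialized for bulk transport
    (see _BulkSender below).
    """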
def __init__(
self,
target: str,
channel: str,
topic: str,
message: Message,
):
self.target = target
self.channel = channel
self.topic = topic
self.message = message
message.add_headers(
{
MessageHeaderKey.TOPIC: topic,
MessageHeaderKey.CHANNEL: channel,
MessageHeaderKey.DESTINATION: target,
}
)
def to_dict(self):
return {
"target": self.target,
"channel": self.channel,
"topic": self.topic,
"message": {"headers": dict(self.message.headers), "payload": self.message.payload},
}
@staticmethod
def from_dict(d: dict):
msg_dict = d.get("message")
msg = Message(headers=msg_dict.get("headers"), payload=msg_dict.get("payload"))
return TargetMessage(target=d.get("target"), channel=d.get("channel"), topic=d.get("topic"), message=msg)
class CellAgent:
"""A CellAgent represents a cell in another cell."""
def __init__(self, fqcn: str, endpoint: Endpoint):
"""
Args:
            fqcn: FQCN of the cell represented
            endpoint: the communication endpoint through which that cell is reached
        """
err = FQCN.validate(fqcn)
if err:
raise ValueError(f"Invalid FQCN '{fqcn}': {err}")
self.info = FqcnInfo(FQCN.normalize(fqcn))
self.endpoint = endpoint
def get_fqcn(self):
return self.info.fqcn
class _Waiter(threading.Event):
def __init__(self, targets: List[str]):
super().__init__()
self.targets = [x for x in targets]
self.reply_time = {} # target_id => reply recv timestamp
self.send_time = time.time()
self.id = str(uuid.uuid4())
self.received_replies = {}
def log_messaging_error(
logger, log_text: str, cell, msg: Union[Message, None], log_except=False, log_level=logging.ERROR
):
debug = False
if msg:
debug = msg.get_header(MessageHeaderKey.OPTIONAL, default=False)
log_text = format_log_message(cell.get_fqcn(), msg, log_text)
else:
log_text = f"{cell.get_fqcn()}: {log_text}"
if MainProcessMonitor.is_stopping():
debug = True
if debug:
log_level = logging.DEBUG
logger.log(log_level, log_text)
if log_except:
logger.log(log_level, secure_format_traceback())
class _BulkSender:
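    """Accumulates messages addressed to one target and ships them in batches of up
    to max_queue_size over the internal bulk channel/topic."""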
def __init__(self, cell, target: str, max_queue_size, secure=False):
self.cell = cell
self.target = target
self.max_queue_size = max_queue_size
self.secure = secure
self.messages = []
self.last_send_time = 0
self.lock = threading.Lock()
self.logger = logging.getLogger(self.__class__.__name__)
def queue_message(self, channel: str, topic: str, message: Message):
if self.secure:
            message.add_headers({MessageHeaderKey.SECURE: True})
encode_payload(message)
self.cell.encrypt_payload(message)
with self.lock:
tm = TargetMessage(target=self.target, channel=channel, topic=topic, message=message)
self.messages.append(tm)
self.logger.debug(f"{self.cell.get_fqcn()}: bulk sender {self.target} queue size {len(self.messages)}")
def send(self):
with self.lock:
num_msgs = len(self.messages)
if num_msgs == 0:
return
if num_msgs <= self.max_queue_size:
messages_to_send = self.messages
self.messages = []
else:
messages_to_send = self.messages[: self.max_queue_size]
self.messages = self.messages[self.max_queue_size :]
self.logger.debug(
f"{self.cell.get_fqcn()}: bulk sender {self.target} sending bulk size {len(messages_to_send)}"
)
tms = [m.to_dict() for m in messages_to_send]
bulk_msg = Message(None, tms)
send_errs = self.cell.fire_and_forget(
channel=_CHANNEL, topic=_TOPIC_BULK, targets=[self.target], message=bulk_msg
)
if send_errs[self.target]:
log_messaging_error(
logger=self.logger,
msg=bulk_msg,
log_text=f"failed to send bulk message: {send_errs[self.target]}",
cell=self.cell,
)
else:
self.logger.debug(f"{self.cell.get_fqcn()}: sent bulk messages ({len(messages_to_send)}) to {self.target}")
self.last_send_time = time.time()
def _validate_url(url: str) -> bool:
if not isinstance(url, str) or not url:
return False
result = urlparse(url)
if not result.scheme or not result.netloc:
return False
return True
class _CounterName:
LATE = "late"
SENT = "sent"
RETURN = "return"
FORWARD = "forward"
RECEIVED = "received"
REPLIED = "replied"
REPLY_NONE = "no_reply:none"
NO_REPLY_LATE = "no_reply:late"
REPLY_NOT_EXPECTED = "no_reply_expected"
REQ_FILTER_ERROR = "req_filter_error"
REP_FILTER_ERROR = "rep_filter_error"
class CertificateExchanger:
"""This class handles cert-exchange messages"""
def __init__(self, core_cell, credential_manager: CredentialManager):
self.core_cell = core_cell
self.credential_manager = credential_manager
self.core_cell.register_request_cb(_SM_CHANNEL, _SM_TOPIC, self._handle_cert_request)
def get_certificate(self, target: str) -> bytes:
cert = self.credential_manager.get_certificate(target)
if cert:
return cert
cert = self.exchange_certificate(target)
self.credential_manager.save_certificate(target, cert)
return cert
def exchange_certificate(self, target: str) -> bytes:
root = FQCN.get_root(target)
req = self.credential_manager.create_request(root)
response = self.core_cell.send_request(_SM_CHANNEL, _SM_TOPIC, root, Message(None, req))
reply = response.payload
if not reply:
error_code = response.get_header(MessageHeaderKey.RETURN_CODE)
raise RuntimeError(f"Cert exchanged to {root} failed: {error_code}")
return self.credential_manager.process_response(reply)
def _handle_cert_request(self, request: Message):
reply = self.credential_manager.process_request(request.payload)
return Message(None, reply)
class CoreCell(MessageReceiver, EndpointMonitor):
APP_ID = 1
ERR_TYPE_MSG_TOO_BIG = "MsgTooBig"
ERR_TYPE_COMM = "CommErr"
ALL_CELLS = {} # cell name => Cell
SUB_TYPE_CHILD = 1
SUB_TYPE_CLIENT = 2
SUB_TYPE_NONE = 0
def __init__(
self,
fqcn: str,
root_url: str,
secure: bool,
credentials: dict,
create_internal_listener: bool = False,
parent_url: str = None,
max_timeout=3600,
bulk_check_interval=0.5,
bulk_process_interval=0.5,
max_bulk_size=100,
):
"""
Args:
fqcn: the Cell's FQCN (Fully Qualified Cell Name)
credentials: credentials for secure connections
root_url: the URL for backbone external connection
secure: secure mode or not
max_timeout: default timeout for send_and_receive
create_internal_listener: whether to create an internal listener for child cells
parent_url: url for connecting to parent cell
        A cell's FQCN is the names of all of its ancestors plus its own name, concatenated with dots.
.. note::
Internal listener is automatically created for root cells.
.. code-block:: text
Example:
server.J12345 (the cell for job J12345 on the server)
server (the root cell of server)
            nih_1.J12345 (the cell for job J12345 on nih_1's site)
            client_1.J12345.R0 (the cell for rank R0 of J12345 on client_1's site)
            client_1 (the root cell of client_1)
"""
if fqcn in self.ALL_CELLS:
raise ValueError(f"there is already a cell named {fqcn}")
comm_configurator = CommConfigurator()
self._name = self.__class__.__name__
self.logger = logging.getLogger(self._name)
self.max_msg_size = comm_configurator.get_max_message_size()
self.comm_configurator = comm_configurator
err = FQCN.validate(fqcn)
if err:
raise ValueError(f"Invalid FQCN '{fqcn}': {err}")
self.my_info = FqcnInfo(FQCN.normalize(fqcn))
self.secure = secure
self.logger.debug(f"{self.my_info.fqcn}: max_msg_size={self.max_msg_size}")
if not root_url:
raise ValueError(f"{self.my_info.fqcn}: root_url not provided")
if self.my_info.is_root and self.my_info.is_on_server:
if isinstance(root_url, list):
for url in root_url:
if not _validate_url(url):
raise ValueError(f"{self.my_info.fqcn}: invalid Root URL '{url}'")
else:
if not _validate_url(root_url):
raise ValueError(f"{self.my_info.fqcn}: invalid Root URL '{root_url}'")
root_url = [root_url]
else:
if isinstance(root_url, list):
# multiple urls are available - randomly pick one
root_url = random.choice(root_url)
self.logger.info(f"{self.my_info.fqcn}: use Root URL {root_url}")
if not _validate_url(root_url):
raise ValueError(f"{self.my_info.fqcn}: invalid Root URL '{root_url}'")
self.root_url = root_url
self.create_internal_listener = create_internal_listener
self.parent_url = parent_url
self.bulk_check_interval = bulk_check_interval
self.max_bulk_size = max_bulk_size
self.bulk_checker = None
self.bulk_senders = {}
self.bulk_process_interval = bulk_process_interval
self.bulk_messages = []
self.bulk_processor = None
self.bulk_lock = threading.Lock()
self.bulk_msg_lock = threading.Lock()
self.agents = {} # cell_fqcn => CellAgent
self.agent_lock = threading.Lock()
self.logger.debug(f"Creating Cell: {self.my_info.fqcn}")
ep = Endpoint(
name=fqcn,
conn_props=credentials,
properties={
CellPropertyKey.FQCN: self.my_info.fqcn,
},
)
self.communicator = Communicator(local_endpoint=ep)
self.endpoint = ep
self.connector_manager = ConnectorManager(
communicator=self.communicator, secure=secure, comm_configurator=comm_configurator
)
self.communicator.register_message_receiver(app_id=self.APP_ID, receiver=self)
self.communicator.register_monitor(monitor=self)
self.req_reg = Registry()
self.in_req_filter_reg = Registry() # for request received
self.out_reply_filter_reg = Registry() # for reply going out
self.out_req_filter_reg = Registry() # for request sent
self.in_reply_filter_reg = Registry() # for reply received
self.error_handler_reg = Registry()
self.cell_connected_cb = None
self.cell_connected_cb_args = None
self.cell_connected_cb_kwargs = None
self.cell_disconnected_cb = None
self.cell_disconnected_cb_args = None
self.cell_disconnected_cb_kwargs = None
self.message_interceptor = None
self.message_interceptor_args = None
self.message_interceptor_kwargs = None
self.waiters = {} # req_id => req
self.stats_lock = threading.Lock()
self.req_hw = 0
self.num_sar_reqs = 0 # send-and-receive
self.num_faf_reqs = 0
self.num_timeout_reqs = 0
        # max_timeout specifies how long we keep requests in the waiters table
        # if they are not answered or picked up
if not max_timeout or max_timeout <= 0:
max_timeout = 3600 # one hour
self.max_timeout = max_timeout
self.asked_to_stop = False
self.running = False
self.stopping = False
# add appropriate drivers based on roles of the cell
# a cell can have at most two listeners: one for external, one for internal
self.ext_listeners = {} # external listeners: url => connector object
self.ext_listener_lock = threading.Lock()
self.ext_listener_impossible = False
self.int_listener = None # backbone internal listener - only for cells with child cells
# a cell could have any number of connectors: some for backbone, some for ad-hoc
self.bb_ext_connector = None # backbone external connector - only for Client cells
self.bb_int_connector = None # backbone internal connector - only for non-root cells
# ad-hoc connectors: currently only support ad-hoc external connectors
self.adhoc_connectors = {} # target cell fqcn => connector
self.adhoc_connector_lock = threading.Lock()
self.root_change_lock = threading.Lock()
self.register_request_cb(channel=_CHANNEL, topic=_TOPIC_BULK, cb=self._receive_bulk_message)
self.register_request_cb(channel=_CHANNEL, topic=_TOPIC_BYE, cb=self._peer_goodbye)
self.cleanup_waiter = None
self.msg_stats_pool = StatsPoolManager.add_time_hist_pool(
"Request_Response", "Request/response time in secs (sender)", scope=self.my_info.fqcn
)
self.req_cb_stats_pool = StatsPoolManager.add_time_hist_pool(
"Request_Processing",
"Time spent (secs) by request processing callbacks (receiver)",
scope=self.my_info.fqcn,
)
self.msg_travel_stats_pool = StatsPoolManager.add_time_hist_pool(
"Msg_Travel", "Time taken (secs) to get here (receiver)", scope=self.my_info.fqcn
)
self.sent_msg_size_pool = StatsPoolManager.add_msg_size_pool(
"Sent_Msg_sizes", "Sizes of messages sent (MBs)", scope=self.my_info.fqcn
)
self.received_msg_size_pool = StatsPoolManager.add_msg_size_pool(
"Received_Msg_Sizes", "Sizes of messages received (MBs)", scope=self.my_info.fqcn
)
counter_names = [_CounterName.SENT]
self.sent_msg_counter_pool = StatsPoolManager.add_counter_pool(
name="Sent_Msg_Counters",
description="Result counters of sent messages",
counter_names=counter_names,
scope=self.my_info.fqcn,
)
counter_names = [_CounterName.RECEIVED]
self.received_msg_counter_pool = StatsPoolManager.add_counter_pool(
name="Received_Msg_Counters",
description="Result counters of received messages",
counter_names=counter_names,
scope=self.my_info.fqcn,
)
self.ALL_CELLS[fqcn] = self
self.credential_manager = CredentialManager(self.endpoint)
self.cert_ex = CertificateExchanger(self, self.credential_manager)
def log_error(self, log_text: str, msg: Union[None, Message], log_except=False):
log_messaging_error(
logger=self.logger, log_text=log_text, cell=self, msg=msg, log_except=log_except, log_level=logging.ERROR
)
def log_warning(self, log_text: str, msg: Union[None, Message], log_except=False):
log_messaging_error(
logger=self.logger, log_text=log_text, cell=self, msg=msg, log_except=log_except, log_level=logging.WARNING
)
def get_root_url_for_child(self):
if isinstance(self.root_url, list):
return self.root_url[0]
else:
return self.root_url
def get_fqcn(self) -> str:
return self.my_info.fqcn
def is_cell_reachable(self, target_fqcn: str, for_msg=None) -> bool:
if target_fqcn in self.ALL_CELLS:
return True
_, ep = self._find_endpoint(target_fqcn, for_msg)
return ep is not None
def is_cell_connected(self, target_fqcn: str) -> bool:
if target_fqcn in self.ALL_CELLS:
return True
agent = self.agents.get(target_fqcn)
return agent is not None
def is_backbone_ready(self):
"""Check if backbone is ready.
Backbone is the preconfigured network connections, like all the connections from clients to server.
Adhoc connections are not part of the backbone.
"""
if not self.running:
return False
if self.my_info.is_root:
if self.my_info.is_on_server:
# server root - make sure listener is created
return len(self.ext_listeners) > 0
else:
# client root - must be connected to server root
if FQCN.ROOT_SERVER in self.ALL_CELLS:
return True
else:
return self.agents.get(FQCN.ROOT_SERVER) is not None
else:
# child cell - must be connected to parent
parent_fqcn = FQCN.get_parent(self.my_info.fqcn)
if parent_fqcn in self.ALL_CELLS:
return True
else:
return self.agents.get(parent_fqcn) is not None
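    # --- Added sketch (hedged, not part of the original source): a typical
    # caller polls is_backbone_ready() after start() before sending messages.
    # The helper below is hypothetical and assumes only the method above.
    #
    #   import time
    #
    #   def wait_for_backbone(cell, timeout=30.0, interval=0.5) -> bool:
    #       deadline = time.time() + timeout
    #       while time.time() < deadline:
    #           if cell.is_backbone_ready():
    #               return True
    #           time.sleep(interval)
    #       return False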
def _set_bb_for_client_root(self):
self._create_bb_external_connector()
if self.create_internal_listener:
self._create_internal_listener()
def _set_bb_for_client_child(self, parent_url: str, create_internal_listener: bool):
if parent_url:
self._create_internal_connector(parent_url)
if create_internal_listener:
self._create_internal_listener()
if self.connector_manager.should_connect_to_server(self.my_info):
self._create_bb_external_connector()
def _set_bb_for_server_root(self):
if isinstance(self.root_url, list):
for url in self.root_url:
self.logger.info(f"{self.my_info.fqcn}: creating listener on {url}")
self._create_external_listener(url)
else:
self.logger.info(f"{self.my_info.fqcn}: creating listener on {self.root_url}")
if self.root_url:
self._create_external_listener(self.root_url)
if self.create_internal_listener:
self._create_internal_listener()
def _set_bb_for_server_child(self, parent_url: str, create_internal_listener: bool):
if FQCN.ROOT_SERVER in self.ALL_CELLS:
return
if parent_url:
self._create_internal_connector(parent_url)
if create_internal_listener:
self._create_internal_listener()
def change_server_root(self, to_url: str):
"""Change to a different server url
Args:
to_url: the new url of the server root
Returns:
"""
self.logger.debug(f"{self.my_info.fqcn}: changing server root to {to_url}")
with self.root_change_lock:
if self.my_info.is_on_server:
# only affect clients
self.logger.debug(f"{self.my_info.fqcn}: no change - on server side")
return
if to_url == self.root_url:
# already changed
self.logger.debug(f"{self.my_info.fqcn}: no change - same url")
return
self.root_url = to_url
self.drop_connectors()
self.drop_agents()
# recreate backbone connector to the root
if self.my_info.gen <= 2:
self.logger.debug(f"{self.my_info.fqcn}: recreating bb_external_connector ...")
self._create_bb_external_connector()
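    # --- Added sketch (hedged): client-side failover using change_server_root().
    # The backup url is illustrative; only the method above is assumed.
    #
    #   def fail_over(cell, backup_url="grpc://backup-server:8002"):
    #       # drops server-side connectors/agents, then reconnects to the backup
    #       cell.change_server_root(backup_url)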
def drop_connectors(self):
# drop connections to all cells on server and their agents
# drop the backbone connector
if self.bb_ext_connector:
self.logger.debug(f"{self.my_info.fqcn}: removing bb_ext_connector ...")
try:
self.communicator.remove_connector(self.bb_ext_connector.handle)
self.communicator.remove_endpoint(FQCN.ROOT_SERVER)
except Exception as ex:
self.log_error(
msg=None,
log_text=f"{self.my_info.fqcn}: error removing bb_ext_connector {secure_format_exception(ex)}",
)
self.bb_ext_connector = None
# drop ad-hoc connectors to cells on server
with self.adhoc_connector_lock:
cells_to_delete = []
for to_cell in self.adhoc_connectors.keys():
to_cell_info = FqcnInfo(to_cell)
if to_cell_info.is_on_server:
cells_to_delete.append(to_cell)
for cell_name in cells_to_delete:
self.logger.debug(f"{self.my_info.fqcn}: removing adhoc connector to {cell_name}")
connector = self.adhoc_connectors.pop(cell_name, None)
if connector:
try:
self.communicator.remove_connector(connector.handle)
self.communicator.remove_endpoint(cell_name)
                    except Exception:
self.log_error(
msg=None, log_text=f"error removing adhoc connector to {cell_name}", log_except=True
)
def drop_agents(self):
# drop agents
with self.agent_lock:
agents_to_delete = []
for fqcn, agent in self.agents.items():
assert isinstance(agent, CellAgent)
if agent.info.is_on_server:
agents_to_delete.append(fqcn)
for a in agents_to_delete:
self.logger.debug(f"{self.my_info.fqcn}: removing agent {a}")
self.agents.pop(a, None)
def make_internal_listener(self):
"""
Create the internal listener for child cells of this cell to connect to.
Returns:
"""
self._create_internal_listener()
def get_internal_listener_url(self) -> Union[None, str]:
"""Get the cell's internal listener url.
This method should only be used for cells that need to have child cells.
        The returned url is to be passed to children of this cell so they can connect to it.
        Returns: url for child cells to connect to
"""
if not self.int_listener:
return None
return self.int_listener.get_connection_url()
def _add_adhoc_connector(self, to_cell: str, url: str):
if self.bb_ext_connector:
            # it is possible that the server root offers a connect url after the bb_ext_connector is created
            # but before the actual connection has been established.
# Do not create another adhoc connection to the server!
if isinstance(self.root_url, str) and url == self.root_url:
return None
if isinstance(self.root_url, list) and url in self.root_url:
return None
with self.adhoc_connector_lock:
if to_cell in self.adhoc_connectors:
return self.adhoc_connectors[to_cell]
connector = self.connector_manager.get_external_connector(url, adhoc=True)
self.adhoc_connectors[to_cell] = connector
if connector:
self.logger.info(
f"{self.my_info.fqcn}: created adhoc connector {connector.handle} to {url} on {to_cell}"
)
else:
self.logger.info(f"{self.my_info.fqcn}: cannot create adhoc connector to {url} on {to_cell}")
return connector
def _create_internal_listener(self):
# internal listener is always backbone
if not self.int_listener:
self.int_listener = self.connector_manager.get_internal_listener()
if self.int_listener:
self.logger.info(
f"{self.my_info.fqcn}: created backbone internal listener "
f"for {self.int_listener.get_connection_url()}"
)
else:
raise RuntimeError(f"{self.my_info.fqcn}: cannot create backbone internal listener")
return self.int_listener
def _create_external_listener(self, url: str):
adhoc = len(url) == 0
if adhoc and not self.connector_manager.adhoc_allowed:
return None
with self.ext_listener_lock:
if url:
listener = self.ext_listeners.get(url)
if listener:
return listener
elif len(self.ext_listeners) > 0:
# no url specified - just pick one if any
k = random.choice(list(self.ext_listeners))
return self.ext_listeners[k]
listener = None
if not self.ext_listener_impossible:
self.logger.debug(f"{self.my_info.fqcn}: trying create ext listener: url={url}")
listener = self.connector_manager.get_external_listener(url, adhoc)
if listener:
if not adhoc:
self.logger.info(f"{self.my_info.fqcn}: created backbone external listener for {url}")
else:
self.logger.info(
f"{self.my_info.fqcn}: created adhoc external listener {listener.handle} "
f"for {listener.get_connection_url()}"
)
self.ext_listeners[listener.get_connection_url()] = listener
else:
if not adhoc:
raise RuntimeError(
f"{os.getpid()}: {self.my_info.fqcn}: "
f"cannot create backbone external listener for {url}"
)
else:
self.logger.warning(f"{self.my_info.fqcn}: cannot create adhoc external listener")
self.ext_listener_impossible = True
return listener
def _create_bb_external_connector(self):
if not self.root_url:
return
        # is the root server a local cell?
if self.ALL_CELLS.get(FQCN.ROOT_SERVER):
# no need to connect
return
self.logger.debug(f"{self.my_info.fqcn}: creating connector to {self.root_url}")
self.bb_ext_connector = self.connector_manager.get_external_connector(self.root_url, False)
if self.bb_ext_connector:
self.logger.info(f"{self.my_info.fqcn}: created backbone external connector to {self.root_url}")
else:
raise RuntimeError(f"{self.my_info.fqcn}: cannot create backbone external connector to {self.root_url}")
def _create_internal_connector(self, url: str):
self.bb_int_connector = self.connector_manager.get_internal_connector(url)
if self.bb_int_connector:
self.logger.info(f"{self.my_info.fqcn}: created backbone internal connector to {url} on parent")
else:
raise RuntimeError(f"{self.my_info.fqcn}: cannot create backbone internal connector to {url} on parent")
def set_cell_connected_cb(self, cb, *args, **kwargs):
"""
Set a callback that is called when an external cell is connected.
Args:
cb: the callback function. It must follow the signature of cell_connected_cb_signature.
*args: args to be passed to the cb.
**kwargs: kwargs to be passed to the cb
Returns: None
"""
if not callable(cb):
raise ValueError(f"specified cell_connected_cb {type(cb)} is not callable")
self.cell_connected_cb = cb
self.cell_connected_cb_args = args
self.cell_connected_cb_kwargs = kwargs
def set_cell_disconnected_cb(self, cb, *args, **kwargs):
"""
Set a callback that is called when an external cell is disconnected.
Args:
cb: the callback function. It must follow the signature of cell_disconnected_cb_signature.
*args: args to be passed to the cb.
**kwargs: kwargs to be passed to the cb
Returns: None
"""
if not callable(cb):
raise ValueError(f"specified cell_disconnected_cb {type(cb)} is not callable")
self.cell_disconnected_cb = cb
self.cell_disconnected_cb_args = args
self.cell_disconnected_cb_kwargs = kwargs
def set_message_interceptor(self, cb, *args, **kwargs):
"""
Set a callback that is called when a message is received or forwarded.
Args:
cb: the callback function. It must follow the signature of message_interceptor_signature.
*args: args to be passed to the cb.
**kwargs: kwargs to be passed to the cb
Returns: None
"""
if not callable(cb):
raise ValueError(f"specified message_interceptor {type(cb)} is not callable")
self.message_interceptor = cb
self.message_interceptor_args = args
self.message_interceptor_kwargs = kwargs
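    # --- Added sketch (hedged): registering the three callbacks set above.
    # The signatures follow how state_change() and _process_received_msg()
    # invoke them later in this file; the function names are illustrative.
    #
    #   def on_connected(agent):
    #       print(f"cell connected: {agent.info.fqcn}")
    #
    #   def on_disconnected(agent):
    #       print(f"cell disconnected: {agent.info.fqcn}")
    #
    #   def intercept(message):
    #       return None  # return a reply Message to stop the request, None to pass
    #
    #   cell.set_cell_connected_cb(on_connected)
    #   cell.set_cell_disconnected_cb(on_disconnected)
    #   cell.set_message_interceptor(intercept)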
def start(self):
"""
Start the cell after it is fully set up (connectors and listeners are added, CBs are set up)
Returns:
"""
if self.my_info.is_on_server:
if self.my_info.is_root:
self._set_bb_for_server_root()
else:
self._set_bb_for_server_child(self.parent_url, self.create_internal_listener)
else:
# client side
if self.my_info.is_root:
self._set_bb_for_client_root()
else:
self._set_bb_for_client_child(self.parent_url, self.create_internal_listener)
self.communicator.start()
self.running = True
def stop(self):
"""
Cleanup the cell. Once the cell is stopped, it won't be able to send/receive messages.
Returns:
"""
if not self.running:
return
if self.stopping:
return
self.stopping = True
self.logger.debug(f"{self.my_info.fqcn}: Stopping Cell")
# notify peers that I am gone
with self.agent_lock:
if self.agents:
targets = [peer_name for peer_name in self.agents.keys()]
self.logger.debug(f"broadcasting goodbye to {targets}")
self.broadcast_request(
channel=_CHANNEL,
topic=_TOPIC_BYE,
targets=targets,
request=Message(),
timeout=0.5,
optional=True,
)
self.running = False
self.asked_to_stop = True
if self.bulk_checker is not None and self.bulk_checker.is_alive():
self.bulk_checker.join()
if self.bulk_processor is not None and self.bulk_processor.is_alive():
self.bulk_processor.join()
try:
# we can now stop the communicator
self.communicator.stop()
except Exception as ex:
self.log_error(
msg=None, log_text=f"error stopping Communicator: {secure_format_exception(ex)}", log_except=True
)
self.logger.debug(f"{self.my_info.fqcn}: cell stopped!")
def register_request_cb(self, channel: str, topic: str, cb, *args, **kwargs):
"""
Register a callback for handling request. The CB must follow request_cb_signature.
Args:
channel: the channel of the request
topic: topic of the request
cb:
*args:
**kwargs:
Returns:
"""
if not callable(cb):
raise ValueError(f"specified request_cb {type(cb)} is not callable")
self.req_reg.set(channel, topic, Callback(cb, args, kwargs))
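    # --- Added sketch (hedged): a request callback. Per _process_request()
    # below, the CB receives the request Message and may return a reply
    # Message or None; the channel/topic names here are illustrative.
    #
    #   def handle_echo(request: Message) -> Message:
    #       return Message(payload=request.payload)
    #
    #   cell.register_request_cb(channel="demo", topic="echo", cb=handle_echo)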
def encrypt_payload(self, message: Message):
if not message.get_header(MessageHeaderKey.SECURE, False):
return
encrypted = message.get_header(MessageHeaderKey.ENCRYPTED, False)
if encrypted:
# Prevent double encryption
return
target = message.get_header(MessageHeaderKey.DESTINATION)
if not target:
raise RuntimeError("Message destination missing")
if message.payload is None:
message.payload = bytes(0)
payload_len = len(message.payload)
message.add_headers(
{
MessageHeaderKey.PAYLOAD_LEN: payload_len,
MessageHeaderKey.ENCRYPTED: True,
}
)
target_cert = self.cert_ex.get_certificate(target)
message.payload = self.credential_manager.encrypt(target_cert, message.payload)
self.logger.debug(f"Payload ({payload_len} bytes) is encrypted ({len(message.payload)} bytes)")
def decrypt_payload(self, message: Message):
if not message.get_header(MessageHeaderKey.SECURE, False):
return
encrypted = message.get_header(MessageHeaderKey.ENCRYPTED, False)
if not encrypted:
# Message is already decrypted
return
message.remove_header(MessageHeaderKey.ENCRYPTED)
origin = message.get_header(MessageHeaderKey.ORIGIN)
if not origin:
raise RuntimeError("Message origin missing")
payload_len = message.get_header(MessageHeaderKey.PAYLOAD_LEN)
origin_cert = self.cert_ex.get_certificate(origin)
message.payload = self.credential_manager.decrypt(origin_cert, message.payload)
if len(message.payload) != payload_len:
raise RuntimeError(f"Payload size changed after decryption {len(message.payload)} <> {payload_len}")
def add_incoming_request_filter(self, channel: str, topic: str, cb, *args, **kwargs):
if not callable(cb):
raise ValueError(f"specified incoming_request_filter {type(cb)} is not callable")
self.in_req_filter_reg.append(channel, topic, Callback(cb, args, kwargs))
def add_outgoing_reply_filter(self, channel: str, topic: str, cb, *args, **kwargs):
if not callable(cb):
raise ValueError(f"specified outgoing_reply_filter {type(cb)} is not callable")
self.out_reply_filter_reg.append(channel, topic, Callback(cb, args, kwargs))
def add_outgoing_request_filter(self, channel: str, topic: str, cb, *args, **kwargs):
if not callable(cb):
raise ValueError(f"specified outgoing_request_filter {type(cb)} is not callable")
self.out_req_filter_reg.append(channel, topic, Callback(cb, args, kwargs))
def add_incoming_reply_filter(self, channel: str, topic: str, cb, *args, **kwargs):
if not callable(cb):
raise ValueError(f"specified incoming_reply_filter {type(cb)} is not callable")
self.in_reply_filter_reg.append(channel, topic, Callback(cb, args, kwargs))
def add_error_handler(self, channel: str, topic: str, cb, *args, **kwargs):
if not callable(cb):
raise ValueError(f"specified error_handler {type(cb)} is not callable")
self.error_handler_reg.set(channel, topic, Callback(cb, args, kwargs))
def _filter_outgoing_request(self, channel: str, topic: str, request: Message) -> Union[None, Message]:
cbs = self.out_req_filter_reg.find(channel, topic)
if not cbs:
return None
for _cb in cbs:
assert isinstance(_cb, Callback)
reply = self._try_cb(request, _cb.cb, *_cb.args, **_cb.kwargs)
if reply:
return reply
def _try_path(self, fqcn_path: List[str]) -> Union[None, Endpoint]:
self.logger.debug(f"{self.my_info.fqcn}: trying path {fqcn_path} ...")
target = FQCN.join(fqcn_path)
agent = self.agents.get(target, None)
if agent:
            # there is a direct path to the target cell
self.logger.debug(f"{self.my_info.fqcn}: got cell agent for {target}")
return agent.endpoint
else:
self.logger.debug(f"{self.my_info.fqcn}: no CellAgent for {target}")
if len(fqcn_path) == 1:
return None
return self._try_path(fqcn_path[:-1])
def _find_endpoint(self, target_fqcn: str, for_msg: Message) -> Tuple[str, Union[None, Endpoint]]:
err = FQCN.validate(target_fqcn)
if err:
self.log_error(msg=None, log_text=f"invalid target FQCN '{target_fqcn}': {err}")
return ReturnCode.INVALID_TARGET, None
try:
ep = self._try_find_ep(target_fqcn, for_msg)
if not ep:
return ReturnCode.TARGET_UNREACHABLE, None
return "", ep
except:
self.log_error(msg=for_msg, log_text=f"Error when finding {target_fqcn}", log_except=True)
return ReturnCode.TARGET_UNREACHABLE, None
def _try_find_ep(self, target_fqcn: str, for_msg: Message) -> Union[None, Endpoint]:
self.logger.debug(f"{self.my_info.fqcn}: finding path to {target_fqcn}")
if target_fqcn == self.my_info.fqcn:
return self.endpoint
target_info = FqcnInfo(target_fqcn)
# is there a direct path to the target?
if target_fqcn in self.ALL_CELLS:
return Endpoint(target_fqcn)
agent = self.agents.get(target_fqcn)
if agent:
return agent.endpoint
if same_family(self.my_info, target_info):
if FQCN.is_parent(self.my_info.fqcn, target_fqcn):
self.log_warning(msg=for_msg, log_text=f"no connection to child {target_fqcn}")
return None
elif FQCN.is_parent(target_fqcn, self.my_info.fqcn):
self.log_warning(f"no connection to parent {target_fqcn}", for_msg)
self.logger.debug(f"{self.my_info.fqcn}: find path in the same family")
if FQCN.is_ancestor(self.my_info.fqcn, target_fqcn):
# I am the ancestor of the target
self.logger.debug(f"{self.my_info.fqcn}: I'm ancestor of the target {target_fqcn}")
return self._try_path(target_info.path)
else:
# target is my ancestor, or we share the same ancestor - go to my parent!
self.logger.debug(f"{self.my_info.fqcn}: target {target_fqcn} is or share my ancestor")
parent_fqcn = FQCN.get_parent(self.my_info.fqcn)
agent = self.agents.get(parent_fqcn)
if not agent:
self.log_warning(f"no connection to parent {parent_fqcn}", for_msg)
return None
return agent.endpoint
# not the same family
ep = self._try_path(target_info.path)
if ep:
return ep
# cannot find path to the target
# try the server root
# we assume that all client roots connect to the server root.
# Do so only if I'm not the server root
if not self.my_info.is_root or not self.my_info.is_on_server:
if FQCN.ROOT_SERVER in self.ALL_CELLS:
return Endpoint(FQCN.ROOT_SERVER)
root_agent = self.agents.get(FQCN.ROOT_SERVER)
if root_agent:
return root_agent.endpoint
# no direct path to the server root
# let my parent handle it if I have a parent
if self.my_info.gen > 1:
parent_fqcn = FQCN.get_parent(self.my_info.fqcn)
agent = self.agents.get(parent_fqcn)
if not agent:
self.log_warning(f"no connection to parent {parent_fqcn}", for_msg)
return None
return agent.endpoint
self.log_warning(f"no connection to {target_fqcn}", for_msg)
return None
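    # --- Added note (hedged): route resolution order in _try_find_ep():
    # 1) the target is myself; 2) a local cell in ALL_CELLS; 3) a directly
    # connected agent; 4) same family: walk down toward the child via
    # _try_path(), or hand off to my parent; 5) otherwise the longest
    # reachable prefix of the target's path, then the server root, then my
    # parent as the last resort.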
def _send_to_endpoint(self, to_endpoint: Endpoint, message: Message) -> str:
err = ""
try:
encode_payload(message)
self.encrypt_payload(message)
message.set_header(MessageHeaderKey.SEND_TIME, time.time())
if not message.payload:
msg_size = 0
else:
msg_size = len(message.payload)
if msg_size > self.max_msg_size:
err_text = f"message is too big ({msg_size} > {self.max_msg_size}"
self.log_error(err_text, message)
err = ReturnCode.MSG_TOO_BIG
else:
direct_cell = self.ALL_CELLS.get(to_endpoint.name)
if direct_cell:
# create a thread and fire the cell's process_message!
# self.DIRECT_MSG_EXECUTOR.submit(self._send_direct_message, direct_cell, message)
self._send_direct_message(direct_cell, message)
else:
self.communicator.send(to_endpoint, CoreCell.APP_ID, message)
self.sent_msg_size_pool.record_value(
category=self._stats_category(message), value=self._msg_size_mbs(message)
)
except Exception as ex:
err_text = f"Failed to send message to {to_endpoint.name}: {secure_format_exception(ex)}"
self.log_error(err_text, message)
self.logger.debug(secure_format_traceback())
err = ReturnCode.COMM_ERROR
return err
def _send_direct_message(self, target_cell, message):
target_cell.process_message(
endpoint=Endpoint(self.my_info.fqcn), connection=None, app_id=self.APP_ID, message=message
)
def _send_target_messages(
self,
target_msgs: Dict[str, TargetMessage],
) -> Dict[str, str]:
if not self.running:
raise RuntimeError("Messenger is not running")
send_errs = {}
reachable_targets = {} # target fqcn => endpoint
for t, tm in target_msgs.items():
err, ep = self._find_endpoint(t, tm.message)
if ep:
reachable_targets[t] = ep
else:
self.log_error(f"cannot send to '{t}': {err}", tm.message)
send_errs[t] = err
for t, ep in reachable_targets.items():
tm = target_msgs[t]
req = Message(headers=copy.copy(tm.message.headers), payload=tm.message.payload)
req.add_headers(
{
MessageHeaderKey.CHANNEL: tm.channel,
MessageHeaderKey.TOPIC: tm.topic,
MessageHeaderKey.ORIGIN: self.my_info.fqcn,
MessageHeaderKey.FROM_CELL: self.my_info.fqcn,
MessageHeaderKey.MSG_TYPE: MessageType.REQ,
MessageHeaderKey.ROUTE: [(self.my_info.fqcn, time.time())],
MessageHeaderKey.DESTINATION: t,
MessageHeaderKey.TO_CELL: ep.name,
}
)
# invoke outgoing req filters
req_filters = self.out_req_filter_reg.find(tm.channel, tm.topic)
if req_filters:
self.logger.debug(f"{self.my_info.fqcn}: invoking outgoing request filters")
assert isinstance(req_filters, list)
for f in req_filters:
assert isinstance(f, Callback)
r = self._try_cb(req, f.cb, *f.args, **f.kwargs)
if r:
send_errs[t] = ReturnCode.FILTER_ERROR
break
if send_errs.get(t):
# process next target
continue
# is this a direct path?
ti = FqcnInfo(t)
allow_adhoc = self.connector_manager.is_adhoc_allowed(ti, self.my_info)
if allow_adhoc and t != ep.name:
# Not a direct path since the destination and the next leg are not the same
if not ti.is_on_server and (self.my_info.is_on_server or self.my_info.fqcn > t):
# try to get or create a listener and let the peer know the endpoint
listener = self._create_external_listener("")
if listener:
conn_url = listener.get_connection_url()
req.set_header(MessageHeaderKey.CONN_URL, conn_url)
err = self._send_to_endpoint(ep, req)
if err:
self.log_error(f"failed to send to endpoint {ep.name}: {err}", req)
else:
self.sent_msg_counter_pool.increment(category=self._stats_category(req), counter_name=_CounterName.SENT)
send_errs[t] = err
return send_errs
def _send_to_targets(
self,
channel: str,
topic: str,
targets: Union[str, List[str]],
message: Message,
) -> Dict[str, str]:
if isinstance(targets, str):
targets = [targets]
target_msgs = {}
for t in targets:
target_msgs[t] = TargetMessage(t, channel, topic, message)
return self._send_target_messages(target_msgs)
def send_request(
self, channel: str, topic: str, target: str, request: Message, timeout=None, secure=False, optional=False
) -> Message:
self.logger.debug(f"{self.my_info.fqcn}: sending request {channel}:{topic} to {target}")
result = self.broadcast_request(channel, topic, [target], request, timeout, secure, optional)
assert isinstance(result, dict)
return result.get(target)
def broadcast_multi_requests(
self, target_msgs: Dict[str, TargetMessage], timeout=None, secure=False, optional=False
) -> Dict[str, Message]:
"""
This is the core of the request/response handling. Be extremely careful when making any changes!
To maximize the communication efficiency, we avoid the use of locks.
We use a waiter implemented as a Python threading.Event object.
We create the waiter, send out messages, set up default responses, and set it up to wait for response.
Once the waiter is triggered from a reply-receiving thread, we process received results.
HOWEVER, if the network is extremely fast, the response may already be received even before we finish setting
up the waiter in this thread!
        We had a very mysterious bug that caused a request to be treated as timed out even though the reply was
        received: both threads were setting values in "waiter.replies". With an extremely fast network, the reply
        processing thread set the reply in "waiter.replies", which was then overwritten by this thread with a
        default timeout reply.
        To avoid this kind of problem, we now use two sets of values in the waiter object.
One set is for this thread: targets
Another set is for the reply processing thread: received_replies, reply_time
Args:
target_msgs: messages to be sent
timeout: timeout value
secure: End-end encryption
optional: whether the message is optional
Returns: a dict of: target name => reply message
"""
targets = [t for t in target_msgs]
self.logger.debug(f"{self.my_info.fqcn}: broadcasting to {targets} ...")
waiter = _Waiter(targets)
if waiter.id in self.waiters:
raise RuntimeError("waiter not unique!")
self.waiters[waiter.id] = waiter
now = time.time()
if not timeout:
timeout = self.max_timeout
result = {}
try:
for _, tm in target_msgs.items():
request = tm.message
request.add_headers(
{
MessageHeaderKey.REQ_ID: waiter.id,
MessageHeaderKey.REPLY_EXPECTED: True,
MessageHeaderKey.SECURE: secure,
MessageHeaderKey.OPTIONAL: optional,
}
)
send_errs = self._send_target_messages(target_msgs)
send_count = 0
timeout_reply = make_reply(ReturnCode.TIMEOUT)
# NOTE: it is possible that reply is already received and the waiter is triggered by now!
# if waiter.received_replies:
# self.logger.info(f"{self.my_info.fqcn}: the network is extremely fast - response already received!")
topics = []
for_msg = None
for t, err in send_errs.items():
if not err:
send_count += 1
result[t] = timeout_reply
tm = target_msgs[t]
topic = tm.message.get_header(MessageHeaderKey.TOPIC, "?")
if topic not in topics:
topics.append(topic)
if not for_msg:
for_msg = tm.message
else:
result[t] = make_reply(rc=err)
waiter.reply_time[t] = now
if send_count > 0:
self.num_sar_reqs += 1
num_reqs = len(self.waiters)
if self.req_hw < num_reqs:
self.req_hw = num_reqs
# wait for reply
self.logger.debug(f"{self.my_info.fqcn}: set up waiter {waiter.id} to wait for {timeout} secs")
if not waiter.wait(timeout=timeout):
# timeout
self.log_error(f"timeout on Request {waiter.id} for {topics} after {timeout} secs", for_msg)
with self.stats_lock:
self.num_timeout_reqs += 1
finally:
self.waiters.pop(waiter.id, None)
self.logger.debug(f"released waiter on REQ {waiter.id}")
if waiter.received_replies:
result.update(waiter.received_replies)
for t, reply in result.items():
rc = reply.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
self.sent_msg_counter_pool.increment(category=self._stats_category(reply), counter_name=rc)
return result
def broadcast_request(
self,
channel: str,
topic: str,
targets: Union[str, List[str]],
request: Message,
timeout=None,
secure=False,
optional=False,
) -> Dict[str, Message]:
"""
Send a message over a channel to specified destination cell(s), and wait for reply
Args:
channel: channel for the message
topic: topic of the message
targets: FQCN of the destination cell(s)
request: message to be sent
timeout: how long to wait for replies
secure: End-end encryption
optional: whether the message is optional
Returns: a dict of: cell_id => reply message
"""
if isinstance(targets, str):
targets = [targets]
target_msgs = {}
for t in targets:
target_msgs[t] = TargetMessage(t, channel, topic, request)
return self.broadcast_multi_requests(target_msgs, timeout, secure=secure, optional=optional)
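    # --- Added sketch (hedged): broadcasting a request and checking replies.
    # The target FQCNs and channel/topic are illustrative.
    #
    #   replies = cell.broadcast_request(
    #       channel="demo", topic="query",
    #       targets=["site-1", "site-2"],
    #       request=Message(payload=b"ping"),
    #       timeout=5.0,
    #   )
    #   for target, reply in replies.items():
    #       rc = reply.get_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
    #       print(target, rc)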
def fire_and_forget(
self, channel: str, topic: str, targets: Union[str, List[str]], message: Message, secure=False, optional=False
) -> Dict[str, str]:
"""
Send a message over a channel to specified destination cell(s), and do not wait for replies.
Args:
channel: channel for the message
topic: topic of the message
targets: one or more destination cell IDs. None means all.
message: message to be sent
secure: End-end encryption of the message
optional: whether the message is optional
Returns: None
"""
message.add_headers(
{
MessageHeaderKey.REPLY_EXPECTED: False,
MessageHeaderKey.OPTIONAL: optional,
MessageHeaderKey.SECURE: secure,
}
)
return self._send_to_targets(channel, topic, targets, message)
def queue_message(self, channel: str, topic: str, targets: Union[str, List[str]], message: Message, optional=False):
if self.max_bulk_size <= 0:
raise RuntimeError(f"{self.get_fqcn()}: bulk message is not enabled!")
if isinstance(targets, str):
targets = [targets]
message.set_header(MessageHeaderKey.OPTIONAL, optional)
with self.bulk_lock:
if self.bulk_checker is None:
self.logger.info(f"{self.my_info.fqcn}: starting bulk_checker")
self.bulk_checker = threading.Thread(target=self._check_bulk, name="check_bulk_msg")
self.bulk_checker.start()
self.logger.info(f"{self.my_info.fqcn}: started bulk_checker")
for t in targets:
sender = self.bulk_senders.get(t)
if not sender:
sender = _BulkSender(cell=self, target=t, max_queue_size=self.max_bulk_size)
self.bulk_senders[t] = sender
sender.queue_message(channel=channel, topic=topic, message=message)
self.logger.info(f"{self.get_fqcn()}: queued msg for {t}")
def _peer_goodbye(self, request: Message):
peer_ep = request.get_prop(MessagePropKey.ENDPOINT)
if not peer_ep:
self.log_error("no endpoint prop in message", request)
return
assert isinstance(peer_ep, Endpoint)
with self.agent_lock:
self.logger.debug(f"{self.my_info.fqcn}: got goodbye from cell {peer_ep.name}")
ep = self.agents.pop(peer_ep.name, None)
if ep:
self.logger.debug(f"{self.my_info.fqcn}: removed agent for {peer_ep.name}")
else:
self.logger.debug(f"{self.my_info.fqcn}: agent for {peer_ep.name} is already gone")
# ack back
return Message()
def _receive_bulk_message(self, request: Message):
target_msgs = request.payload
assert isinstance(target_msgs, list)
with self.bulk_msg_lock:
if self.bulk_processor is None:
self.logger.debug(f"{self.my_info.fqcn}: starting bulk message processor")
self.bulk_processor = threading.Thread(target=self._process_bulk_messages, name="process_bulk_msg")
self.bulk_processor.start()
self.logger.debug(f"{self.my_info.fqcn}: started bulk message processor")
self.bulk_messages.append(request)
self.logger.debug(f"{self.get_fqcn()}: received bulk msg. Pending size {len(self.bulk_messages)}")
def _process_bulk_messages(self):
self.logger.debug(f"{self.get_fqcn()}: processing bulks ...")
while not self.asked_to_stop:
self._process_pending_bulks()
time.sleep(self.bulk_process_interval)
# process remaining messages if any
self._process_pending_bulks()
def _process_pending_bulks(self):
while True:
with self.bulk_msg_lock:
if not self.bulk_messages:
return
bulk = self.bulk_messages.pop(0)
self._process_one_bulk(bulk)
def _process_one_bulk(self, bulk_request: Message):
target_msgs = bulk_request.payload
assert isinstance(target_msgs, list)
self.logger.debug(f"{self.get_fqcn()}: processing one bulk size {len(target_msgs)}")
for tmd in target_msgs:
assert isinstance(tmd, dict)
tm = TargetMessage.from_dict(tmd)
assert isinstance(tm, TargetMessage)
req = tm.message
req.add_headers(bulk_request.headers)
req.add_headers({MessageHeaderKey.TOPIC: tm.topic, MessageHeaderKey.CHANNEL: tm.channel})
origin = bulk_request.get_header(MessageHeaderKey.ORIGIN, "")
self.logger.debug(f"{self.get_fqcn()}: bulk item: {req.headers}")
self._process_request(origin=origin, message=req)
def fire_multi_requests_and_forget(self, target_msgs: Dict[str, TargetMessage], optional=False) -> Dict[str, str]:
for _, tm in target_msgs.items():
request = tm.message
request.add_headers({MessageHeaderKey.REPLY_EXPECTED: False, MessageHeaderKey.OPTIONAL: optional})
return self._send_target_messages(target_msgs)
def send_reply(self, reply: Message, to_cell: str, for_req_ids: List[str], secure=False, optional=False) -> str:
"""Send a reply to respond to one or more requests.
This is useful if the request receiver needs to delay its reply as follows:
- When a request is received, if it's not ready to reply (e.g. waiting for additional requests from
          other cells), simply remember the REQ_ID and return None;
- The receiver may queue up multiple such requests
- When ready, call this method to send the reply for all the queued requests
Args:
reply: the reply message
to_cell: the target cell
for_req_ids: the list of req IDs that the reply is for
secure: End-end encryption
optional: whether the message is optional
Returns: an error message if any
"""
reply.add_headers(
{
MessageHeaderKey.FROM_CELL: self.my_info.fqcn,
MessageHeaderKey.ORIGIN: self.my_info.fqcn,
MessageHeaderKey.ROUTE: [(self.my_info.fqcn, time.time())],
MessageHeaderKey.DESTINATION: to_cell,
MessageHeaderKey.REQ_ID: for_req_ids,
MessageHeaderKey.MSG_TYPE: MessageType.REPLY,
MessageHeaderKey.SECURE: secure,
MessageHeaderKey.OPTIONAL: optional,
}
)
err, ep = self._find_endpoint(to_cell, reply)
if err:
return err
reply.set_header(MessageHeaderKey.TO_CELL, ep.name)
return self._send_to_endpoint(ep, reply)
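    # --- Added sketch (hedged): the delayed-reply pattern described in the
    # send_reply() docstring. The aggregation logic is hypothetical; only
    # register_request_cb() and send_reply() from this class are assumed.
    #
    #   pending = []  # list of (origin, req_id)
    #
    #   def on_partial(request: Message):
    #       pending.append((request.get_header(MessageHeaderKey.ORIGIN),
    #                       request.get_header(MessageHeaderKey.REQ_ID)))
    #       return None  # defer the reply
    #
    #   def flush(cell, reply: Message):
    #       for origin, req_id in pending:
    #           cell.send_reply(reply, to_cell=origin, for_req_ids=[req_id])
    #       pending.clear()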
def _try_cb(self, message, cb, *args, **kwargs):
try:
self.logger.debug(f"{self.my_info.fqcn}: calling CB {cb.__name__}")
return cb(message, *args, **kwargs)
except ServiceUnavailable:
return make_reply(ReturnCode.SERVICE_UNAVAILABLE)
except InvalidSession:
return make_reply(ReturnCode.INVALID_SESSION)
except InvalidRequest:
return make_reply(ReturnCode.INVALID_REQUEST)
except AuthenticationError:
return make_reply(ReturnCode.AUTHENTICATION_ERROR)
except AbortRun:
return make_reply(ReturnCode.ABORT_RUN)
except Exception as ex:
self.log_error(
f"exception from CB {cb.__name__}: {secure_format_exception(ex)}", msg=message, log_except=True
)
return make_reply(ReturnCode.PROCESS_EXCEPTION)
def process_message(self, endpoint: Endpoint, connection: Connection, app_id: int, message: Message):
# this is the receiver callback
try:
self._process_received_msg(endpoint, connection, message)
except Exception as ex:
self.log_error(
f"Error processing received message: {secure_format_exception(ex)}", msg=message, log_except=True
)
def _process_request(self, origin: str, message: Message) -> Union[None, Message]:
self.logger.debug(f"{self.my_info.fqcn}: processing incoming request")
self.decrypt_payload(message)
decode_payload(message)
# this is a request for me - dispatch to the right CB
channel = message.get_header(MessageHeaderKey.CHANNEL, "")
topic = message.get_header(MessageHeaderKey.TOPIC, "")
_cb = self.req_reg.find(channel, topic)
if not _cb:
self.log_error(f"no callback for request ({topic}@{channel}) from cell '{origin}'", message)
return make_reply(ReturnCode.PROCESS_EXCEPTION, error="no callback")
# invoke incoming request filters
req_filters = self.in_req_filter_reg.find(channel, topic)
if req_filters:
self.logger.debug(f"{self.my_info.fqcn}: invoking incoming request filters")
assert isinstance(req_filters, list)
for f in req_filters:
assert isinstance(f, Callback)
reply = self._try_cb(message, f.cb, *f.args, **f.kwargs)
if reply:
return reply
assert isinstance(_cb, Callback)
self.logger.debug(f"{self.my_info.fqcn}: calling registered request CB")
cb_start = time.perf_counter()
reply = self._try_cb(message, _cb.cb, *_cb.args, **_cb.kwargs)
cb_end = time.perf_counter()
self.req_cb_stats_pool.record_value(category=self._stats_category(message), value=cb_end - cb_start)
if not reply:
# the CB doesn't have anything to reply
return None
if not isinstance(reply, Message):
self.log_error(
f"bad result from request CB for topic {topic} on channel {channel}: "
f"expect Message but got {type(reply)}",
msg=message,
)
reply = make_reply(ReturnCode.PROCESS_EXCEPTION, error="bad cb result")
# Reply must be secure if request is
reply.add_headers({MessageHeaderKey.SECURE: message.get_header(MessageHeaderKey.SECURE, False)})
return reply
def _add_to_route(self, message: Message):
route = message.get_header(MessageHeaderKey.ROUTE, None)
if not route:
route = []
message.set_header(MessageHeaderKey.ROUTE, route)
if not isinstance(route, list):
self.log_error(f"bad route header: expect list but got {type(route)}", msg=message)
else:
route.append((self.my_info.fqcn, time.time()))
def _forward(self, endpoint: Endpoint, origin: str, destination: str, msg_type: str, message: Message):
# not for me - need to forward it
self.logger.debug(f"{self.my_info.fqcn}: forwarding for {origin} to {destination}")
err, ep = self._find_endpoint(destination, message)
if ep:
self.logger.debug(f"{self.my_info.fqcn}: found next leg {ep.name}")
message.add_headers({MessageHeaderKey.FROM_CELL: self.my_info.fqcn, MessageHeaderKey.TO_CELL: ep.name})
self._add_to_route(message)
err = self._send_to_endpoint(to_endpoint=ep, message=message)
if not err:
self.logger.debug(f"{self.my_info.fqcn}: forwarded successfully!")
return
else:
self.log_error(f"failed to forward {msg_type}: {err}", msg=message)
else:
# cannot find next leg endpoint
self.log_error(f"cannot forward {msg_type}: no path", message)
if msg_type == MessageType.REQ:
reply_expected = message.get_header(MessageHeaderKey.REPLY_EXPECTED, False)
if not reply_expected:
self.logger.debug(f"{self.my_info.fqcn}: can't forward: drop the message since reply is not expected")
return
# tell the requester that message couldn't be delivered
req_id = message.get_header(MessageHeaderKey.REQ_ID, "")
reply = make_reply(ReturnCode.COMM_ERROR, error="cannot forward")
reply.add_headers(
{
MessageHeaderKey.ORIGINAL_HEADERS: message.headers,
MessageHeaderKey.FROM_CELL: self.my_info.fqcn,
MessageHeaderKey.TO_CELL: endpoint.name,
MessageHeaderKey.ORIGIN: self.my_info.fqcn,
MessageHeaderKey.DESTINATION: origin,
MessageHeaderKey.REQ_ID: [req_id],
MessageHeaderKey.MSG_TYPE: MessageType.RETURN,
MessageHeaderKey.ROUTE: [(self.my_info.fqcn, time.time())],
MessageHeaderKey.RETURN_REASON: ReturnReason.CANT_FORWARD,
}
)
self._send_to_endpoint(endpoint, reply)
self.logger.debug(f"{self.my_info.fqcn}: sent RETURN message back to {endpoint.name}")
else:
# msg_type is either RETURN or REPLY - drop it.
self.logger.debug(format_log_message(self.my_info.fqcn, message, "dropped forwarded message"))
def _stats_category(self, message: Message):
channel = message.get_header(MessageHeaderKey.CHANNEL, "?")
topic = message.get_header(MessageHeaderKey.TOPIC, "?")
msg_type = message.get_header(MessageHeaderKey.MSG_TYPE, "?")
dest = message.get_header(MessageHeaderKey.DESTINATION, "")
origin = message.get_header(MessageHeaderKey.ORIGIN, "")
to_cell = message.get_header(MessageHeaderKey.TO_CELL, "")
type_tag = msg_type
if dest and origin:
if dest != self.my_info.fqcn and origin != self.my_info.fqcn:
# this is the case of forwarding
type_tag = "fwd." + msg_type
if msg_type == MessageType.RETURN:
orig_headers = message.get_header(MessageHeaderKey.ORIGINAL_HEADERS, None)
if orig_headers:
channel = orig_headers.get(MessageHeaderKey.CHANNEL, "??")
topic = orig_headers.get(MessageHeaderKey.TOPIC, "??")
else:
channel = "???"
topic = "???"
return f"{type_tag}:{channel}:{topic}"
def _process_reply(self, origin: str, message: Message, msg_type: str):
channel = message.get_header(MessageHeaderKey.CHANNEL, "")
topic = message.get_header(MessageHeaderKey.TOPIC, "")
now = time.time()
self.logger.debug(f"{self.my_info.fqcn}: processing reply from {origin} for type {msg_type}")
self.decrypt_payload(message)
decode_payload(message)
req_ids = message.get_header(MessageHeaderKey.REQ_ID)
if not req_ids:
raise RuntimeError(format_log_message(self.my_info.fqcn, message, "reply does not have REQ_ID header"))
if isinstance(req_ids, str):
req_ids = [req_ids]
if not isinstance(req_ids, list):
raise RuntimeError(
format_log_message(self.my_info.fqcn, message, f"REQ_ID must be list of ids but got {type(req_ids)}")
)
req_destination = origin
if msg_type == MessageType.RETURN:
self.logger.debug(format_log_message(self.my_info.fqcn, message, "message is returned"))
self.sent_msg_counter_pool.increment(
category=self._stats_category(message), counter_name=_CounterName.RETURN
)
original_headers = message.get_header(MessageHeaderKey.ORIGINAL_HEADERS, None)
if not original_headers:
raise RuntimeError(
format_log_message(self.my_info.fqcn, message, "missing ORIGINAL_HEADERS in returned message!")
)
req_destination = original_headers.get(MessageHeaderKey.DESTINATION, None)
if not req_destination:
raise RuntimeError(
format_log_message(self.my_info.fqcn, message, "missing DESTINATION header in original headers")
)
else:
# invoking incoming reply filter
reply_filters = self.in_reply_filter_reg.find(channel, topic)
if reply_filters:
self.logger.debug(f"{self.my_info.fqcn}: invoking incoming reply filters")
assert isinstance(reply_filters, list)
for f in reply_filters:
assert isinstance(f, Callback)
self._try_cb(message, f.cb, *f.args, **f.kwargs)
for rid in req_ids:
waiter = self.waiters.get(rid, None)
if waiter:
assert isinstance(waiter, _Waiter)
if req_destination not in waiter.targets:
self.log_error(
f"unexpected reply for {rid} from {req_destination}"
f"req_destination='{req_destination}', expecting={waiter.targets}",
message,
)
return
waiter.received_replies[req_destination] = message
waiter.reply_time[req_destination] = now
time_taken = now - waiter.send_time
self.msg_stats_pool.record_value(category=self._stats_category(message), value=time_taken)
# all targets replied?
all_targets_replied = True
for t in waiter.targets:
if not waiter.reply_time.get(t):
all_targets_replied = False
break
if all_targets_replied:
self.logger.debug(
format_log_message(
self.my_info.fqcn,
message,
f"trigger waiter - replies received from {len(waiter.targets)} targets for {rid}",
)
)
waiter.set() # trigger the waiting requests!
else:
self.logger.debug(
format_log_message(
self.my_info.fqcn,
message,
f"waiting - replies not received from {len(waiter.targets)} targets for req {rid}",
)
)
else:
self.log_error(f"no waiter for req {rid} - the reply is too late", None)
self.sent_msg_counter_pool.increment(
category=self._stats_category(message), counter_name=_CounterName.LATE
)
@staticmethod
def _msg_size_mbs(message: Message):
if message.payload:
msg_size = len(message.payload)
else:
msg_size = 0
return msg_size / _ONE_MB
def _process_received_msg(self, endpoint: Endpoint, connection: Connection, message: Message):
route = message.get_header(MessageHeaderKey.ROUTE)
if route:
origin_name = route[0][0]
t0 = route[0][1]
time_taken = time.time() - t0
self.msg_travel_stats_pool.record_value(
category=f"{origin_name}#{self._stats_category(message)}", value=time_taken
)
self.logger.debug(f"{self.my_info.fqcn}: received message: {message.headers}")
message.set_prop(MessagePropKey.ENDPOINT, endpoint)
if connection:
conn_props = connection.get_conn_properties()
cn = conn_props.get(DriverParams.PEER_CN.value)
if cn:
message.set_prop(MessagePropKey.COMMON_NAME, cn)
msg_type = message.get_header(MessageHeaderKey.MSG_TYPE)
if not msg_type:
raise RuntimeError(format_log_message(self.my_info.fqcn, message, "missing MSG_TYPE in received message"))
origin = message.get_header(MessageHeaderKey.ORIGIN)
if not origin:
raise RuntimeError(
format_log_message(self.my_info.fqcn, message, "missing ORIGIN header in received message")
)
# is this msg for me?
destination = message.get_header(MessageHeaderKey.DESTINATION)
if not destination:
raise RuntimeError(
format_log_message(self.my_info.fqcn, message, "missing DESTINATION header in received message")
)
self.received_msg_counter_pool.increment(
category=self._stats_category(message), counter_name=_CounterName.RECEIVED
)
if msg_type == MessageType.REQ and self.message_interceptor is not None:
reply = self._try_cb(
message, self.message_interceptor, *self.message_interceptor_args, **self.message_interceptor_kwargs
)
if reply:
self.logger.debug(f"{self.my_info.fqcn}: interceptor stopped message!")
reply_expected = message.get_header(MessageHeaderKey.REPLY_EXPECTED)
if not reply_expected:
return
req_id = message.get_header(MessageHeaderKey.REQ_ID, "")
reply.add_headers(
{
MessageHeaderKey.ORIGINAL_HEADERS: message.headers,
MessageHeaderKey.FROM_CELL: self.my_info.fqcn,
MessageHeaderKey.TO_CELL: endpoint.name,
MessageHeaderKey.ORIGIN: self.my_info.fqcn,
MessageHeaderKey.DESTINATION: origin,
MessageHeaderKey.REQ_ID: [req_id],
MessageHeaderKey.MSG_TYPE: MessageType.RETURN,
MessageHeaderKey.ROUTE: [(self.my_info.fqcn, time.time())],
MessageHeaderKey.RETURN_REASON: ReturnReason.INTERCEPT,
}
)
self._send_reply(reply, endpoint)
self.logger.debug(f"{self.my_info.fqcn}: returned intercepted message")
return
if destination != self.my_info.fqcn:
# not for me - need to forward it
self.sent_msg_counter_pool.increment(
category=self._stats_category(message), counter_name=_CounterName.FORWARD
)
self.received_msg_counter_pool.increment(
category=self._stats_category(message), counter_name=_CounterName.FORWARD
)
self._forward(endpoint, origin, destination, msg_type, message)
return
self.received_msg_size_pool.record_value(
category=self._stats_category(message), value=self._msg_size_mbs(message)
)
# this message is for me
self._add_to_route(message)
# handle ad-hoc
my_conn_url = None
if msg_type in [MessageType.REQ, MessageType.REPLY]:
from_cell = message.get_header(MessageHeaderKey.FROM_CELL)
oi = FqcnInfo(origin)
if from_cell != origin and not same_family(oi, self.my_info):
# this is a forwarded message, so no direct path from the origin to me
conn_url = message.get_header(MessageHeaderKey.CONN_URL)
if conn_url:
# the origin already has a listener
# create an ad-hoc connector to connect to the origin cell
self.logger.debug(f"{self.my_info.fqcn}: creating adhoc connector to {origin} at {conn_url}")
self._add_adhoc_connector(origin, conn_url)
elif msg_type == MessageType.REQ:
# see whether we can offer a listener
allow_adhoc = self.connector_manager.is_adhoc_allowed(oi, self.my_info)
if (
allow_adhoc
and (not oi.is_on_server)
and (self.my_info.fqcn > origin or self.my_info.is_on_server)
):
self.logger.debug(f"{self.my_info.fqcn}: trying to offer ad-hoc listener to {origin}")
listener = self._create_external_listener("")
if listener:
my_conn_url = listener.get_connection_url()
if msg_type == MessageType.REQ:
# this is a request for me - dispatch to the right CB
channel = message.get_header(MessageHeaderKey.CHANNEL, "")
topic = message.get_header(MessageHeaderKey.TOPIC, "")
reply = self._process_request(origin, message)
if not reply:
self.received_msg_counter_pool.increment(
category=self._stats_category(message), counter_name=_CounterName.REPLY_NONE
)
return
is_optional = message.get_header(MessageHeaderKey.OPTIONAL, False)
reply.set_header(MessageHeaderKey.OPTIONAL, is_optional)
reply_expected = message.get_header(MessageHeaderKey.REPLY_EXPECTED, False)
if not reply_expected:
# this is fire and forget
self.logger.debug(f"{self.my_info.fqcn}: don't send response - request expects no reply")
self.received_msg_counter_pool.increment(
category=self._stats_category(message), counter_name=_CounterName.REPLY_NOT_EXPECTED
)
return
# send the reply back
if not reply.headers.get(MessageHeaderKey.RETURN_CODE):
self.logger.debug(f"{self.my_info.fqcn}: added return code OK")
reply.set_header(MessageHeaderKey.RETURN_CODE, ReturnCode.OK)
req_id = message.get_header(MessageHeaderKey.REQ_ID, "")
reply.add_headers(
{
MessageHeaderKey.CHANNEL: channel,
MessageHeaderKey.TOPIC: topic,
MessageHeaderKey.FROM_CELL: self.my_info.fqcn,
MessageHeaderKey.ORIGIN: self.my_info.fqcn,
MessageHeaderKey.DESTINATION: origin,
MessageHeaderKey.TO_CELL: endpoint.name,
MessageHeaderKey.REQ_ID: req_id,
MessageHeaderKey.MSG_TYPE: MessageType.REPLY,
MessageHeaderKey.ROUTE: [(self.my_info.fqcn, time.time())],
}
)
if my_conn_url:
reply.set_header(MessageHeaderKey.CONN_URL, my_conn_url)
# invoke outgoing reply filters
reply_filters = self.out_reply_filter_reg.find(channel, topic)
if reply_filters:
self.logger.debug(f"{self.my_info.fqcn}: invoking outgoing reply filters")
assert isinstance(reply_filters, list)
for f in reply_filters:
assert isinstance(f, Callback)
r = self._try_cb(reply, f.cb, *f.args, **f.kwargs)
if r:
reply = r
break
self._send_reply(reply, endpoint)
else:
# the message is either a reply or a return for a previous request: handle replies
self._process_reply(origin, message, msg_type)
def _send_reply(self, reply: Message, endpoint: Endpoint):
self.logger.debug(f"{self.my_info.fqcn}: sending reply back to {endpoint.name}")
self.logger.debug(f"Reply message: {reply.headers}")
err = self._send_to_endpoint(endpoint, reply)
if err:
self.log_error(f"error sending reply back to {endpoint.name}: {err}", reply)
self.received_msg_counter_pool.increment(category=self._stats_category(reply), counter_name=err)
else:
self.received_msg_counter_pool.increment(
category=self._stats_category(reply), counter_name=_CounterName.REPLIED
)
rc = reply.get_header(MessageHeaderKey.RETURN_CODE)
self.received_msg_counter_pool.increment(category=self._stats_category(reply), counter_name=rc)
def _check_bulk(self):
while not self.asked_to_stop:
with self.bulk_lock:
for _, sender in self.bulk_senders.items():
sender.send()
time.sleep(self.bulk_check_interval)
# force everything to be flushed
with self.bulk_lock:
for _, sender in self.bulk_senders.items():
sender.send()
def state_change(self, endpoint: Endpoint):
self.logger.debug(f"========= {self.my_info.fqcn}: EP {endpoint.name} state changed to {endpoint.state}")
fqcn = endpoint.name
if endpoint.state == EndpointState.READY:
# create the CellAgent for this endpoint
agent = self.agents.get(fqcn)
if not agent:
agent = CellAgent(fqcn, endpoint)
with self.agent_lock:
self.agents[fqcn] = agent
self.logger.debug(f"{self.my_info.fqcn}: created CellAgent for {fqcn}")
else:
self.logger.debug(f"{self.my_info.fqcn}: found existing CellAgent for {fqcn} - shouldn't happen")
agent.endpoint = endpoint
if self.cell_connected_cb is not None:
try:
self.logger.debug(f"{self.my_info.fqcn}: calling cell_connected_cb")
self.cell_connected_cb(agent, *self.cell_connected_cb_args, **self.cell_connected_cb_kwargs)
except Exception as ex:
self.log_error(
f"exception in cell_connected_cb: {secure_format_exception(ex)}", None, log_except=True
)
elif endpoint.state in [EndpointState.CLOSING, EndpointState.DISCONNECTED, EndpointState.IDLE]:
# remove this agent
with self.agent_lock:
agent = self.agents.pop(fqcn, None)
self.logger.debug(f"{self.my_info.fqcn}: removed CellAgent {fqcn}")
if agent and self.cell_disconnected_cb is not None:
try:
self.logger.debug(f"{self.my_info.fqcn}: calling cell_disconnected_cb")
self.cell_disconnected_cb(
agent, *self.cell_disconnected_cb_args, **self.cell_disconnected_cb_kwargs
)
except Exception as ex:
self.log_error(
f"exception in cell_disconnected_cb: {secure_format_exception(ex)}", None, log_except=True
)
def get_sub_cell_names(self) -> Tuple[List[str], List[str]]:
"""
        Get the FQCNs of all sub-cells: children of this cell, plus top-level client cells (if this cell is the server root).
Returns: fqcns of child cells, fqcns of top-level client cells
"""
children_dict = {}
clients_dict = {}
with self.agent_lock:
for fqcn, agent in self.agents.items():
sub_type = self._is_my_sub(agent.info)
if sub_type == self.SUB_TYPE_CHILD:
children_dict[fqcn] = True
elif sub_type == self.SUB_TYPE_CLIENT:
clients_dict[fqcn] = True
# check local cells
for fqcn in self.ALL_CELLS.keys():
sub_type = self._is_my_sub(FqcnInfo(fqcn))
if sub_type == self.SUB_TYPE_CHILD:
children_dict[fqcn] = True
elif sub_type == self.SUB_TYPE_CLIENT:
clients_dict[fqcn] = True
return list(children_dict.keys()), list(clients_dict.keys())
def _is_my_sub(self, candidate_info: FqcnInfo) -> int:
if FQCN.is_parent(self.my_info.fqcn, candidate_info.fqcn):
return self.SUB_TYPE_CHILD
if self.my_info.is_root and self.my_info.is_on_server:
# see whether the agent is a client root cell
if candidate_info.is_root and not candidate_info.is_on_server:
return self.SUB_TYPE_CLIENT
return self.SUB_TYPE_NONE
| NVFlare-main | nvflare/fuel/f3/cellnet/core_cell.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import copy
import logging
import threading
import uuid
from typing import Dict, List, Union
from nvflare.apis.fl_constant import ServerCommandNames
from nvflare.fuel.f3.cellnet.core_cell import CoreCell, TargetMessage
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey, MessageType, ReturnCode
from nvflare.fuel.f3.cellnet.utils import decode_payload, encode_payload, make_reply
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.stream_cell import StreamCell
from nvflare.fuel.f3.streaming.stream_const import StreamHeaderKey
from nvflare.fuel.f3.streaming.stream_types import StreamFuture
from nvflare.private.defs import CellChannel
CHANNELS_TO_HANDLE = (CellChannel.SERVER_COMMAND, CellChannel.AUX_COMMUNICATION)
class SimpleWaiter:
def __init__(self, req_id, result):
super().__init__()
self.req_id = req_id
self.result = result
self.receiving_future = None
self.in_receiving = threading.Event()
class Adapter:
def __init__(self, cb, my_info, cell):
self.cb = cb
self.my_info = my_info
self.cell = cell
self.logger = logging.getLogger(self.__class__.__name__)
def call(self, future): # this will be called by StreamCell upon receiving the first byte of blob
headers = future.headers
stream_req_id = headers.get(StreamHeaderKey.STREAM_REQ_ID, "")
origin = headers.get(MessageHeaderKey.ORIGIN, None)
result = future.result()
self.logger.debug(f"{stream_req_id=}: {headers=}, incoming data={result}")
request = Message(headers, result)
decode_payload(request, StreamHeaderKey.PAYLOAD_ENCODING)
channel = request.get_header(StreamHeaderKey.CHANNEL)
request.set_header(MessageHeaderKey.CHANNEL, channel)
topic = request.get_header(StreamHeaderKey.TOPIC)
request.set_header(MessageHeaderKey.TOPIC, topic)
self.logger.info(f"Call back on {stream_req_id=}: {channel=}, {topic=}")
req_id = request.get_header(MessageHeaderKey.REQ_ID, "")
secure = request.get_header(MessageHeaderKey.SECURE, False)
self.logger.debug(f"{stream_req_id=}: on {channel=}, {topic=}")
response = self.cb(request)
self.logger.debug(f"response available: {stream_req_id=}: on {channel=}, {topic=}")
response.add_headers(
{
MessageHeaderKey.REQ_ID: req_id,
MessageHeaderKey.MSG_TYPE: MessageType.REPLY,
StreamHeaderKey.STREAM_REQ_ID: stream_req_id,
}
)
encode_payload(response, StreamHeaderKey.PAYLOAD_ENCODING)
self.logger.debug(f"sending: {stream_req_id=}: {response.headers=}, target={origin}")
reply_future = self.cell.send_blob(CellChannel.RETURN_ONLY, f"{channel}:{topic}", origin, response, secure)
self.logger.debug(f"Done sending: {stream_req_id=}: {reply_future=}")
class Cell(StreamCell):
def __init__(self, *args, **kwargs):
self.core_cell = CoreCell(*args, **kwargs)
super().__init__(self.core_cell)
self.requests_dict = dict()
self.logger = logging.getLogger(self.__class__.__name__)
self.register_blob_cb(CellChannel.RETURN_ONLY, "*", self._process_reply) # this should be one-time registration
def __getattr__(self, func):
def method(*args, **kwargs):
self.logger.debug(f"__getattr__: {args=}, {kwargs=}")
if kwargs.get("channel") in CHANNELS_TO_HANDLE:
self.logger.debug(f"calling cell {func}")
return getattr(self, f"_{func}")(*args, **kwargs)
if not hasattr(self.core_cell, func):
raise AttributeError(f"'{func}' not in core_cell.")
self.logger.debug(f"calling core_cell {func}")
return getattr(self.core_cell, func)(*args, **kwargs)
return method
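    # --- Added note (hedged): __getattr__ makes this Cell a transparent proxy.
    # Calls whose kwargs include a channel in CHANNELS_TO_HANDLE are routed to
    # the local streaming variants (e.g. _broadcast_request below); everything
    # else is delegated to the wrapped CoreCell. The channel must be passed as
    # a keyword argument for the dispatch to see it:
    #
    #   cell.broadcast_request(channel=CellChannel.SERVER_COMMAND, ...)
    #       # -> Cell._broadcast_request (streamed via send_blob)
    #   cell.broadcast_request(channel="demo", ...)
    #       # -> CoreCell.broadcast_request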
def _broadcast_request(
self,
channel: str,
topic: str,
targets: Union[str, List[str]],
request: Message,
timeout=None,
secure=False,
optional=False,
) -> Dict[str, Message]:
"""
Send a message over a channel to specified destination cell(s), and wait for reply
Args:
channel: channel for the message
topic: topic of the message
targets: FQCN of the destination cell(s)
request: message to be sent
timeout: how long to wait for replies
secure: End-end encryption
optional: whether the message is optional
Returns: a dict of: cell_id => reply message
"""
self.logger.info(f"broadcast: {channel=}, {topic=}, {targets=}, {timeout=}")
if isinstance(targets, str):
targets = [targets]
target_argument = {}
fixed_dict = dict(channel=channel, topic=topic, timeout=timeout, secure=secure, optional=optional)
results = dict()
future_to_target = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=len(targets)) as executor:
self.logger.debug(f"broadcast to {targets=}")
for t in targets:
req = Message(copy.deepcopy(request.headers), request.payload)
target_argument["request"] = TargetMessage(t, channel, topic, req).message
target_argument["target"] = t
target_argument.update(fixed_dict)
f = executor.submit(self._send_request, **target_argument)
future_to_target[f] = t
self.logger.debug(f"submitted to {t} with {target_argument.keys()=}")
for future in concurrent.futures.as_completed(future_to_target):
target = future_to_target[future]
self.logger.debug(f"{target} completed")
try:
data = future.result()
except Exception as exc:
self.logger.warning(f"{target} raises {exc}")
results[target] = make_reply(ReturnCode.TIMEOUT)
else:
results[target] = data
self.logger.debug(f"{target=}: {data=}")
self.logger.debug("About to return from broadcast_request")
return results
def fire_and_forget(
self, channel: str, topic: str, targets: Union[str, List[str]], message: Message, secure=False, optional=False
) -> Dict[str, str]:
"""
Send a message over a channel to specified destination cell(s), and do not wait for replies.
Args:
channel: channel for the message
topic: topic of the message
targets: one or more destination cell IDs. None means all.
message: message to be sent
secure: End-end encryption if True
optional: whether the message is optional
Returns: None
"""
if channel == CellChannel.SERVER_COMMAND and topic == ServerCommandNames.HANDLE_DEAD_JOB:
encode_payload(message, encoding_key=StreamHeaderKey.PAYLOAD_ENCODING)
result = {}
if isinstance(targets, list):
for target in targets:
self.send_blob(channel=channel, topic=topic, target=target, message=message, secure=secure)
result[target] = ""
else:
self.send_blob(channel=channel, topic=topic, target=targets, message=message, secure=secure)
result[targets] = ""
return result
else:
return self.core_cell.fire_and_forget(
channel=channel, topic=topic, targets=targets, message=message, optional=optional
)
def _get_result(self, req_id):
waiter = self.requests_dict.pop(req_id)
return waiter.result
def _future_wait(self, future, timeout):
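        # Progress-based timeout: each wait(timeout) below may expire, but as long
        # as the future reports new progress between checks we keep waiting; we
        # only give up once a full timeout window passes with no progress at all.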
last_progress = 0
while not future.waiter.wait(timeout):
current_progress = future.get_progress()
if last_progress == current_progress:
return False
else:
self.logger.debug(f"{current_progress=}")
last_progress = current_progress
return True
def _send_request(self, channel, target, topic, request, timeout=10.0, secure=False, optional=False):
try:
encode_payload(request, StreamHeaderKey.PAYLOAD_ENCODING)
except BaseException as exc:
self.logger.error(f"Can't encode {request=} {exc=}")
raise exc
req_id = str(uuid.uuid4())
request.add_headers({StreamHeaderKey.STREAM_REQ_ID: req_id})
# this future can be used to check sending progress, but not for checking return blob
self.logger.info(f"{req_id=}, {channel=}, {topic=}, {target=}, {timeout=}: send_request about to send_blob")
future = self.send_blob(channel=channel, topic=topic, target=target, message=request, secure=secure)
waiter = SimpleWaiter(req_id=req_id, result=make_reply(ReturnCode.TIMEOUT))
self.requests_dict[req_id] = waiter
self.logger.debug(f"{req_id=}: Waiting starts")
        # The exchange has three stages: (1) sending the request, (2) waiting for
        # the first byte of the reply, (3) receiving the reply body. Each stage is
        # guarded separately with the same timeout value.
# sending with progress timeout
self.logger.debug(f"{req_id=}: entering sending wait {timeout=}")
sending_complete = self._future_wait(future, timeout)
if not sending_complete:
self.logger.info(f"{req_id=}: sending timeout {timeout=}")
return self._get_result(req_id)
self.logger.debug(f"{req_id=}: sending complete")
# waiting for receiving first byte
self.logger.debug(f"{req_id=}: entering remote process wait {timeout=}")
if not waiter.in_receiving.wait(timeout):
self.logger.info(f"{req_id=}: remote processing timeout {timeout=}")
return self._get_result(req_id)
self.logger.debug(f"{req_id=}: in receiving")
# receiving with progress timeout
r_future = waiter.receiving_future
self.logger.debug(f"{req_id=}: entering receiving wait {timeout=}")
receiving_complete = self._future_wait(r_future, timeout)
if not receiving_complete:
self.logger.info(f"{req_id=}: receiving timeout {timeout=}")
return self._get_result(req_id)
self.logger.debug(f"{req_id=}: receiving complete")
waiter.result = Message(r_future.headers, r_future.result())
decode_payload(waiter.result, encoding_key=StreamHeaderKey.PAYLOAD_ENCODING)
self.logger.debug(f"{req_id=}: return result {waiter.result=}")
result = self._get_result(req_id)
return result
def _process_reply(self, future: StreamFuture):
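        # Registered in __init__ as the blob CB for the RETURN_ONLY channel; the
        # reply is matched back to its waiter via the stream request ID header.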
headers = future.headers
req_id = headers.get(StreamHeaderKey.STREAM_REQ_ID, -1)
self.logger.debug(f"{req_id=}: _process_reply")
try:
waiter = self.requests_dict[req_id]
except KeyError as e:
self.logger.warning(f"Receiving unknown {req_id=}, discarded: {e}")
return
waiter.receiving_future = future
waiter.in_receiving.set()
def _register_request_cb(self, channel: str, topic: str, cb, *args, **kwargs):
"""
Register a callback for handling request. The CB must follow request_cb_signature.
Args:
channel: the channel of the request
topic: topic of the request
cb:
*args:
**kwargs:
Returns:
"""
if not callable(cb):
raise ValueError(f"specified request_cb {type(cb)} is not callable")
if channel in CHANNELS_TO_HANDLE:
self.logger.info(f"Register blob CB for {channel=}, {topic=}")
adapter = Adapter(cb, self.core_cell.my_info, self)
self.register_blob_cb(channel, topic, adapter.call, *args, **kwargs)
else:
self.logger.info(f"Register regular CB for {channel=}, {topic=}")
self.core_cell.register_request_cb(channel, topic, cb, *args, **kwargs)
| NVFlare-main | nvflare/fuel/f3/cellnet/cell.py |
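The __getattr__ hook in the Cell class above is the key trick: any method not defined on the wrapper is intercepted, and calls whose channel keyword is in CHANNELS_TO_HANDLE are redirected to a streaming variant (same name, "_" prefix), while everything else is delegated to core_cell unchanged. A minimal, self-contained sketch of the same interception pattern — Core, Wrapper, and the "special" channel name are hypothetical, for illustration only:

class Core:
    def send(self, channel, data):
        return f"core sent {data} on {channel}"

class Wrapper:
    SPECIAL_CHANNELS = ("special",)  # hypothetical stand-in for CHANNELS_TO_HANDLE

    def __init__(self):
        self.core = Core()

    def _send(self, channel, data):
        # streaming variant, used only for the intercepted channels
        return f"wrapper streamed {data} on {channel}"

    def __getattr__(self, func):
        # called only when normal attribute lookup fails, i.e. for names
        # like "send" that Wrapper does not define directly
        def method(*args, **kwargs):
            if kwargs.get("channel") in self.SPECIAL_CHANNELS:
                return getattr(self, f"_{func}")(*args, **kwargs)
            return getattr(self.core, func)(*args, **kwargs)
        return method

w = Wrapper()
print(w.send(channel="special", data="x"))  # wrapper streamed x on special
print(w.send(channel="other", data="x"))    # core sent x on other

Calling w.send(...) works even though Wrapper defines no send: Python falls back to __getattr__, which builds and returns the dispatching closure on the fly.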
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from typing import Union
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.f3.cellnet.defs import ConnectorRequirementKey
from nvflare.fuel.f3.cellnet.fqcn import FqcnInfo
from nvflare.fuel.f3.comm_config import CommConfigurator
from nvflare.fuel.f3.communicator import CommError, Communicator, Mode
from nvflare.security.logging import secure_format_exception, secure_format_traceback
_KEY_RESOURCES = "resources"
_KEY_INT = "internal"
_KEY_ADHOC = "adhoc"
_KEY_SCHEME = "scheme"
_KEY_HOST = "host"
_KEY_PORTS = "ports"
class _Defaults:
ALLOW_ADHOC_CONNECTIONS = False
SCHEME_FOR_INTERNAL_CONNECTIONS = "tcp"
SCHEME_FOR_ADHOC_CONNECTIONS = "tcp"
class ConnectorData:
def __init__(self, handle, connect_url: str, active: bool):
self.handle = handle
self.connect_url = connect_url
self.active = active
def get_connection_url(self):
return self.connect_url
class ConnectorManager:
"""
Manages creation of connectors
"""
def __init__(self, communicator: Communicator, secure: bool, comm_configurator: CommConfigurator):
self._name = self.__class__.__name__
self.logger = logging.getLogger(self._name)
self.communicator = communicator
self.secure = secure
self.bb_conn_gen = comm_configurator.get_backbone_connection_generation(2)
# set up default drivers
self.int_scheme = comm_configurator.get_internal_connection_scheme(_Defaults.SCHEME_FOR_INTERNAL_CONNECTIONS)
self.int_resources = {
_KEY_HOST: "localhost",
}
self.adhoc_allowed = comm_configurator.allow_adhoc_connections(_Defaults.ALLOW_ADHOC_CONNECTIONS)
self.adhoc_scheme = comm_configurator.get_adhoc_connection_scheme(_Defaults.SCHEME_FOR_ADHOC_CONNECTIONS)
self.adhoc_resources = {}
# load config if any
comm_config = comm_configurator.get_config()
if comm_config:
int_conf = self._validate_conn_config(comm_config, _KEY_INT)
if int_conf:
self.int_scheme = int_conf.get(_KEY_SCHEME)
self.int_resources = int_conf.get(_KEY_RESOURCES)
adhoc_conf = self._validate_conn_config(comm_config, _KEY_ADHOC)
if adhoc_conf:
self.adhoc_scheme = adhoc_conf.get(_KEY_SCHEME)
self.adhoc_resources = adhoc_conf.get(_KEY_RESOURCES)
self.logger.debug(f"internal scheme={self.int_scheme}, resources={self.int_resources}")
self.logger.debug(f"adhoc scheme={self.adhoc_scheme}, resources={self.adhoc_resources}")
self.comm_config = comm_config
def get_config_info(self):
return {
"allow_adhoc": self.adhoc_allowed,
"adhoc_scheme": self.adhoc_scheme,
"adhoc_resources": self.adhoc_resources,
"internal_scheme": self.int_scheme,
"internal_resources": self.int_resources,
"config": self.comm_config if self.comm_config else "none",
}
def should_connect_to_server(self, fqcn_info: FqcnInfo) -> bool:
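        # A cell's "gen" is its depth in the FQCN hierarchy: a root cell like
        # "site-1" is generation 1, a child like "site-1.job1" is generation 2.
        # Generation-1 cells always connect to the server directly.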
if fqcn_info.gen == 1:
return True
if self.comm_config:
bb_config = self.comm_config.get("backbone")
if bb_config:
gens = bb_config.get("connect_generation")
if gens:
if isinstance(gens, list):
return fqcn_info.gen in gens
else:
return fqcn_info.gen == gens
# use default policy
return fqcn_info.gen <= self.bb_conn_gen
def is_adhoc_allowed(self, c1: FqcnInfo, c2: FqcnInfo) -> bool:
"""
Is adhoc connection allowed between the two cells?
Args:
c1:
c2:
Returns:
"""
if not self.adhoc_allowed:
return False
if c1.root == c2.root:
# same family
return False
# we only allow gen2 (or above) cells to directly connect
if c1.gen >= 2 and c2.gen >= 2:
return True
return False
@staticmethod
def _validate_conn_config(config: dict, key: str) -> Union[None, dict]:
conn_config = config.get(key)
if conn_config:
if not isinstance(conn_config, dict):
raise ConfigError(f"'{key}' must be dict but got {type(conn_config)}")
scheme = conn_config.get(_KEY_SCHEME)
if not scheme:
raise ConfigError(f"missing '{_KEY_SCHEME}' in {key} config")
resources = conn_config.get(_KEY_RESOURCES)
if resources:
if not isinstance(resources, dict):
raise ConfigError(f"'{_KEY_RESOURCES}' in {key} must be dict but got {type(resources)}")
return conn_config
def _get_connector(
self, url: str, active: bool, internal: bool, adhoc: bool, secure: bool
) -> Union[None, ConnectorData]:
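        # Decision matrix: backbone endpoints (adhoc=False) may be internal or
        # external; ad-hoc endpoints must be external, and an ad-hoc listener is
        # only created when ad-hoc connections are enabled in the configuration.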
if active and not url:
raise RuntimeError("url is required by not provided for active connector!")
ssl_required = False
if not adhoc:
# backbone
if not internal:
# external
if not url:
raise RuntimeError("url is required but not provided for external backbone connector/listener!")
scheme = self.adhoc_scheme
resources = {}
ssl_required = secure
else:
# internal
scheme = self.int_scheme
resources = self.int_resources
else:
# ad-hoc - must be external
if internal:
raise RuntimeError("internal ad-hoc connector not supported")
scheme = self.adhoc_scheme
resources = self.adhoc_resources
self.logger.debug(
f"{os.getpid()}: creating ad-hoc external listener: "
f"active={active} scheme={scheme}, resources={resources}"
)
if not active and not self.adhoc_allowed:
# ad-hoc listener is not allowed!
return None
reqs = {ConnectorRequirementKey.SECURE: ssl_required}
if url:
reqs[ConnectorRequirementKey.URL] = url
reqs.update(resources)
try:
if active:
handle = self.communicator.add_connector(url, Mode.ACTIVE, ssl_required)
connect_url = url
elif url:
handle = self.communicator.add_connector(url, Mode.PASSIVE, ssl_required)
connect_url = url
else:
self.logger.info(f"{os.getpid()}: Try start_listener Listener resources: {reqs}")
handle, connect_url = self.communicator.start_listener(scheme, reqs)
self.logger.debug(f"{os.getpid()}: ############ dynamic listener at {connect_url}")
# Kludge: to wait for listener ready and avoid race
time.sleep(0.5)
return ConnectorData(handle, connect_url, active)
except CommError as ex:
self.logger.error(f"Failed to get connector: {secure_format_exception(ex)}")
return None
except Exception as ex:
self.logger.error(f"Unexpected exception: {secure_format_exception(ex)}")
self.logger.error(secure_format_traceback())
return None
def get_external_listener(self, url: str, adhoc: bool) -> Union[None, ConnectorData]:
"""
Try to get an external listener.
Args:
url:
adhoc:
"""
return self._get_connector(url=url, active=False, internal=False, adhoc=adhoc, secure=self.secure)
def get_external_connector(self, url: str, adhoc: bool) -> Union[None, ConnectorData]:
"""
Try to get an external listener.
Args:
url:
adhoc:
"""
return self._get_connector(url=url, active=True, internal=False, adhoc=adhoc, secure=self.secure)
def get_internal_listener(self) -> Union[None, ConnectorData]:
"""
Try to get an internal listener.
"""
return self._get_connector(url="", active=False, internal=True, adhoc=False, secure=False)
def get_internal_connector(self, url: str) -> Union[None, ConnectorData]:
"""
Try to get an internal listener.
Args:
url:
"""
return self._get_connector(url=url, active=True, internal=True, adhoc=False, secure=False)
| NVFlare-main | nvflare/fuel/f3/cellnet/connector_manager.py |
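ConnectorManager above centralizes how a cell obtains its network endpoints: backbone listeners and connectors (internal or external) are created on demand, while ad-hoc endpoints are gated by configuration and the is_adhoc_allowed policy. A minimal usage sketch; setup_endpoints, communicator, and external_url are hypothetical names, and constructing a real Communicator is out of scope here:

from nvflare.fuel.f3.cellnet.connector_manager import ConnectorManager
from nvflare.fuel.f3.comm_config import CommConfigurator

def setup_endpoints(communicator, external_url: str):
    mgr = ConnectorManager(communicator, secure=False, comm_configurator=CommConfigurator())
    # passive side: listen for child cells on an internal (localhost) endpoint
    listener = mgr.get_internal_listener()
    # active side: dial out to the external backbone at the given url
    connector = mgr.get_external_connector(external_url, adhoc=False)
    # both calls return None on failure, so callers must check before use
    return listener, connector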
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/f3/cellnet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvflare.fuel.utils.fobs as fobs
from nvflare.fuel.f3.cellnet.defs import Encoding, MessageHeaderKey
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.stream_const import StreamHeaderKey
cell_mapping = {
"O": MessageHeaderKey.ORIGIN,
"D": MessageHeaderKey.DESTINATION,
"F": MessageHeaderKey.FROM_CELL,
"T": MessageHeaderKey.TO_CELL,
}
msg_mapping = {
"CH": MessageHeaderKey.CHANNEL,
"TP": MessageHeaderKey.TOPIC,
"SCH": StreamHeaderKey.CHANNEL,
"STP": StreamHeaderKey.TOPIC,
}
def make_reply(rc: str, error: str = "", body=None) -> Message:
headers = {MessageHeaderKey.RETURN_CODE: rc}
if error:
headers[MessageHeaderKey.ERROR] = error
return Message(headers, payload=body)
def shorten_string(string):
if len(string) > 8:
ss = ":" + string[-7:]
else:
ss = string
return ss
def shorten_fqcn(fqcn):
parts = fqcn.split(".")
s_fqcn = ".".join([shorten_string(p) for p in parts])
return s_fqcn
def get_msg_header_value(m, k):
return m.get_header(k, "?")
def format_log_message(fqcn: str, message: Message, log: str) -> str:
context = [f"[ME={shorten_fqcn(fqcn)}"]
for k, v in cell_mapping.items():
string = f"{k}=" + shorten_fqcn(get_msg_header_value(message, v))
context.append(string)
for k, v in msg_mapping.items():
string = f"{k}=" + get_msg_header_value(message, v)
context.append(string)
return " ".join(context) + f"] {log}"
def encode_payload(message: Message, encoding_key=MessageHeaderKey.PAYLOAD_ENCODING):
encoding = message.get_header(encoding_key)
if not encoding:
if message.payload is None:
encoding = Encoding.NONE
elif isinstance(message.payload, (bytes, bytearray, memoryview)):
encoding = Encoding.BYTES
else:
encoding = Encoding.FOBS
message.payload = fobs.dumps(message.payload)
message.set_header(encoding_key, encoding)
def decode_payload(message: Message, encoding_key=MessageHeaderKey.PAYLOAD_ENCODING):
encoding = message.get_header(encoding_key)
if not encoding:
return
if encoding == Encoding.FOBS:
message.payload = fobs.loads(message.payload)
elif encoding == Encoding.NONE:
message.payload = None
else:
# assume to be bytes
pass
message.remove_header(encoding_key)
| NVFlare-main | nvflare/fuel/f3/cellnet/utils.py |
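The encode/decode pair above normalizes payloads for transport: bytes-like payloads pass through tagged as BYTES, None is tagged as NONE, and any other object is serialized with FOBS; decode_payload reverses the mapping and strips the encoding header. A small round-trip sketch (illustrative only; it assumes nvflare is importable and the payload is FOBS-serializable):

from nvflare.fuel.f3.cellnet.utils import decode_payload, encode_payload
from nvflare.fuel.f3.message import Message

msg = Message({}, payload={"weights": [0.1, 0.2]})
encode_payload(msg)                    # payload becomes FOBS-encoded bytes
assert isinstance(msg.payload, bytes)
decode_payload(msg)                    # decoded back; encoding header removed
assert msg.payload == {"weights": [0.1, 0.2]}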