python_code | repo_name | file_path
---|---|---
import argparse
import os
import random
import shutil
import time
import warnings
import aistore
from aistore.client import Bck, Client
from aistore.client.transform import WDTransform
import webdataset as wds
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
import torchvision.transforms as transforms
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet18",
choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet18)",
)
parser.add_argument(
"-j",
"--workers",
default=4,
type=int,
metavar="N",
help="number of data loading workers (default: 4)",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="use pre-trained model",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training"
)
parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
parser.add_argument(
"--train-shards",
default="imagenet-train-{000000..000005}.tar",
type=str,
help="template for shards to use for training",
)
parser.add_argument(
"--val-shards",
default="imagenet-train-{000000..000005}.tar",
type=str,
help="template for shards to use for validation",
)
parser.add_argument(
"--ais-endpoint",
default="http://aistore-sample-proxy:51080",
type=str,
help="AIStore proxy endpoint",
)
parser.add_argument(
"--bucket-name", default="imagenet", type=str, help="dataset bucket name"
)
parser.add_argument(
"--bucket-provider",
default="ais",
type=str,
    help='bucket provider (e.g. "gcp", "aws", "ais")',
)
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
if args.gpu is not None:
warnings.warn(
"You have chosen a specific GPU. This will completely "
"disable data parallelism."
)
ngpus_per_node = torch.cuda.device_count()
main_worker(args.gpu, ngpus_per_node, args)
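# wd_transform wraps a torchvision transform as an AIStore WDTransform: each
# WebDataset sample's "jpg" entry is run through the transform and stored back
# as an HWC float32 "npy" entry, which loader() below reads via "npy cls".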
def wd_transform(client, pytorch_transform, name):
def transform(sample):
sample["npy"] = (
pytorch_transform(sample.pop("jpg"))
.permute(1, 2, 0)
.numpy()
.astype("float32")
)
return sample
return WDTransform(client, transform, transform_name=name, verbose=True)
def loader(urls, batch_size, workers):
to_tensor = transforms.Compose([transforms.ToTensor()])
etl_dataset = (
wds.WebDataset(urls, handler=wds.handlers.warn_and_continue)
.decode("rgb")
.to_tuple("npy cls", handler=wds.handlers.warn_and_continue)
.map_tuple(to_tensor, lambda x: x)
)
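    # Rough epoch-length estimate: assumes roughly 500 samples per input shard;
    # with_length() only annotates the dataset so progress meters can report.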
ds_size = (500 * len(urls)) // batch_size
etl_dataset = etl_dataset.with_length(ds_size)
loader = wds.WebLoader(
etl_dataset,
batch_size=batch_size,
num_workers=workers,
)
return loader.with_length(ds_size)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if not torch.cuda.is_available():
print("using CPU, this will be slow")
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith("alexnet") or args.arch.startswith("vgg"):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = "cuda:{}".format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint["epoch"]
best_acc1 = checkpoint["best_acc1"]
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
client = Client(args.ais_endpoint)
bck = Bck(args.bucket_name, provider=args.bucket_provider)
train_transform = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
)
train_etl = wd_transform(client, train_transform, "img-train")
train_urls = client.expand_object_urls(
bck, transform_id=train_etl.uuid, template=args.train_shards
)
train_loader = loader(train_urls, args.batch_size, args.workers)
val_transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
val_etl = wd_transform(client, val_transform, "img-val")
val_urls = client.expand_object_urls(
bck, transform_id=val_etl.uuid, template=args.val_shards
)
val_loader = loader(val_urls, args.batch_size, args.workers)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
)
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(
" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
)
return top1.avg
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
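# For example, m = AverageMeter("Loss", ":.4e"); m.update(0.5, n=32) records a
# batch of 32 samples with value 0.5, and str(m) then prints both the latest
# value and the running average over everything seen since reset().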
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
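# Worked example: with targets [3, 1] and top-2 predictions [[3, 0], [0, 1]],
# accuracy(output, target, topk=(1, 2)) returns [50.0, 100.0]: only the first
# sample is correct at k=1, while both targets appear within the top 2.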
if __name__ == "__main__":
main()
| aistore-master | docs/examples/etl-imagenet-wd/pytorch_wd.py |
import argparse
import os
import random
import shutil
import time
import warnings
import aistore
from aistore.client import Bck
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet18",
choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet18)",
)
parser.add_argument(
"-j",
"--workers",
default=4,
type=int,
metavar="N",
help="number of data loading workers (default: 4)",
)
parser.add_argument(
"--epochs", default=90, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--pretrained", dest="pretrained", action="store_true", help="use pre-trained model"
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training"
)
parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
if args.gpu is not None:
warnings.warn(
"You have chosen a specific GPU. This will completely "
"disable data parallelism."
)
ngpus_per_node = torch.cuda.device_count()
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if not torch.cuda.is_available():
print("using CPU, this will be slow")
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith("alexnet") or args.arch.startswith("vgg"):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = "cuda:{}".format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint["epoch"]
best_acc1 = checkpoint["best_acc1"]
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
train_loader = torch.utils.data.DataLoader(
aistore.pytorch.Dataset(
"http://aistore-sample-proxy:51080",
Bck("imagenet"), # AIS IP address or hostname
prefix="train/",
transform_id="imagenet-train",
transform_filter=lambda object_name: object_name.endswith(".jpg"),
),
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = torch.utils.data.DataLoader(
aistore.pytorch.Dataset(
"http://aistore-sample-proxy:51080",
Bck("imagenet"),
prefix="val/",
transform_id="imagenet-train",
transform_filter=lambda object_name: object_name.endswith(".jpg"),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
)
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(
" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
)
return top1.avg
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| aistore-master | docs/examples/etl-imagenet-dataset/train_aistore.py |
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.models as models
from PIL import Image
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet18",
choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet18)",
)
parser.add_argument(
"-j",
"--workers",
default=4,
type=int,
metavar="N",
help="number of data loading workers (default: 4)",
)
parser.add_argument(
"--epochs", default=90, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--pretrained", dest="pretrained", action="store_true", help="use pre-trained model"
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training"
)
parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
if args.gpu is not None:
warnings.warn(
"You have chosen a specific GPU. This will completely "
"disable data parallelism."
)
ngpus_per_node = torch.cuda.device_count()
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if not torch.cuda.is_available():
print("using CPU, this will be slow")
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith("alexnet") or args.arch.startswith("vgg"):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = "cuda:{}".format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint["epoch"]
best_acc1 = checkpoint["best_acc1"]
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_loader = torch.utils.data.DataLoader(
LocalDataset(
os.path.join(args.data, "train"),
transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
),
),
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = torch.utils.data.DataLoader(
LocalDataset(
os.path.join(args.data, "val"),
transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
)
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(
" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
)
return top1.avg
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
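# LocalDataset expects a flat layout of per-sample pairs as produced by the
# ETL example: <name>.jpg holding the image next to <name>.cls holding the
# integer class label.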
class LocalDataset(torch.utils.data.Dataset):
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
self.transform = transform
paths = set()
        for dirpath, _, fnames in sorted(os.walk(self.root_dir)):
            for fname in sorted(fnames):
                bname = os.path.splitext(fname)[0]
                paths.add(os.path.join(dirpath, bname))
self.samples = []
for path in paths:
target = None
with open(path + ".cls", "r") as f:
target = int(f.read())
self.samples.append((path + ".jpg", target))
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, index: int):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
return sample, target
def loader(self, path: str):
with open(path, "rb") as f:
img = Image.open(f)
return img.convert("RGB")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
| aistore-master | docs/examples/etl-imagenet-dataset/train_pytorch.py |
import os
import io
import sys
from PIL import Image
from torchvision import transforms
import torch
from aistore.pytorch import AISDataset
from aistore.sdk import Client
from aistore.sdk.multiobj import ObjectRange
AISTORE_ENDPOINT = os.getenv("AIS_ENDPOINT", "http://192.168.49.2:8080")
client = Client(AISTORE_ENDPOINT)
bucket_name = "images"
def etl():
def img_to_bytes(img):
buf = io.BytesIO()
img = img.convert('RGB')
img.save(buf, format='JPEG')
return buf.getvalue()
input_bytes = sys.stdin.buffer.read()
image = Image.open(io.BytesIO(input_bytes)).convert('RGB')
preprocessing = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
transforms.ToPILImage(),
transforms.Lambda(img_to_bytes),
])
processed_bytes = preprocessing(image)
sys.stdout.buffer.write(processed_bytes)
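# The transform above follows AIStore's "io" communication type: the object's
# bytes arrive on stdin and the transformed bytes go to stdout. A hypothetical
# local smoke test (the file name is illustrative) could therefore be:
#
#   python -c "from transform_sdk import etl; etl()" < Bengal_171.jpg > out.jpg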
def show_image(image_data):
with Image.open(io.BytesIO(image_data)) as image:
image.show()
def load_data():
# First, let's create a bucket and put the data into AIS
bucket = client.bucket(bucket_name).create()
bucket.put_files("images/", pattern="*.jpg")
# Show a random (non-transformed) image from the dataset
image_data = bucket.object("Bengal_171.jpg").get().read_all()
show_image(image_data)
def create_etl(etl_name):
image_etl = client.etl(etl_name)
image_etl.init_code(
transform=etl,
dependencies=["torchvision"],
communication_type="io")
return image_etl
def show_etl(etl):
print(client.cluster().list_running_etls())
print(etl.view())
def get_with_etl(etl):
transformed_data = client.bucket(bucket_name).object("Bengal_171.jpg").get(etl_name=etl.name).read_all()
show_image(transformed_data)
def etl_bucket(etl):
dest_bucket = client.bucket("transformed-images").create()
transform_job = client.bucket(bucket_name).transform(etl_name=etl.name, to_bck=dest_bucket)
client.job(transform_job).wait()
    print([entry.name for entry in dest_bucket.list_all_objects()])
def etl_group(etl):
dest_bucket = client.bucket("transformed-selected-images").create()
# Select a range of objects from the source bucket
object_range = ObjectRange(min_index=0, max_index=100, prefix="Bengal_", suffix=".jpg")
object_group = client.bucket(bucket_name).objects(obj_range=object_range)
transform_job = object_group.transform(etl_name=etl.name, to_bck=dest_bucket)
client.job(transform_job).wait_for_idle(timeout=300)
print([entry.name for entry in dest_bucket.list_all_objects()])
def create_dataloader():
# Construct a dataset and dataloader to read data from the transformed bucket
dataset = AISDataset(AISTORE_ENDPOINT, "ais://transformed-images")
train_loader = torch.utils.data.DataLoader(dataset, shuffle=True)
return train_loader
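# A minimal sketch of consuming the loader (assumes the transformed bucket
# exists and the AIS endpoint is reachable):
#
#   loader = create_dataloader()
#   for sample in loader:
#       ...  # each sample yields the transformed object data
#       break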
if __name__ == "__main__":
load_data()
image_etl = create_etl("transform-images")
show_etl(image_etl)
get_with_etl(image_etl)
etl_bucket(image_etl)
etl_group(image_etl)
data_loader = create_dataloader()
| aistore-master | docs/examples/transform-images-sdk/transform_sdk.py |
import matplotlib.pyplot as plt
import numpy as np
from torchvision import transforms
import webdataset as wds
# Utility that displays even number of images based on loader
# pylint: disable=unused-variable
def display_loader_images(data_loader, objects=2):
test_iter = iter(data_loader)
printed = 0
row = 0
    # squeeze=False keeps axarr two-dimensional even when only one row is drawn
    _, axarr = plt.subplots((objects // 2), 2, squeeze=False, figsize=(12, 12))
while printed != objects:
img_tensors, _ = next(test_iter)
for img_tensor in img_tensors:
column = printed % 2
img = np.transpose(np.asarray(img_tensor.squeeze()), (1, 2, 0))
img = np.clip(img, 0, 1)
axarr[row, column].set_yticks([])
axarr[row, column].set_xticks([])
axarr[row, column].imshow(img, interpolation="nearest")
printed += 1
if column == 1:
row += 1
if printed == objects:
plt.show()
return
plt.show()
# Utility for displaying images from shard
# pylint: disable=unused-variable
def display_shard_images(client, bucket, tar_name, objects=2, etl_name=""):
to_tensor = transforms.Compose([transforms.ToTensor()])
test_object = (
wds.WebDataset(
client.object_url(bucket, tar_name, transform_id=etl_name),
handler=wds.handlers.warn_and_continue,
)
.decode("rgb")
.to_tuple("jpg;png;jpeg;npy cls", handler=wds.handlers.warn_and_continue)
.map_tuple(to_tensor, lambda x: x)
)
test_loader = wds.WebLoader(
test_object,
batch_size=None,
shuffle=False,
num_workers=1,
)
test_iter = iter(test_loader)
row = 0
    # squeeze=False keeps axarr two-dimensional even when only one row is drawn
    _, axarr = plt.subplots((objects // 2), 2, squeeze=False, figsize=(12, 12))
for i in range(objects):
column = i % 2
img_tensor, _ = next(test_iter)
img = np.transpose(np.asarray(img_tensor.squeeze()), (1, 2, 0))
img = np.clip(img, 0, 1)
axarr[row, column].set_yticks([])
axarr[row, column].set_xticks([])
axarr[row, column].imshow(img, interpolation="nearest")
if column == 1:
row += 1
plt.show()
| aistore-master | docs/assets/wd_aistore/utils.py |
import msgpack
import os
def unpack_msgpack(path):
with open(path, "rb") as f:
data = f.read()
    files_dict = msgpack.unpackb(data, raw=False)
    os.makedirs("/tmp/unpacked", exist_ok=True)  # ensure the output directory exists
for name, content in files_dict.items():
fqn = os.path.join("/tmp/unpacked", name)
with open(fqn, "wb") as fh:
fh.write(content)
print("unpacked " + fqn)
if __name__ == "__main__":
unpack_msgpack("/tmp/packed/shard.0")
| aistore-master | cmn/tests/python/unpack.py |
from setuptools import find_packages, setup
with open('README.md', 'r') as f:
long_description = f.read()
with open('VERSION', 'r') as f:
version = f.read().strip()
extras = {
    'tfrecord': ['tensorflow >= 1.14.0,!=2.0.*,!=2.1.*,!=2.2.0,!=2.4.0'],
'mxnet': ['mxnet >= 1.6.0,!=1.8.0']
}
extras['all'] = [item for group in extras.values() for item in group]
setup(
name='nvidia-imageinary',
author='NVIDIA Corporation',
author_email='[email protected]',
version=version,
description='A tool to randomly generate image datasets of various resolutions',
long_description=long_description,
packages=find_packages(include=['imagine'], exclude=['tests']),
license='Apache 2.0',
python_requires='>=3.7',
entry_points={
'console_scripts': ['imagine=imagine:_main']
},
install_requires=[
'numpy >= 1.18.0',
'Pillow >= 7.1.2'
],
extras_require=extras
)
| Imageinary-main | setup.py |
| Imageinary-main | tests/__init__.py |
| Imageinary-main | tests/unit/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
from imagine import imagine
class TestUnits:
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
self.tmpdir = tmpdir
def teardown_method(self):
try:
os.rmdir(str(self.tmpdir))
except OSError:
# The directory wasn't created, as expected
pass
def test_directory_creation_if_not_exist(self):
imagine._try_create_directory(str(self.tmpdir))
def test_error_input_directory_doesnt_exist(self):
with pytest.raises(RuntimeError):
imagine._check_directory_exists(os.path.join(str(self.tmpdir),
'dne'))
def test_record_slice_yields_expected_results(self):
slices = [range(x, x + 100) for x in range(0, 1000, 100)]
results = imagine._record_slice(self.tmpdir,
self.tmpdir,
'test_record_',
range(0, 1000),
100,
10)
for count, result in enumerate(results):
source, dest, name, images, num = result
assert source == self.tmpdir
assert dest == self.tmpdir
assert name == 'test_record_'
assert images == slices[count]
assert num == count
# Enumerate is 0-based, so the final number will be 9 for 10 records
assert count == 10 - 1
| Imageinary-main | tests/unit/test_units.py |
| Imageinary-main | tests/functional/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
import os
from glob import glob
from imagine import create_images
from PIL import Image
class TestJPGCreation:
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
self.tmpdir = tmpdir.mkdir('jpg_files')
def teardown_method(self):
for image in glob(f'{str(self.tmpdir)}/*'):
os.remove(image)
os.rmdir(str(self.tmpdir))
def test_creating_one_hundred_images(self):
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'jpg',
0,
False
)
images = glob(f'{str(self.tmpdir)}/*')
assert len(images) == 100
for image in images:
assert re.search(r'tmp_\d+.jpg', image)
with Image.open(image) as im:
assert im.size == (1920, 1080)
def test_creating_one_hundred_4K_images(self):
create_images(
str(self.tmpdir),
'tmp_',
3840,
2160,
100,
'jpg',
0,
False
)
images = glob(f'{str(self.tmpdir)}/*')
assert len(images) == 100
for image in images:
assert re.search(r'tmp_\d+.jpg', image)
with Image.open(image) as im:
assert im.size == (3840, 2160)
| Imageinary-main | tests/functional/test_jpg_creation.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
import os
from glob import glob
from imagine import create_images
from PIL import Image
class TestPNGCreation:
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
self.tmpdir = tmpdir.mkdir('png_files')
def teardown_method(self):
for image in glob(f'{str(self.tmpdir)}/*'):
os.remove(image)
os.rmdir(str(self.tmpdir))
def test_creating_one_hundred_images(self):
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'png',
0,
False
)
images = glob(f'{str(self.tmpdir)}/*')
assert len(images) == 100
for image in images:
assert re.search(r'tmp_\d+.png', image)
with Image.open(image) as im:
assert im.size == (1920, 1080)
def test_creating_one_hundred_4K_images(self):
create_images(
str(self.tmpdir),
'tmp_',
3840,
2160,
100,
'png',
0,
False
)
images = glob(f'{str(self.tmpdir)}/*')
assert len(images) == 100
for image in images:
assert re.search(r'tmp_\d+.png', image)
with Image.open(image) as im:
assert im.size == (3840, 2160)
| Imageinary-main | tests/functional/test_png_creation.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
import os
from glob import glob
from imagine import create_images, create_recordio
from PIL import Image
class TestRecordIOCreation:
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
self.tmpdir = tmpdir.mkdir('input_files')
self.outdir = tmpdir.mkdir('output_files')
def teardown_method(self):
for image in glob(f'{str(self.tmpdir)}/*'):
os.remove(image)
for record in glob(f'{str(self.outdir)}/*'):
os.remove(record)
os.rmdir(str(self.tmpdir))
os.rmdir(str(self.outdir))
def test_creating_recordio_from_100_jpgs(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'jpg',
0,
False
)
create_recordio(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
100
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 2
for record in records:
assert 'tmprecord_0.idx' in record or \
'tmprecord_0.rec' in record
def test_creating_recordio_from_100_pngs(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'png',
0,
False
)
create_recordio(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
100
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 2
for record in records:
assert 'tmprecord_0.idx' in record or \
'tmprecord_0.rec' in record
def test_creating_recordio_from_100_jpg_multiple_files(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'jpg',
0,
False
)
create_recordio(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
10
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 20
for record in records:
assert re.search(r'tmprecord_\d+.idx', record) or \
re.search(r'tmprecord_\d+.rec', record)
def test_creating_recordio_from_100_pngs_multiple_files(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'png',
0,
False
)
create_recordio(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
10
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 20
for record in records:
assert re.search(r'tmprecord_\d+.idx', record) or \
re.search(r'tmprecord_\d+.rec', record)
| Imageinary-main | tests/functional/test_recordio.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
import os
from glob import glob
from imagine import create_images, create_tfrecords
from PIL import Image
class TestTFRecordCreation:
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
self.tmpdir = tmpdir.mkdir('input_files')
self.outdir = tmpdir.mkdir('output_files')
def teardown_method(self):
for image in glob(f'{str(self.tmpdir)}/*'):
os.remove(image)
for record in glob(f'{str(self.outdir)}/*'):
os.remove(record)
os.rmdir(str(self.tmpdir))
os.rmdir(str(self.outdir))
def test_creating_tfrecord_from_100_jpgs(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'jpg',
0,
False
)
create_tfrecords(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
100
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 1
assert 'tmprecord_0' in records[0]
def test_creating_tfrecord_from_100_pngs(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'png',
0,
False
)
create_tfrecords(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
100
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 1
assert 'tmprecord_0' in records[0]
def test_creating_tfrecord_from_100_jpg_multiple_files(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'jpg',
0,
False
)
create_tfrecords(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
10
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 10
for record in records:
assert re.search(r'tmprecord_\d+', record)
def test_creating_tfrecord_from_100_pngs_multiple_files(self):
# Create sample images which will be used as a basis.
create_images(
str(self.tmpdir),
'tmp_',
1920,
1080,
100,
'png',
0,
False
)
create_tfrecords(
str(self.tmpdir),
str(self.outdir),
'tmprecord_',
10
)
records = glob(f'{str(self.outdir)}/*')
assert len(records) == 10
for record in records:
assert re.search(r'tmprecord_\d+', record)
| Imageinary-main | tests/functional/test_tfrecord.py |
#!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import numpy
from argparse import ArgumentParser, Namespace
from PIL import Image
from multiprocessing.pool import Pool
try:
from mxnet.recordio import IRHeader, MXIndexedRecordIO, pack
except ImportError:
IRHeader = None
from time import perf_counter
from typing import Generator, List, NoReturn, Optional, Tuple
from math import ceil
try:
from tensorflow.io import TFRecordWriter
from tensorflow.train import (BytesList,
Example,
Feature,
Features,
Int64List)
except ImportError:
TFRecordWriter = None
STANDARD_IMAGE = 'create-images'
TFRECORD = 'create-tfrecord'
RECORDIO = 'create-recordio'
SUPPORTED_IMAGE_FORMATS = {"jpg": "jpg", "jpeg": "jpg", "bmp": "bmp",
"bitmap": "bmp", "png": "png"}
def _parse_args() -> Namespace:
"""
Parse arguments passed to the application.
A custom argument parser handles multiple commands and options to launch
the desired function.
Returns
-------
Namespace
Returns a ``Namespace`` of all of the arguments that were parsed from
the application during runtime.
"""
message = """
CLI for generating a fake dataset of various quantities at different
resolutions.
Supported file types: .bmp, .png, and .jpg.
Supported record types: TFRecords, and RecordIO.
TFRecords requires an external index file creation step.
"""
    parser = ArgumentParser(description=message)
# Required positional command subparser which should be specified first
commands = parser.add_subparsers(dest='command', metavar='command')
commands_parent = ArgumentParser(add_help=False)
# Options specific to record types
commands_parent.add_argument('source_path', metavar='source-path',
help='Path containing valid input images to '
'convert to records')
commands_parent.add_argument('dest_path', metavar='dest-path',
help='Path to save record files to')
commands_parent.add_argument('name', help='Name to prepend files with, '
'such as "sample_record_"')
commands_parent.add_argument('--img-per-file', type=int, default=1000)
commands.add_parser(TFRECORD, help='Create TFRecords from input images',
parents=[commands_parent])
commands.add_parser(RECORDIO, help='Create RecordIO from input images',
parents=[commands_parent])
# Options specific to generating standard images
standard = commands.add_parser(STANDARD_IMAGE, help='Generate random '
'images')
standard.add_argument('path', help='Path to save images to')
standard.add_argument('name', help='Name to prepend files with, such as '
'"sample_image_"')
standard.add_argument('image_format', metavar='image-format', help='The '
'image format to generate',
choices=SUPPORTED_IMAGE_FORMATS.keys())
standard.add_argument('--width', help='The image width in pixels',
type=int, default=1920)
standard.add_argument('--height', help='The image height in pixels',
type=int, default=1080)
standard.add_argument('--count', help='The number of images to generate',
type=int, default=1)
standard.add_argument('--seed', help='The seed to use while generating '
'random image data', type=int, default=0)
standard.add_argument('--size', help='Display the first image size and '
'the directory size for the images',
action='store_true')
return parser.parse_args()
def _try_create_directory(directory: str) -> NoReturn:
"""
Create a directory if it doesn't exist.
Given a name of a directory as a ``string``, a directory should be created
with the requested name if and only if it doesn't exist already. If the
directory exists, the function will return without any changes.
Parameters
----------
directory : string
A ``string`` of a path pointing to a directory to attempt to create.
"""
os.makedirs(directory, exist_ok=True)
def _check_directory_exists(directory: str) -> NoReturn:
"""
Check if a directory exists.
Check if a requested directory exists and raise an error if not.
Parameters
----------
directory : string
A ``string`` of the requested directory to check.
Raises
------
RuntimeError
Raises a ``RuntimeError`` if the requested directory does not exist.
"""
if not os.path.exists(directory):
raise RuntimeError('Error: Please specify an input directory which '
'contains valid images.')
def create_images(
path: str,
name: str,
width: int,
height: int,
count: int,
image_format: str,
seed: Optional[int] = 0,
size: Optional[bool] = False,
chunksize: Optional[int] = 64
) -> NoReturn:
"""
Randomly generate standard images.
Generate random images of standard formats, such as JPG, PNG, and BMP of
variable height and width. Images are generated by creating a random numpy
array of the requested dimensions and converting the image to the desired
format. All images will be saved in the specified directory with each name
beginning with the passed ``name`` variable and ending with a counter
starting at zero.
Parameters
----------
path : string
The path to the directory to save images to. The directory will be
created if it doesn't exist.
name : string
A ``string`` to prepend to all filenames, such as `random_image_`.
Filenames will end with a counter starting at zero, followed by the
file format's extension.
width : int
The width of the image to generate in pixels.
height : int
The height of the image to generate in pixels.
count : int
The number of images to generate.
image_format : str
        The format the images should be saved as. Choices are: jpg, jpeg,
        bmp, bitmap, png.
seed : int (optional)
A seed to use for numpy for creating the random image data. Defaults
to 0.
size : bool (optional)
If `True`, will print image size information including the size of the
first image and the final directory size.
chunksize : int (optional)
Specify the number of chunks to divide the requested amount of images
into. Higher chunksizes reduce the amount of memory consumed with minor
additional overhead.
""".format(SUPPORTED_IMAGE_FORMATS.keys())
print('Creating {} {} files located at {} of {}x{} resolution with a base '
      'filename of {}'.format(count, image_format, path, width, height, name))
_try_create_directory(path)
combined_path = os.path.join(path, name)
# Expected to yield a thread pool equivalent to the number of CPU cores in
# the system.
pool = Pool()
try:
start_time = perf_counter()
# NOTE: For very large image counts on memory-constrained systems, this
# can stall out. Either reduce the requested image count, or increase the
# chunk size.
pool.starmap(_image_creation,
((combined_path, width, height, seed, image_format, n)
for n in range(count)),
chunksize=chunksize)
finally:
pool.close()
pool.join()
stop_time = perf_counter()
if size:
_print_image_information(path)
print('Created {} files in {} seconds'.format(count, stop_time-start_time))
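# Illustrative usage sketch (not wired into the CLI): a direct call to
# create_images. The output path below is hypothetical.
#
#     create_images(path='/tmp/imaginary_demo', name='sample_image_',
#                   width=640, height=480, count=10, image_format='png')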
def _record_slice(
source_path: str,
dest_path: str,
name: str,
image_files: List[str],
images_per_file: int,
num_of_records: int
) -> Generator[Tuple[str, str, str, List[str], int], None, None]:
"""
Generate subcomponents for a thread.
While creating RecordIO files, a tuple needs to be generated to pass to
every thread in a multiprocessing pool. Each tuple corresponds with a
unique record file with a new path, name, and subset of images. Each record
receives a contiguous slice of ``image_files``: record ``k`` gets the images
from index ``k * images_per_file`` up to (but not including)
``(k + 1) * images_per_file``.
Parameters
----------
source_path : string
Path to the directory where the input images are stored.
dest_path : string
Path to the directory where the record files should be saved. Will be
created if it does not exist.
name : string
A ``string`` to prepend to all filenames, such as `random_record_`.
Filenames will end with a counter starting at zero, followed by the
file format's extension.
image_files : list
A ``list`` of ``strings`` of the image filenames to use for the record
files.
images_per_file : int
The number of images to include per record file.
num_of_records : int
The total number of record files to create. Note that each record
consists of a record (.rec) file plus a corresponding index (.idx) file.
Returns
-------
Generator
Yields a ``tuple`` of objects specific to each record file. The tuple
includes the `source_path` as a ``string``, `dest_path` as a
``string``, `name` as a ``string``, a subset of image names from
`image_files` as a ``list`` of ``strings``, and a counter for the
record file starting at 0 as an ``int``.
"""
for num in range(num_of_records):
subset = num * images_per_file
yield (source_path,
dest_path,
name,
image_files[subset:(subset + images_per_file)],
num)
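# Hedged illustration of the slicing above: with 10 images, images_per_file=4
# and num_of_records=3, the generator yields image_files[0:4], [4:8], and
# [8:12] (the final slice simply comes up short), tagged with counters 0-2.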
def create_recordio(
source_path: str,
dest_path: str,
name: str,
img_per_file: int
) -> NoReturn:
"""
Create RecordIO files based on standard images.
Generate one or multiple RecordIO records based on standard input images.
Records are created by specifying an input path containing standard image
files in JPG, PNG, or BMP format, an output directory to save the records
to, a name to prepend the records with, and the number of images to include
per record file. The number of record files is the total number of images
in the input directory divided by the number of images per record file,
rounded up. Images are pulled sequentially from the input directory and
placed into each record.
Parameters
----------
source_path : string
Path to the directory where the input images are stored.
dest_path : string
Path to the directory where the record files should be saved. Will be
created if it does not exist.
name : string
A ``string`` to prepend to all filenames, such as `random_record_`.
Filenames will end with a counter starting at zero, followed by the
file format's extension.
img_per_file : int
    The number of images to include per record file.
"""
print('Creating RecordIO files at {} from {} targeting {} files per '
'record with a base filename of {}'.format(dest_path,
source_path,
img_per_file,
name))
if not IRHeader:
raise ImportError('MXNet not found! Please install MXNet dependency '
'using "pip install nvidia-imageinary[\'mxnet\']".')
image_files = []
source_path = os.path.abspath(source_path)
dest_path = os.path.abspath(dest_path)
_check_directory_exists(source_path)
_try_create_directory(dest_path)
_print_image_information(source_path)
for image_name in os.listdir(source_path):
if not os.path.isdir(os.path.join(source_path, image_name)):
image_files.append(image_name)
num_of_records = ceil(len(image_files) / img_per_file)
pool = Pool()
try:
start_time = perf_counter()
pool.starmap(_recordio_creation,
_record_slice(source_path,
dest_path,
name,
image_files,
img_per_file,
num_of_records))
finally:
pool.close()
pool.join()
stop_time = perf_counter()
print('Completed in {} seconds'.format(stop_time-start_time))
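# Hedged read-back sketch for the record files produced above (requires
# MXNet; 'record0.idx'/'record0.rec' are hypothetical examples of the
# name-plus-counter scheme used by _recordio_creation):
#
#     from mxnet.recordio import MXIndexedRecordIO, unpack
#     rec = MXIndexedRecordIO('record0.idx', 'record0.rec', 'r')
#     header, img_bytes = unpack(rec.read_idx(rec.keys[0]))
#     rec.close()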
def create_tfrecords(
source_path: str,
dest_path: str,
name: str,
img_per_file: int
) -> NoReturn:
"""
Create TFRecords based on standard images.
Generate one or multiple TFRecords based on standard input images. Records
are created by specifying an input path containing standard image files in
JPG, PNG, or BMP format, an output directory to save the records to, a name
to prepend the records with, and the number of images to include per record
file. The number of record files is the total number of images in the input
directory divided by the number of images per record file, rounded up.
Images are pulled sequentially from the input directory and placed into
each record.
Parameters
----------
source_path : string
Path to the directory where the input images are stored.
dest_path : string
Path to the directory where the record files should be saved. Will be
created if it does not exist.
name : string
A ``string`` to prepend to all filenames, such as `random_record_`.
Filenames will end with a counter starting at zero.
img_per_file : int
    The number of images to include per record file.
"""
print('Creating TFRecord files at {} from {} targeting {} files per '
'TFRecord with a base filename of {}'.format(dest_path,
source_path,
img_per_file,
name))
if not TFRecordWriter:
raise ImportError('TensorFlow not found! Please install TensorFlow '
'dependency using "pip install '
'nvidia-imageinary[\'tfrecord\']".')
_check_directory_exists(source_path)
_try_create_directory(dest_path)
combined_path = os.path.join(dest_path, name)
_print_image_information(source_path)
image_count = 0
record = 0
start_time = perf_counter()
writer = TFRecordWriter(combined_path + str(record))
for image_name in os.listdir(source_path):
image_path = os.path.join(source_path, image_name)
if os.path.isdir(image_path):
continue
image_count += 1
if image_count > img_per_file:
image_count = 1
writer.close()
record += 1
writer = TFRecordWriter(combined_path + str(record))
with open(image_path, 'rb') as image_file:
image = image_file.read()
feature = {
'image/encoded': Feature(bytes_list=BytesList(value=[image])),
'image/class/label': Feature(int64_list=Int64List(value=[0]))
}
tfrecord_entry = Example(features=Features(feature=feature))
writer.write(tfrecord_entry.SerializeToString())
writer.close()
stop_time = perf_counter()
print('Completed in {} seconds'.format(stop_time-start_time))
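# Hedged read-back sketch for the TFRecords written above (requires
# TensorFlow; 'records0' is a hypothetical file following the
# combined_path-plus-counter naming used by create_tfrecords):
#
#     import tensorflow as tf
#     spec = {'image/encoded': tf.io.FixedLenFeature([], tf.string),
#             'image/class/label': tf.io.FixedLenFeature([], tf.int64)}
#     for raw in tf.data.TFRecordDataset('records0').take(1):
#         example = tf.io.parse_single_example(raw, spec)
#         img_bytes = example['image/encoded']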
def _print_image_information(path: str) -> NoReturn:
"""
Print the image and directory size.
Print the size of the first image in the directory, which is assumed to be
a good approximation of the average image size of all images in the
directory, as well as the total size of the directory, in bytes.
Parameters
----------
path : string
The path to the directory where generated images are stored.
"""
is_first_image = True
first_image_size = 0
directory_size = 0
for image_name in os.listdir(path):
image_path = os.path.join(path, image_name)
if os.path.isdir(image_path):
continue
directory_size += os.path.getsize(image_path)
if is_first_image:
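# directory_size has only accumulated the first file at this point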
first_image_size = directory_size
is_first_image = False
print('First image size from {}, in bytes: {}'.format(path,
first_image_size))
print('Directory {} size, in bytes: {}'.format(path, directory_size))
def _recordio_creation(
source_path: str,
dest_path: str,
name: str,
image_files: List[str],
n: int
) -> NoReturn:
"""
Create a RecordIO file based on input images.
Given a subset of images, a RecordIO file should be created with a
corresponding index file with the given name and counter.
Parameters
----------
source_path : string
Path to the directory where the input images are stored.
dest_path : string
Path to the directory where the record files should be saved. Will be
created if it does not exist.
name : string
A ``string`` to prepend the record filename with.
image_files : list
A ``list`` of ``strings`` of image filenames to be used for the record
creation.
n : int
An ``integer`` of the current count the record file points to, starting
at zero.
"""
combined_path = os.path.join(dest_path, name)
regex = re.compile(r'\d+')
dataset_rec = combined_path + str(n) + '.rec'
dataset_idx = combined_path + str(n) + '.idx'
# combined_path already includes dest_path, so dataset_idx/_rec are full paths
recordio_ds = MXIndexedRecordIO(dataset_idx, dataset_rec, 'w')
for image_name in image_files:
image_path = os.path.join(source_path, image_name)
image_index = int(regex.findall(image_name)[0])
header = IRHeader(0, 0, image_index, 0)
with open(image_path, "rb") as image_file:
    image = image_file.read()
packed_image = pack(header, image)
recordio_ds.write_idx(image_index, packed_image)
recordio_ds.close()
def _image_creation(
combined_path: str,
width: int,
height: int,
seed: int,
image_format: str,
n: int
) -> NoReturn:
"""
Generate a random image.
Given a name, dimensions, a seed, and an image format, a random image is
generated by creating a numpy array of random data for the specified
dimensions and three color channels, then converting the array to an image
of the specified format and saving the result to the output directory with
the requested name, postfixed with the zero-based image counter and the
file extension.
Parameters
----------
combined_path : string
The full path to the output image file including the requested name as
a prefix for the filename.
width : int
    The width of the image to generate in pixels.
height : int
    The height of the image to generate in pixels.
seed : int
    The base seed for numpy's random generator. The effective per-image
    seed is ``seed + n``, so output is reproducible yet unique per image.
image_format : str
    The format the images should be saved as.
n : int
    The zero-based counter for the image.
"""
numpy.random.seed(seed + n)
a = numpy.random.rand(height, width, 3) * 255
file_ext = SUPPORTED_IMAGE_FORMATS.get(image_format.lower(), 'png')
if file_ext == "jpg":
im_out = Image.fromarray(a.astype('uint8')).convert('RGB')
else:
im_out = Image.fromarray(a.astype('uint8')).convert('RGBA')
im_out.save('%s%d.%s' % (combined_path, n, file_ext))
def _main() -> NoReturn:
"""
Randomly generate images or record files.
Create standard images or record files using randomized data to be ingested
into a deep learning application.
"""
args = _parse_args()
if args.command == STANDARD_IMAGE:
create_images(args.path, args.name, args.width, args.height,
args.count, args.image_format, args.seed, args.size)
elif args.command == TFRECORD:
create_tfrecords(args.source_path, args.dest_path, args.name,
args.img_per_file)
elif args.command == RECORDIO:
create_recordio(args.source_path, args.dest_path, args.name,
args.img_per_file)
| Imageinary-main | imagine/imagine.py |
from imagine.imagine import (create_images,
create_recordio,
create_tfrecords,
_main)
| Imageinary-main | imagine/__init__.py |
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import os
import shutil
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
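# Illustrative: AttrDict mirrors dict keys as attributes, so a JSON config
# loaded as a dict can be read as h.sampling_rate instead of
# h['sampling_rate']. A minimal sketch:
#
#     h = AttrDict({'sampling_rate': 22050})
#     assert h.sampling_rate == h['sampling_rate'] == 22050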
def build_env(config, config_name, path):
t_path = os.path.join(path, config_name)
if config != t_path:
os.makedirs(path, exist_ok=True)
shutil.copyfile(config, os.path.join(path, config_name))
 | BigVGAN-main | env.py
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
import activations
from utils import init_weights, get_padding
from alias_free_torch import *
LRELU_SLOPE = 0.1
class AMPBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None):
super(AMPBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers
if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing
self.activations = nn.ModuleList([
Activation1d(
activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
])
elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing
self.activations = nn.ModuleList([
Activation1d(
activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
])
else:
raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
def forward(self, x):
acts1, acts2 = self.activations[::2], self.activations[1::2]
for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
xt = a1(x)
xt = c1(xt)
xt = a2(xt)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
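# Hedged shape check: AMPBlock1 is purely residual, so it preserves (B, C, T).
# The minimal config below carries only the one field the block reads and is
# an assumption for illustration (AttrDict lives in env.py), not a full
# training config:
#
#     from env import AttrDict
#     blk = AMPBlock1(AttrDict({'snake_logscale': True}), channels=32,
#                     activation='snake')
#     assert blk(torch.randn(2, 32, 128)).shape == (2, 32, 128)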
class AMPBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None):
super(AMPBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
self.num_layers = len(self.convs) # total number of conv layers
if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing
self.activations = nn.ModuleList([
Activation1d(
activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
])
elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing
self.activations = nn.ModuleList([
Activation1d(
activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
for _ in range(self.num_layers)
])
else:
raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
def forward(self, x):
for c, a in zip(self.convs, self.activations):
xt = a(x)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class BigVGAN(torch.nn.Module):
# this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks.
def __init__(self, h):
super(BigVGAN, self).__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
# pre conv
self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
# define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2
# transposed conv-based upsamplers. does not apply anti-aliasing
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(nn.ModuleList([
weight_norm(ConvTranspose1d(h.upsample_initial_channel // (2 ** i),
h.upsample_initial_channel // (2 ** (i + 1)),
k, u, padding=(k - u) // 2))
]))
# residual blocks using anti-aliased multi-periodicity composition modules (AMP)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d, activation=h.activation))
# post conv
if h.activation == "snake": # periodic nonlinearity with snake function and anti-aliasing
activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale)
self.activation_post = Activation1d(activation=activation_post)
elif h.activation == "snakebeta": # periodic nonlinearity with snakebeta function and anti-aliasing
activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
self.activation_post = Activation1d(activation=activation_post)
else:
raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
# weight initialization
for i in range(len(self.ups)):
self.ups[i].apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
# pre conv
x = self.conv_pre(x)
for i in range(self.num_upsamples):
# upsampling
for i_up in range(len(self.ups[i])):
x = self.ups[i][i_up](x)
# AMP blocks
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
# post conv
x = self.activation_post(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
for l_i in l:
remove_weight_norm(l_i)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
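# Hedged note on shapes: for a mel input of (B, num_mels, T), the generator
# returns a waveform of (B, 1, T * prod(h.upsample_rates)) whenever each
# stage's kernel_size minus its upsample rate is even (true for the usual
# configs), since the AMP blocks are length-preserving.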
class DiscriminatorP(torch.nn.Module):
def __init__(self, h, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
self.d_mult = h.discriminator_channel_mult
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, int(32*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(int(32*self.d_mult), int(128*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(int(128*self.d_mult), int(512*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(int(512*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(int(1024*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(int(1024*self.d_mult), 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self, h):
super(MultiPeriodDiscriminator, self).__init__()
self.mpd_reshapes = h.mpd_reshapes
print("mpd_reshapes: {}".format(self.mpd_reshapes))
discriminators = [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
self.discriminators = nn.ModuleList(discriminators)
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorR(nn.Module):
def __init__(self, cfg, resolution):
super().__init__()
self.resolution = resolution
assert len(self.resolution) == 3, \
"MRD layer requires list with len=3, got {}".format(self.resolution)
self.lrelu_slope = LRELU_SLOPE
norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm
if hasattr(cfg, "mrd_use_spectral_norm"):
print("INFO: overriding MRD use_spectral_norm as {}".format(cfg.mrd_use_spectral_norm))
norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm
self.d_mult = cfg.discriminator_channel_mult
if hasattr(cfg, "mrd_channel_mult"):
print("INFO: overriding mrd channel multiplier as {}".format(cfg.mrd_channel_mult))
self.d_mult = cfg.mrd_channel_mult
self.convs = nn.ModuleList([
norm_f(nn.Conv2d(1, int(32*self.d_mult), (3, 9), padding=(1, 4))),
norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 3), padding=(1, 1))),
])
self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))
def forward(self, x):
fmap = []
x = self.spectrogram(x)
x = x.unsqueeze(1)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, self.lrelu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def spectrogram(self, x):
n_fft, hop_length, win_length = self.resolution
x = F.pad(x, (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), mode='reflect')
x = x.squeeze(1)
x = torch.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, return_complex=True)
x = torch.view_as_real(x) # [B, F, TT, 2]
mag = torch.norm(x, p=2, dim=-1)  # [B, F, TT]
return mag
class MultiResolutionDiscriminator(nn.Module):
def __init__(self, cfg, debug=False):
super().__init__()
self.resolutions = cfg.resolutions
assert len(self.resolutions) == 3,\
"MRD requires list of list with len=3, each element having a list with len=3. got {}".\
format(self.resolutions)
self.discriminators = nn.ModuleList(
[DiscriminatorR(cfg, resolution) for resolution in self.resolutions]
)
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(x=y)
y_d_g, fmap_g = d(x=y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
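# The three losses above implement the least-squares GAN (LSGAN) objective:
# D is trained toward D(real)=1 and D(fake)=0, while G is trained toward
# D(G(x))=1. Hedged toy check: a perfectly fooled discriminator output of
# all ones yields zero generator loss.
#
#     loss, per_disc = generator_loss([torch.ones(4)])
#     assert loss.item() == 0.0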
| BigVGAN-main | models.py |
# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.
import torch
from torch import nn, sin, pow
from torch.nn import Parameter
class Snake(nn.Module):
'''
Implementation of a sine-based periodic activation function
Shape:
- Input: (B, C, T)
- Output: (B, C, T), same shape as the input
Parameters:
- alpha - trainable parameter
References:
- This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
https://arxiv.org/abs/2006.08195
Examples:
>>> a1 = Snake(256)
>>> x = torch.randn(2, 256, 100)  # (B, C, T)
>>> x = a1(x)
'''
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
'''
Initialization.
INPUT:
- in_features: shape of the input
- alpha: trainable parameter
alpha is initialized to 1 by default, higher values = higher-frequency.
alpha will be trained along with the rest of your model.
'''
super(Snake, self).__init__()
self.in_features = in_features
# initialize alpha
self.alpha_logscale = alpha_logscale
if self.alpha_logscale: # log scale alphas initialized to zeros
self.alpha = Parameter(torch.zeros(in_features) * alpha)
else: # linear scale alphas initialized to ones
self.alpha = Parameter(torch.ones(in_features) * alpha)
self.alpha.requires_grad = alpha_trainable
self.no_div_by_zero = 0.000000001
def forward(self, x):
'''
Forward pass of the function.
Applies the function to the input elementwise.
Snake ∶= x + 1/a * sin^2 (xa)
'''
alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
if self.alpha_logscale:
alpha = torch.exp(alpha)
x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
return x
class SnakeBeta(nn.Module):
'''
A modified Snake function which uses separate parameters for the magnitude of the periodic components
Shape:
- Input: (B, C, T)
- Output: (B, C, T), same shape as the input
Parameters:
- alpha - trainable parameter that controls frequency
- beta - trainable parameter that controls magnitude
References:
- This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
https://arxiv.org/abs/2006.08195
Examples:
>>> a1 = SnakeBeta(256)
>>> x = torch.randn(2, 256, 100)  # (B, C, T)
>>> x = a1(x)
'''
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
'''
Initialization.
INPUT:
- in_features: shape of the input
- alpha - trainable parameter that controls frequency
- beta - trainable parameter that controls magnitude
alpha is initialized to 1 by default, higher values = higher-frequency.
beta is initialized to 1 by default, higher values = higher-magnitude.
alpha will be trained along with the rest of your model.
'''
super(SnakeBeta, self).__init__()
self.in_features = in_features
# initialize alpha
self.alpha_logscale = alpha_logscale
if self.alpha_logscale: # log scale alphas initialized to zeros
self.alpha = Parameter(torch.zeros(in_features) * alpha)
self.beta = Parameter(torch.zeros(in_features) * alpha)
else: # linear scale alphas initialized to ones
self.alpha = Parameter(torch.ones(in_features) * alpha)
self.beta = Parameter(torch.ones(in_features) * alpha)
self.alpha.requires_grad = alpha_trainable
self.beta.requires_grad = alpha_trainable
self.no_div_by_zero = 0.000000001
def forward(self, x):
'''
Forward pass of the function.
Applies the function to the input elementwise.
SnakeBeta ∶= x + 1/b * sin^2 (xa)
'''
alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
beta = self.beta.unsqueeze(0).unsqueeze(-1)
if self.alpha_logscale:
alpha = torch.exp(alpha)
beta = torch.exp(beta)
x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
return x
 | BigVGAN-main | activations.py
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import glob
import os
import matplotlib
import torch
from torch.nn.utils import weight_norm
matplotlib.use("Agg")
import matplotlib.pylab as plt
from meldataset import MAX_WAV_VALUE
from scipy.io.wavfile import write
def plot_spectrogram(spectrogram):
fig, ax = plt.subplots(figsize=(10, 2))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
fig.canvas.draw()
plt.close()
return fig
def plot_spectrogram_clipped(spectrogram, clip_max=2.):
fig, ax = plt.subplots(figsize=(10, 2))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none', vmin=1e-6, vmax=clip_max)
plt.colorbar(im, ax=ax)
fig.canvas.draw()
plt.close()
return fig
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def apply_weight_norm(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
weight_norm(m)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
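# e.g. get_padding(3) == 1 and get_padding(7, dilation=2) == 6: the padding
# that keeps a stride-1 Conv1d length-preserving ("same" padding).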
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def save_checkpoint(filepath, obj):
print("Saving checkpoint to {}".format(filepath))
torch.save(obj, filepath)
print("Complete.")
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '????????')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return None
return sorted(cp_list)[-1]
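# e.g. with g_00010000 and g_00020000 in cp_dir, scan_checkpoint(cp_dir, 'g_')
# returns the lexicographically last match, here '<cp_dir>/g_00020000'.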
def save_audio(audio, path, sr):
# audio: 1-D torch tensor, expected in [-1, 1]
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
write(path, sr, audio)
 | BigVGAN-main | utils.py
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import numpy as np
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import MAX_WAV_VALUE
from models import BigVGAN as Generator
h = None
device = None
torch.backends.cudnn.benchmark = False
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a, h):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_mels_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
# load the mel spectrogram in .npy format
x = np.load(os.path.join(a.input_mels_dir, filname))
x = torch.FloatTensor(x).to(device)
if len(x.shape) == 2:
x = x.unsqueeze(0)
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated_e2e.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_mels_dir', default='test_mel_files')
parser.add_argument('--output_dir', default='generated_files_from_mel')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
h = AttrDict(json_config)
torch.manual_seed(h.seed)
global device
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda')
else:
device = torch.device('cpu')
inference(a, h)
if __name__ == '__main__':
main()
| BigVGAN-main | inference_e2e.py |
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
import torch.multiprocessing as mp
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from env import AttrDict, build_env
from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist, MAX_WAV_VALUE
from models import BigVGAN, MultiPeriodDiscriminator, MultiResolutionDiscriminator,\
feature_loss, generator_loss, discriminator_loss
from utils import plot_spectrogram, plot_spectrogram_clipped, scan_checkpoint, load_checkpoint, save_checkpoint, save_audio
import torchaudio as ta
from pesq import pesq
from tqdm import tqdm
import auraloss
torch.backends.cudnn.benchmark = False
def train(rank, a, h):
if h.num_gpus > 1:
# initialize distributed
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
# set seed and device
torch.cuda.manual_seed(h.seed)
torch.cuda.set_device(rank)
device = torch.device('cuda:{:d}'.format(rank))
# define BigVGAN generator
generator = BigVGAN(h).to(device)
print("Generator params: {}".format(sum(p.numel() for p in generator.parameters())))
# define discriminators. MPD is used by default
mpd = MultiPeriodDiscriminator(h).to(device)
print("Discriminator mpd params: {}".format(sum(p.numel() for p in mpd.parameters())))
# define additional discriminators. BigVGAN uses MRD as default
mrd = MultiResolutionDiscriminator(h).to(device)
print("Discriminator mrd params: {}".format(sum(p.numel() for p in mrd.parameters())))
# create or scan the latest checkpoint from checkpoints directory
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
# load the latest checkpoint if exists
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
state_dict_g = load_checkpoint(cp_g, device)
state_dict_do = load_checkpoint(cp_do, device)
generator.load_state_dict(state_dict_g['generator'])
mpd.load_state_dict(state_dict_do['mpd'])
mrd.load_state_dict(state_dict_do['mrd'])
steps = state_dict_do['steps'] + 1
last_epoch = state_dict_do['epoch']
# initialize DDP, optimizers, and schedulers
if h.num_gpus > 1:
generator = DistributedDataParallel(generator, device_ids=[rank]).to(device)
mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
mrd = DistributedDataParallel(mrd, device_ids=[rank]).to(device)
optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
optim_d = torch.optim.AdamW(itertools.chain(mrd.parameters(), mpd.parameters()),
h.learning_rate, betas=[h.adam_b1, h.adam_b2])
if state_dict_do is not None:
optim_g.load_state_dict(state_dict_do['optim_g'])
optim_d.load_state_dict(state_dict_do['optim_d'])
scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
# define training and validation datasets
# unseen_validation_filelist will contain sample filepaths outside the seen training & validation dataset
# example: trained on LibriTTS, validate on VCTK
training_filelist, validation_filelist, list_unseen_validation_filelist = get_dataset_filelist(a)
trainset = MelDataset(training_filelist, h, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device,
fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir, is_seen=True)
train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False,
sampler=train_sampler,
batch_size=h.batch_size,
pin_memory=True,
drop_last=True)
if rank == 0:
validset = MelDataset(validation_filelist, h, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, n_cache_reuse=0,
fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,
base_mels_path=a.input_mels_dir, is_seen=True)
validation_loader = DataLoader(validset, num_workers=1, shuffle=False,
sampler=None,
batch_size=1,
pin_memory=True,
drop_last=True)
list_unseen_validset = []
list_unseen_validation_loader = []
for i in range(len(list_unseen_validation_filelist)):
unseen_validset = MelDataset(list_unseen_validation_filelist[i], h, h.segment_size, h.n_fft, h.num_mels,
h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, n_cache_reuse=0,
fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning,
base_mels_path=a.input_mels_dir, is_seen=False)
unseen_validation_loader = DataLoader(unseen_validset, num_workers=1, shuffle=False,
sampler=None,
batch_size=1,
pin_memory=True,
drop_last=True)
list_unseen_validset.append(unseen_validset)
list_unseen_validation_loader.append(unseen_validation_loader)
# Tensorboard logger
sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
if a.save_audio: # also save audio to disk if --save_audio is set to True
os.makedirs(os.path.join(a.checkpoint_path, 'samples'), exist_ok=True)
# validation loop
# "mode" parameter is automatically defined as (seen or unseen)_(name of the dataset)
# if the name of the dataset contains "nonspeech", it skips PESQ calculation to prevent errors
def validate(rank, a, h, loader, mode="seen"):
assert rank == 0, "validate should only run on rank=0"
generator.eval()
torch.cuda.empty_cache()
val_err_tot = 0
val_pesq_tot = 0
val_mrstft_tot = 0
# modules for evaluation metrics
pesq_resampler = ta.transforms.Resample(h.sampling_rate, 16000).cuda()
loss_mrstft = auraloss.freq.MultiResolutionSTFTLoss(device="cuda")
if a.save_audio: # also save audio to disk if --save_audio is set to True
os.makedirs(os.path.join(a.checkpoint_path, 'samples', 'gt_{}'.format(mode)), exist_ok=True)
os.makedirs(os.path.join(a.checkpoint_path, 'samples', '{}_{:08d}'.format(mode, steps)), exist_ok=True)
with torch.no_grad():
print("step {} {} speaker validation...".format(steps, mode))
# loop over validation set and compute metrics
for j, batch in tqdm(enumerate(loader)):
x, y, _, y_mel = batch
y = y.to(device)
if hasattr(generator, 'module'):
y_g_hat = generator.module(x.to(device))
else:
y_g_hat = generator(x.to(device))
y_mel = y_mel.to(device, non_blocking=True)
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate,
h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item()
# PESQ calculation. only evaluate PESQ if it's speech signal (nonspeech PESQ will error out)
if not "nonspeech" in mode: # skips if the name of dataset (in mode string) contains "nonspeech"
# resample to 16000 for pesq
y_16k = pesq_resampler(y)
y_g_hat_16k = pesq_resampler(y_g_hat.squeeze(1))
y_int_16k = (y_16k[0] * MAX_WAV_VALUE).short().cpu().numpy()
y_g_hat_int_16k = (y_g_hat_16k[0] * MAX_WAV_VALUE).short().cpu().numpy()
val_pesq_tot += pesq(16000, y_int_16k, y_g_hat_int_16k, 'wb')
# MRSTFT calculation
val_mrstft_tot += loss_mrstft(y_g_hat.squeeze(1), y).item()
# log audio and figures to Tensorboard
if j % a.eval_subsample == 0: # subsample every nth from validation set
if steps >= 0:
sw.add_audio('gt_{}/y_{}'.format(mode, j), y[0], steps, h.sampling_rate)
if a.save_audio: # also save audio to disk if --save_audio is set to True
save_audio(y[0], os.path.join(a.checkpoint_path, 'samples', 'gt_{}'.format(mode), '{:04d}.wav'.format(j)), h.sampling_rate)
sw.add_figure('gt_{}/y_spec_{}'.format(mode, j), plot_spectrogram(x[0]), steps)
sw.add_audio('generated_{}/y_hat_{}'.format(mode, j), y_g_hat[0], steps, h.sampling_rate)
if a.save_audio: # also save audio to disk if --save_audio is set to True
save_audio(y_g_hat[0, 0], os.path.join(a.checkpoint_path, 'samples', '{}_{:08d}'.format(mode, steps), '{:04d}.wav'.format(j)), h.sampling_rate)
# spectrogram of synthesized audio
y_hat_spec = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels,
h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax)
sw.add_figure('generated_{}/y_hat_spec_{}'.format(mode, j),
plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), steps)
# visualization of spectrogram difference between GT and synthesized audio
# difference higher than 1 is clipped for better visualization
spec_delta = torch.clamp(torch.abs(x[0] - y_hat_spec.squeeze(0).cpu()), min=1e-6, max=1.)
sw.add_figure('delta_dclip1_{}/spec_{}'.format(mode, j),
plot_spectrogram_clipped(spec_delta.numpy(), clip_max=1.), steps)
val_err = val_err_tot / (j + 1)
val_pesq = val_pesq_tot / (j + 1)
val_mrstft = val_mrstft_tot / (j + 1)
# log evaluation metrics to Tensorboard
sw.add_scalar("validation_{}/mel_spec_error".format(mode), val_err, steps)
sw.add_scalar("validation_{}/pesq".format(mode), val_pesq, steps)
sw.add_scalar("validation_{}/mrstft".format(mode), val_mrstft, steps)
generator.train()
# if the checkpoint is loaded, start with validation loop
if steps != 0 and rank == 0 and not a.debug:
if not a.skip_seen:
validate(rank, a, h, validation_loader,
mode="seen_{}".format(train_loader.dataset.name))
for i in range(len(list_unseen_validation_loader)):
validate(rank, a, h, list_unseen_validation_loader[i],
mode="unseen_{}".format(list_unseen_validation_loader[i].dataset.name))
# exit the script if --evaluate is set to True
if a.evaluate:
exit()
# main training loop
generator.train()
mpd.train()
mrd.train()
for epoch in range(max(0, last_epoch), a.training_epochs):
if rank == 0:
start = time.time()
print("Epoch: {}".format(epoch+1))
if h.num_gpus > 1:
train_sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
if rank == 0:
start_b = time.time()
x, y, _, y_mel = batch
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
y_mel = y_mel.to(device, non_blocking=True)
y = y.unsqueeze(1)
y_g_hat = generator(x)
y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size,
h.fmin, h.fmax_for_loss)
optim_d.zero_grad()
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
# MRD
y_ds_hat_r, y_ds_hat_g, _, _ = mrd(y, y_g_hat.detach())
loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_all = loss_disc_s + loss_disc_f
# whether to freeze D for initial training steps
if steps >= a.freeze_step:
loss_disc_all.backward()
grad_norm_mpd = torch.nn.utils.clip_grad_norm_(mpd.parameters(), 1000.)
grad_norm_mrd = torch.nn.utils.clip_grad_norm_(mrd.parameters(), 1000.)
optim_d.step()
else:
print("WARNING: skipping D training for the first {} steps".format(a.freeze_step))
grad_norm_mpd = 0.
grad_norm_mrd = 0.
# generator
optim_g.zero_grad()
# L1 Mel-Spectrogram Loss
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
# MPD loss
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
# MRD loss
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = mrd(y, y_g_hat)
loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
if steps >= a.freeze_step:
loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
else:
print("WARNING: using regression loss only for G for the first {} steps".format(a.freeze_step))
loss_gen_all = loss_mel
loss_gen_all.backward()
grad_norm_g = torch.nn.utils.clip_grad_norm_(generator.parameters(), 1000.)
optim_g.step()
if rank == 0:
# STDOUT logging
if steps % a.stdout_interval == 0:
with torch.no_grad():
mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()
print('Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. Error : {:4.3f}, s/b : {:4.3f}'.
format(steps, loss_gen_all, mel_error, time.time() - start_b))
# checkpointing
if steps % a.checkpoint_interval == 0 and steps != 0:
checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, steps)
save_checkpoint(checkpoint_path,
{'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})
checkpoint_path = "{}/do_{:08d}".format(a.checkpoint_path, steps)
save_checkpoint(checkpoint_path,
{'mpd': (mpd.module if h.num_gpus > 1 else mpd).state_dict(),
'mrd': (mrd.module if h.num_gpus > 1 else mrd).state_dict(),
'optim_g': optim_g.state_dict(),
'optim_d': optim_d.state_dict(),
'steps': steps,
'epoch': epoch})
# Tensorboard summary logging
if steps % a.summary_interval == 0:
sw.add_scalar("training/gen_loss_total", loss_gen_all, steps)
sw.add_scalar("training/mel_spec_error", mel_error, steps)
sw.add_scalar("training/fm_loss_mpd", loss_fm_f.item(), steps)
sw.add_scalar("training/gen_loss_mpd", loss_gen_f.item(), steps)
sw.add_scalar("training/disc_loss_mpd", loss_disc_f.item(), steps)
sw.add_scalar("training/grad_norm_mpd", grad_norm_mpd, steps)
sw.add_scalar("training/fm_loss_mrd", loss_fm_s.item(), steps)
sw.add_scalar("training/gen_loss_mrd", loss_gen_s.item(), steps)
sw.add_scalar("training/disc_loss_mrd", loss_disc_s.item(), steps)
sw.add_scalar("training/grad_norm_mrd", grad_norm_mrd, steps)
sw.add_scalar("training/grad_norm_g", grad_norm_g, steps)
sw.add_scalar("training/learning_rate_d", scheduler_d.get_last_lr()[0], steps)
sw.add_scalar("training/learning_rate_g", scheduler_g.get_last_lr()[0], steps)
sw.add_scalar("training/epoch", epoch+1, steps)
# validation
if steps % a.validation_interval == 0:
# plot training input x so far used
for i_x in range(x.shape[0]):
sw.add_figure('training_input/x_{}'.format(i_x), plot_spectrogram(x[i_x].cpu()), steps)
sw.add_audio('training_input/y_{}'.format(i_x), y[i_x][0], steps, h.sampling_rate)
# seen and unseen speakers validation loops
if not a.debug and steps != 0:
validate(rank, a, h, validation_loader,
mode="seen_{}".format(train_loader.dataset.name))
for i in range(len(list_unseen_validation_loader)):
validate(rank, a, h, list_unseen_validation_loader[i],
mode="unseen_{}".format(list_unseen_validation_loader[i].dataset.name))
steps += 1
scheduler_g.step()
scheduler_d.step()
if rank == 0:
print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
def main():
print('Initializing Training Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--group_name', default=None)
parser.add_argument('--input_wavs_dir', default='LibriTTS')
parser.add_argument('--input_mels_dir', default='ft_dataset')
parser.add_argument('--input_training_file', default='LibriTTS/train-full.txt')
parser.add_argument('--input_validation_file', default='LibriTTS/val-full.txt')
parser.add_argument('--list_input_unseen_wavs_dir', nargs='+', default=['LibriTTS', 'LibriTTS'])
parser.add_argument('--list_input_unseen_validation_file', nargs='+', default=['LibriTTS/dev-clean.txt', 'LibriTTS/dev-other.txt'])
parser.add_argument('--checkpoint_path', default='exp/bigvgan')
parser.add_argument('--config', default='')
parser.add_argument('--training_epochs', default=100000, type=int)
parser.add_argument('--stdout_interval', default=5, type=int)
parser.add_argument('--checkpoint_interval', default=50000, type=int)
parser.add_argument('--summary_interval', default=100, type=int)
parser.add_argument('--validation_interval', default=50000, type=int)
parser.add_argument('--freeze_step', default=0, type=int,
help='freeze D for the first specified steps. G only uses regression loss for these steps.')
parser.add_argument('--fine_tuning', default=False, type=bool)
parser.add_argument('--debug', default=False, type=bool,
help="debug mode. skips validation loop throughout training")
parser.add_argument('--evaluate', default=False, type=bool,
help="only run evaluation from checkpoint and exit")
parser.add_argument('--eval_subsample', default=5, type=int,
help="subsampling during evaluation loop")
parser.add_argument('--skip_seen', default=False, type=bool,
help="skip seen dataset. useful for test set inference")
parser.add_argument('--save_audio', default=False, type=bool,
help="save audio of test set inference to disk")
a = parser.parse_args()
with open(a.config) as f:
data = f.read()
json_config = json.loads(data)
h = AttrDict(json_config)
build_env(a.config, 'config.json', a.checkpoint_path)
torch.manual_seed(h.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
h.num_gpus = torch.cuda.device_count()
h.batch_size = int(h.batch_size / h.num_gpus)
print('Batch size per GPU :', h.batch_size)
else:
pass
if h.num_gpus > 1:
mp.spawn(train, nprocs=h.num_gpus, args=(a, h,))
else:
train(0, a, h)
if __name__ == '__main__':
main()
| BigVGAN-main | train.py |
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE
from models import BigVGAN as Generator
import librosa
h = None
device = None
torch.backends.cudnn.benchmark = False
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a, h):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_wavs_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
# load the ground truth audio and resample if necessary
wav, sr = librosa.load(os.path.join(a.input_wavs_dir, filname), sr=h.sampling_rate, mono=True)
wav = torch.FloatTensor(wav).to(device)
# compute mel spectrogram from the ground truth audio
x = get_mel(wav.unsqueeze(0))
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_wavs_dir', default='test_files')
parser.add_argument('--output_dir', default='generated_files')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
h = AttrDict(json_config)
torch.manual_seed(h.seed)
global device
if torch.cuda.is_available():
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda')
else:
device = torch.device('cpu')
inference(a, h)
if __name__ == '__main__':
main()
| BigVGAN-main | inference.py |
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import math
import os
import random
import torch
import torch.utils.data
import numpy as np
from librosa.util import normalize
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn
import pathlib
from tqdm import tqdm
MAX_WAV_VALUE = 32768.0
def load_wav(full_path, sr_target):
sampling_rate, data = read(full_path)
if sampling_rate != sr_target:
raise RuntimeError("Sampling rate of the file {} is {} Hz, but the model requires {} Hz".
format(full_path, sampling_rate, sr_target))
return data, sampling_rate
def dynamic_range_compression(x, C=1, clip_val=1e-5):
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
def spectral_de_normalize_torch(magnitudes):
output = dynamic_range_decompression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
if str(fmax)+'_'+str(y.device) not in mel_basis:  # key must match the format stored below
    mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
# complex tensor as default, then use view_as_real for future pytorch compatibility
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec
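# Hedged usage sketch: with the reflect padding above and center=False, an
# input whose length is a multiple of hop_size yields exactly
# len(y) // hop_size frames. Parameter values are illustrative only:
#
#     y = torch.randn(1, 8192).clamp(-1, 1)  # 32 * 256 samples
#     m = mel_spectrogram(y, n_fft=1024, num_mels=80, sampling_rate=22050,
#                         hop_size=256, win_size=1024, fmin=0, fmax=8000)
#     # m.shape -> (1, 80, 32)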
def get_dataset_filelist(a):
with open(a.input_training_file, 'r', encoding='utf-8') as fi:
training_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
print("first training file: {}".format(training_files[0]))
with open(a.input_validation_file, 'r', encoding='utf-8') as fi:
validation_files = [os.path.join(a.input_wavs_dir, x.split('|')[0] + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
print("first validation file: {}".format(validation_files[0]))
list_unseen_validation_files = []
for i in range(len(a.list_input_unseen_validation_file)):
with open(a.list_input_unseen_validation_file[i], 'r', encoding='utf-8') as fi:
unseen_validation_files = [os.path.join(a.list_input_unseen_wavs_dir[i], x.split('|')[0] + '.wav')
for x in fi.read().split('\n') if len(x) > 0]
print("first unseen {}th validation fileset: {}".format(i, unseen_validation_files[0]))
list_unseen_validation_files.append(unseen_validation_files)
return training_files, validation_files, list_unseen_validation_files
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, hparams, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,
device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None, is_seen=True):
self.audio_files = training_files
random.seed(1234)
if shuffle:
random.shuffle(self.audio_files)
self.hparams = hparams
self.is_seen = is_seen
if self.is_seen:
self.name = pathlib.Path(self.audio_files[0]).parts[0]
else:
self.name = '-'.join(pathlib.Path(self.audio_files[0]).parts[:2]).strip("/")
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.cached_wav = None
self.n_cache_reuse = n_cache_reuse
self._cache_ref_count = 0
self.device = device
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
print("INFO: checking dataset integrity...")
for i in tqdm(range(len(self.audio_files))):
assert os.path.exists(self.audio_files[i]), "{} not found".format(self.audio_files[i])
def __getitem__(self, index):
filename = self.audio_files[index]
if self._cache_ref_count == 0:
audio, sampling_rate = load_wav(filename, self.sampling_rate)
audio = audio / MAX_WAV_VALUE
if not self.fine_tuning:
audio = normalize(audio) * 0.95
self.cached_wav = audio
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
self._cache_ref_count = self.n_cache_reuse
else:
audio = self.cached_wav
self._cache_ref_count -= 1
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = random.randint(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,
center=False)
else: # validation step
# match audio length to self.hop_size * n for evaluation
if (audio.size(1) % self.hop_size) != 0:
audio = audio[:, :-(audio.size(1) % self.hop_size)]
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,
center=False)
assert audio.shape[1] == mel.shape[2] * self.hop_size, "audio shape {} mel shape {}".format(audio.shape, mel.shape)
else:
mel = np.load(
os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel)
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]
else:
mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files)
| BigVGAN-main | meldataset.py |
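# --- Illustrative example (editor's sketch, not part of BigVGAN) ---
# A minimal, self-contained rerun of the reflect-pad + STFT-magnitude step used
# by mel_spectrogram above. The n_fft/hop/win values and the dummy waveform are
# assumptions chosen for the demo, not taken from a repo config.
import torch

def stft_magnitude_demo():
    n_fft, hop_size, win_size = 1024, 256, 1024   # assumed demo values
    y = torch.randn(1, 8192)                      # dummy [batch, samples] waveform
    pad = (n_fft - hop_size) // 2
    y = torch.nn.functional.pad(y.unsqueeze(1), (pad, pad), mode='reflect').squeeze(1)
    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size,
                      window=torch.hann_window(win_size), center=False,
                      pad_mode='reflect', normalized=False, onesided=True,
                      return_complex=True)
    # same magnitude computation as above: view_as_real, then L2 over (re, im)
    return torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9)

if __name__ == "__main__":
    print(stft_magnitude_demo().shape)  # [1, n_fft // 2 + 1, frames]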
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.
import torch.nn as nn
from torch.nn import functional as F
from .filter import LowPassFilter1d
from .filter import kaiser_sinc_filter1d
class UpSample1d(nn.Module):
def __init__(self, ratio=2, kernel_size=None):
super().__init__()
self.ratio = ratio
self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
self.stride = ratio
self.pad = self.kernel_size // ratio - 1
self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio,
half_width=0.6 / ratio,
kernel_size=self.kernel_size)
self.register_buffer("filter", filter)
# x: [B, C, T]
def forward(self, x):
_, C, _ = x.shape
x = F.pad(x, (self.pad, self.pad), mode='replicate')
x = self.ratio * F.conv_transpose1d(
x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
x = x[..., self.pad_left:-self.pad_right]
return x
class DownSample1d(nn.Module):
def __init__(self, ratio=2, kernel_size=None):
super().__init__()
self.ratio = ratio
self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio,
half_width=0.6 / ratio,
stride=ratio,
kernel_size=self.kernel_size)
def forward(self, x):
xx = self.lowpass(x)
        return xx
 | BigVGAN-main | alias_free_torch/resample.py |
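# --- Illustrative example (editor's sketch, not part of the repo) ---
# Round-trip through the two modules above; assumes this file is importable
# as alias_free_torch.resample (i.e. the package root is on sys.path).
import torch
from alias_free_torch.resample import UpSample1d, DownSample1d

if __name__ == "__main__":
    x = torch.randn(4, 8, 256)        # [B, C, T]
    y = UpSample1d(ratio=2)(x)        # -> [4, 8, 512]
    z = DownSample1d(ratio=2)(y)      # -> back to [4, 8, 256]
    print(y.shape, z.shape)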
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.
from .filter import *
from .resample import *
from .act import *
 | BigVGAN-main | alias_free_torch/__init__.py |
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.
import torch.nn as nn
from .resample import UpSample1d, DownSample1d
class Activation1d(nn.Module):
def __init__(self,
activation,
up_ratio: int = 2,
down_ratio: int = 2,
up_kernel_size: int = 12,
down_kernel_size: int = 12):
super().__init__()
self.up_ratio = up_ratio
self.down_ratio = down_ratio
self.act = activation
self.upsample = UpSample1d(up_ratio, up_kernel_size)
self.downsample = DownSample1d(down_ratio, down_kernel_size)
# x: [B,C,T]
def forward(self, x):
x = self.upsample(x)
x = self.act(x)
x = self.downsample(x)
        return x
 | BigVGAN-main | alias_free_torch/act.py |
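# --- Illustrative example (editor's sketch, not part of the repo) ---
# Wrapping a plain nonlinearity with the anti-aliased up/down sampling above;
# assumes the package is importable as alias_free_torch.
import torch
from alias_free_torch.act import Activation1d

if __name__ == "__main__":
    act = Activation1d(activation=torch.nn.SiLU())
    x = torch.randn(2, 16, 128)       # [B, C, T]
    print(act(x).shape)               # same shape: upsample 2x, activate, downsample 2x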
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
if 'sinc' in dir(torch):
sinc = torch.sinc
else:
    # This code is adapted from adefossez's julius.core.sinc under the MIT License
# https://adefossez.github.io/julius/julius/core.html
# LICENSE is in incl_licenses directory.
def sinc(x: torch.Tensor):
"""
Implementation of sinc, i.e. sin(pi * x) / (pi * x)
        __Warning__: Different from julius.sinc, the input is multiplied by `pi`!
"""
return torch.where(x == 0,
torch.tensor(1., device=x.device, dtype=x.dtype),
torch.sin(math.pi * x) / math.pi / x)
# This code is adapted from adefossez's julius.lowpass.LowPassFilters under the MIT License
# https://adefossez.github.io/julius/julius/lowpass.html
# LICENSE is in incl_licenses directory.
def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size]
even = (kernel_size % 2 == 0)
half_size = kernel_size // 2
#For kaiser window
delta_f = 4 * half_width
A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
if A > 50.:
beta = 0.1102 * (A - 8.7)
elif A >= 21.:
beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.)
else:
beta = 0.
window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
# ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
if even:
time = (torch.arange(-half_size, half_size) + 0.5)
else:
time = torch.arange(kernel_size) - half_size
if cutoff == 0:
filter_ = torch.zeros_like(time)
else:
filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
# Normalize filter to have sum = 1, otherwise we will have a small leakage
# of the constant component in the input signal.
filter_ /= filter_.sum()
filter = filter_.view(1, 1, kernel_size)
return filter
class LowPassFilter1d(nn.Module):
def __init__(self,
cutoff=0.5,
half_width=0.6,
stride: int = 1,
padding: bool = True,
padding_mode: str = 'replicate',
kernel_size: int = 12):
        # kernel_size should be an even number for the StyleGAN3 setup;
        # in this implementation, odd numbers are also possible.
        super().__init__()
        if cutoff < 0.:
            raise ValueError("Minimum cutoff must not be negative.")
if cutoff > 0.5:
raise ValueError("A cutoff above 0.5 does not make sense.")
self.kernel_size = kernel_size
self.even = (kernel_size % 2 == 0)
self.pad_left = kernel_size // 2 - int(self.even)
self.pad_right = kernel_size // 2
self.stride = stride
self.padding = padding
self.padding_mode = padding_mode
filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
self.register_buffer("filter", filter)
#input [B, C, T]
def forward(self, x):
_, C, _ = x.shape
if self.padding:
x = F.pad(x, (self.pad_left, self.pad_right),
mode=self.padding_mode)
out = F.conv1d(x, self.filter.expand(C, -1, -1),
stride=self.stride, groups=C)
        return out
 | BigVGAN-main | alias_free_torch/filter.py |
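# --- Illustrative example (editor's sketch, not part of the repo) ---
# The Kaiser-windowed sinc filter above is normalized to sum to 1, and
# LowPassFilter1d with stride 1 preserves the time dimension; the cutoff and
# half_width values here are arbitrary demo choices.
import torch
from alias_free_torch.filter import kaiser_sinc_filter1d, LowPassFilter1d

if __name__ == "__main__":
    filt = kaiser_sinc_filter1d(cutoff=0.25, half_width=0.3, kernel_size=12)
    print(filt.shape, float(filt.sum()))    # torch.Size([1, 1, 12]) ~1.0
    lp = LowPassFilter1d(cutoff=0.25, half_width=0.3, stride=1, kernel_size=12)
    print(lp(torch.randn(1, 3, 64)).shape)  # torch.Size([1, 3, 64])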
# Copyright (c) 2022 NVIDIA CORPORATION.
# Licensed under the MIT license.
import os, glob
def get_wav_and_text_filelist(data_root, data_type, subsample=1):
wav_list = sorted([path.replace(data_root, "")[1:] for path in glob.glob(os.path.join(data_root, data_type, "**/**/*.wav"))])
wav_list = wav_list[::subsample]
txt_filelist = [path.replace('.wav', '.normalized.txt') for path in wav_list]
txt_list = []
for txt_file in txt_filelist:
with open(os.path.join(data_root, txt_file), 'r') as f_txt:
text = f_txt.readline().strip('\n')
txt_list.append(text)
wav_list = [path.replace('.wav', '') for path in wav_list]
return wav_list, txt_list
def write_filelist(output_path, wav_list, txt_list):
with open(output_path, 'w') as f:
for i in range(len(wav_list)):
filename = wav_list[i] + '|' + txt_list[i]
f.write(filename + '\n')
if __name__ == "__main__":
data_root = "LibriTTS"
    # dev and test sets: subsample each set to get ~100 utterances
data_type_list = ["dev-clean", "dev-other", "test-clean", "test-other"]
subsample_list = [50, 50, 50, 50]
for (data_type, subsample) in zip(data_type_list, subsample_list):
print("processing {}".format(data_type))
data_path = os.path.join(data_root, data_type)
assert os.path.exists(data_path),\
"path {} not found. make sure the path is accessible by creating the symbolic link using the following command: "\
"ln -s /path/to/your/{} {}".format(data_path, data_path, data_path)
wav_list, txt_list = get_wav_and_text_filelist(data_root, data_type, subsample)
write_filelist(os.path.join(data_root, data_type+".txt"), wav_list, txt_list)
# training and seen speaker validation datasets (libritts-full): train-clean-100 + train-clean-360 + train-other-500
wav_list_train, txt_list_train = [], []
for data_type in ["train-clean-100", "train-clean-360", "train-other-500"]:
print("processing {}".format(data_type))
data_path = os.path.join(data_root, data_type)
assert os.path.exists(data_path),\
"path {} not found. make sure the path is accessible by creating the symbolic link using the following command: "\
"ln -s /path/to/your/{} {}".format(data_path, data_path, data_path)
wav_list, txt_list = get_wav_and_text_filelist(data_root, data_type)
wav_list_train.extend(wav_list)
txt_list_train.extend(txt_list)
# split the training set so that the seen speaker validation set contains ~100 utterances
subsample_val = 3000
wav_list_val, txt_list_val = wav_list_train[::subsample_val], txt_list_train[::subsample_val]
del wav_list_train[::subsample_val]
del txt_list_train[::subsample_val]
write_filelist(os.path.join(data_root, "train-full.txt"), wav_list_train, txt_list_train)
write_filelist(os.path.join(data_root, "val-full.txt"), wav_list_val, txt_list_val)
print("done") | BigVGAN-main | parse_scripts/parse_libritts.py |
import os
import ctypes
import time
import sys
import argparse
import cv2
import numpy as np
from PIL import Image
import tensorrt as trt
import utils.inference as inference_utils # TRT/TF inference wrappers
import utils.model as model_utils # UFF conversion
import utils.boxes as boxes_utils # Drawing bounding boxes
import utils.coco as coco_utils # COCO dataset descriptors
from utils.paths import PATHS # Path management
import pycuda.driver as cuda
import pycuda.autoinit
# COCO label list
COCO_LABELS = coco_utils.COCO_CLASSES_LIST
# Model used for inference
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
# Confidence threshold for drawing bounding box
VISUALIZATION_THRESHOLD = 0.5
# Precision command line argument -> TRT Engine datatype
TRT_PRECISION_TO_DATATYPE = {
16: trt.DataType.HALF,
32: trt.DataType.FLOAT,
8: trt.DataType.INT8
}
# Layout of TensorRT network output metadata
TRT_PREDICTION_LAYOUT = {
"image_id": 0,
"label": 1,
"confidence": 2,
"xmin": 3,
"ymin": 4,
"xmax": 5,
"ymax": 6
}
def fetch_prediction_field(field_name, detection_out, pred_start_idx):
"""Fetches prediction field from prediction byte array.
After TensorRT inference, prediction data is saved in
byte array and returned by object detection network.
This byte array contains several pieces of data about
prediction - we call one such piece a prediction field.
The prediction fields layout is described in TRT_PREDICTION_LAYOUT.
This function, given prediction byte array returned by network,
    starting index of given prediction and field name of interest,
returns prediction field data corresponding to given arguments.
Args:
field_name (str): field of interest, one of keys of TRT_PREDICTION_LAYOUT
detection_out (array): object detection network output
pred_start_idx (int): start index of prediction of interest in detection_out
Returns:
Prediction field corresponding to given data.
"""
return detection_out[pred_start_idx + TRT_PREDICTION_LAYOUT[field_name]]
def analyze_prediction(detection_out, pred_start_idx, img_pil):
image_id = int(fetch_prediction_field("image_id", detection_out, pred_start_idx))
label = int(fetch_prediction_field("label", detection_out, pred_start_idx))
confidence = fetch_prediction_field("confidence", detection_out, pred_start_idx)
xmin = fetch_prediction_field("xmin", detection_out, pred_start_idx)
ymin = fetch_prediction_field("ymin", detection_out, pred_start_idx)
xmax = fetch_prediction_field("xmax", detection_out, pred_start_idx)
ymax = fetch_prediction_field("ymax", detection_out, pred_start_idx)
if confidence > VISUALIZATION_THRESHOLD:
class_name = COCO_LABELS[label]
confidence_percentage = "{0:.0%}".format(confidence)
print("Detected {} with confidence {}".format(
class_name, confidence_percentage))
boxes_utils.draw_bounding_boxes_on_image(
img_pil, np.array([[ymin, xmin, ymax, xmax]]),
display_str_list=["{}: {}".format(
class_name, confidence_percentage)],
color=coco_utils.COCO_COLORS[label]
)
def parse_commandline_arguments():
"""Parses command line arguments and adjusts internal data structures."""
# Define script command line arguments
parser = argparse.ArgumentParser(description='Run object detection inference on input image.')
parser.add_argument('--input_img_path', metavar='INPUT_IMG_PATH',
help='an image file to run inference on')
parser.add_argument('-p', '--precision', type=int, choices=[32, 16, 8], default=32,
help='desired TensorRT float precision to build an engine with')
parser.add_argument('-b', '--max_batch_size', type=int, default=1,
help='max TensorRT engine batch size')
parser.add_argument('-w', '--workspace_dir',
help='sample workspace directory')
parser.add_argument('-fc', '--flatten_concat',
help='path of built FlattenConcat plugin')
parser.add_argument('-d', '--calib_dataset', default='../VOCdevkit/VOC2007/JPEGImages',
help='path to the calibration dataset')
parser.add_argument('-c', '--camera', default=True,
help='if True, will run webcam application')
# Parse arguments passed
args = parser.parse_args()
# Set FlattenConcat TRT plugin path and
# workspace dir path if passed by user
if args.flatten_concat:
PATHS.set_flatten_concat_plugin_path(args.flatten_concat)
    if args.workspace_dir:
        PATHS.set_workspace_dir_path(args.workspace_dir)
    # Create the workspace directory if it does not exist yet
    os.makedirs(PATHS.get_workspace_dir_path(), exist_ok=True)
# Verify Paths after adjustments. This also exits script if verification fails
PATHS.verify_all_paths()
# Fetch TensorRT engine path and datatype
args.trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
args.trt_engine_path = PATHS.get_engine_path(args.trt_engine_datatype,
args.max_batch_size)
    # Create the engine output directory if it does not exist yet
    os.makedirs(os.path.dirname(args.trt_engine_path), exist_ok=True)
return args
def main():
# Parse command line arguments
args = parse_commandline_arguments()
# Fetch .uff model path, convert from .pb
# if needed, using prepare_ssd_model
ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
if not os.path.exists(ssd_model_uff_path):
model_utils.prepare_ssd_model(MODEL_NAME)
# Set up all TensorRT data structures needed for inference
trt_inference_wrapper = inference_utils.TRTInference(
args.trt_engine_path, ssd_model_uff_path,
trt_engine_datatype=args.trt_engine_datatype,
calib_dataset = args.calib_dataset,
batch_size=args.max_batch_size)
print("TRT ENGINE PATH", args.trt_engine_path)
if args.camera == True:
print('Running webcam:', args.camera)
# Define the video stream
        cap = cv2.VideoCapture(0)  # Change the index only if you have more than one webcam
# Loop for running inference on frames from the webcam
while True:
# Read frame from camera (and expand its dimensions to fit)
ret, image_np = cap.read()
# Actually run inference
detection_out, keep_count_out = trt_inference_wrapper.infer_webcam(image_np)
# Overlay the bounding boxes on the image
# let analyze_prediction() draw them based on model output
img_pil = Image.fromarray(image_np)
prediction_fields = len(TRT_PREDICTION_LAYOUT)
for det in range(int(keep_count_out[0])):
analyze_prediction(detection_out, det * prediction_fields, img_pil)
final_img = np.asarray(img_pil)
# Display output
cv2.imshow('object detection', final_img)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
if __name__ == '__main__':
main()
| object-detection-tensorrt-example-master | SSD_Model/detect_objects_webcam.py |
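# --- Illustrative example (editor's sketch, not part of the sample) ---
# How the flat TRT_PREDICTION_LAYOUT above is indexed: 7 consecutive floats
# per detection. The array values here are fabricated.
LAYOUT = {"image_id": 0, "label": 1, "confidence": 2,
          "xmin": 3, "ymin": 4, "xmax": 5, "ymax": 6}
detection_out = [0, 3, 0.87, 0.10, 0.20, 0.50, 0.60,   # detection 0
                 0, 17, 0.42, 0.30, 0.10, 0.90, 0.80]  # detection 1
fields = len(LAYOUT)
for det in range(len(detection_out) // fields):
    start = det * fields
    print("detection", det, "confidence", detection_out[start + LAYOUT["confidence"]])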
# COCO dataset utility functions
import numpy as np
COCO_CLASSES_LIST = [
'unlabeled',
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'street sign',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'hat',
'backpack',
'umbrella',
'shoe',
'eye glasses',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'plate',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'mirror',
'dining table',
'window',
'desk',
'toilet',
'door',
'tv',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'blender',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush',
]
COCO_CLASSES_SET = set(COCO_CLASSES_LIST)
COCO_CLASS_ID = {
cls_name: idx for idx, cls_name in enumerate(COCO_CLASSES_LIST)
}
# Random RGB colors for each class (useful for drawing bounding boxes)
COCO_COLORS = \
np.random.uniform(0, 255, size=(len(COCO_CLASSES_LIST), 3)).astype(np.uint8)
def is_coco_label(label):
"""Returns boolean which tells if given label is COCO label.
Args:
label (str): object label
Returns:
bool: is given label a COCO class label
"""
return label in COCO_CLASSES_SET
def get_coco_label_color(label):
"""Returns color corresponding to given COCO label, or None.
Args:
label (str): object label
Returns:
np.array: RGB color described in 3-element np.array
"""
if not is_coco_label(label):
return None
else:
return COCO_COLORS[COCO_CLASS_ID[label]]
| object-detection-tensorrt-example-master | SSD_Model/utils/coco.py |
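# --- Illustrative example (editor's sketch, not part of the sample) ---
# Label/color lookups from the module above; assumes it is importable as
# utils.coco from the SSD_Model directory.
from utils.coco import COCO_CLASS_ID, is_coco_label, get_coco_label_color

if __name__ == "__main__":
    print(is_coco_label('dog'), COCO_CLASS_ID['dog'])  # True, index in the class list
    print(get_coco_label_color('dog'))                 # random RGB triple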
#!/usr/bin/env python3
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
import sys
import os
import ctypes
import time
import argparse
import glob
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
import numpy as np
import tensorrt as trt
from PIL import Image
# Utility functions
import utils.inference as inference_utils # TRT/TF inference wrappers
import utils.model as model_utils # UFF conversion
import utils.mAP as voc_mAP_utils # mAP computation
import utils.voc as voc_utils # VOC dataset descriptors
import utils.coco as coco_utils # COCO dataset descriptors
from utils.paths import PATHS # Path management
# VOC and COCO label lists
VOC_CLASSES = voc_utils.VOC_CLASSES_LIST
COCO_LABELS = coco_utils.COCO_CLASSES_LIST
# Model used for inference
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
# Precision command line argument -> TRT Engine datatype
TRT_PRECISION_TO_DATATYPE = {
8: trt.DataType.INT8,
16: trt.DataType.HALF,
32: trt.DataType.FLOAT
}
# Layout of TensorRT network output metadata
TRT_PREDICTION_LAYOUT = {
"image_id": 0,
"label": 1,
"confidence": 2,
"xmin": 3,
"ymin": 4,
"xmax": 5,
"ymax": 6
}
class Detection(object):
"""Describes detection for VOC detection file.
During evaluation of model on VOC, we save objects detected to result
    files, with one file per class. One line in such a file corresponds
to one object that is detected in an image. The Detection class describes
one such detection.
Attributes:
image_number (str): number of image from VOC dataset
confidence (float): confidence score for detection
xmin (float): bounding box min x coordinate
ymin (float): bounding box min y coordinate
xmax (float): bounding box max x coordinate
ymax (float): bounding box max y coordinate
"""
def __init__(self, image_number, confidence, xmin, ymin, xmax, ymax):
self.image_number = image_number
self.confidence = confidence
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def __repr__(self):
return "{} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(
self.image_number, self.confidence,
self.xmin, self.ymin, self.xmax, self.ymax
)
def write_to_file(self, f):
"""Adds detection corresponding to Detection object to file f.
Args:
f (file): detection results file
"""
f.write(self.__repr__())
def fetch_prediction_field(field_name, detection_out, pred_start_idx):
"""Fetches prediction field from prediction byte array.
After TensorRT inference, prediction data is saved in
byte array and returned by object detection network.
This byte array contains several pieces of data about
prediction - we call one such piece a prediction field.
This function, given prediction byte array returned by network,
    starting index of given prediction and field name of interest,
returns prediction field data corresponding to given arguments.
Args:
field_name (str): field of interest, one of keys of TRT_PREDICTION_LAYOUT
detection_out (array): object detection network output
pred_start_idx (int): start index of prediction of interest in detection_out
Returns:
Prediction field corresponding to given data.
"""
return detection_out[pred_start_idx + TRT_PREDICTION_LAYOUT[field_name]]
def analyze_tensorrt_prediction(detection_out, pred_start_idx):
image_id = int(fetch_prediction_field("image_id", detection_out, pred_start_idx))
label = int(fetch_prediction_field("label", detection_out, pred_start_idx))
confidence = fetch_prediction_field("confidence", detection_out, pred_start_idx)
xmin = fetch_prediction_field("xmin", detection_out, pred_start_idx)
ymin = fetch_prediction_field("ymin", detection_out, pred_start_idx)
xmax = fetch_prediction_field("xmax", detection_out, pred_start_idx)
ymax = fetch_prediction_field("ymax", detection_out, pred_start_idx)
xmin = float(xmin) * model_utils.ModelData.get_input_width()
ymin = float(ymin) * model_utils.ModelData.get_input_height()
xmax = float(xmax) * model_utils.ModelData.get_input_width()
ymax = float(ymax) * model_utils.ModelData.get_input_height()
return image_id, label, confidence, xmin, ymin, xmax, ymax
def produce_tensorrt_detections(detection_files, trt_inference_wrapper, max_batch_size,
image_numbers, image_path):
"""Fetches output from TensorRT model, and saves it to results file.
The output of TensorRT model is a pair of:
* location byte array that contains detection metadata,
      which is laid out according to TRT_PREDICTION_LAYOUT
* number of detections returned by NMS
TRT_PREDICTION_LAYOUT fields correspond to Tensorflow ones as follows:
label -> detection_classes
confidence -> detection_scores
xmin, ymin, xmax, ymax -> detection_boxes
    The number of detections corresponds to the num_detections Tensorflow output.
    Tensorflow output semantics are more thoroughly explained in
    produce_tensorflow_detections().
    This function iterates over all VOC images, feeding each one
    into the TensorRT model, fetching object detections
    from each output, converting them to Detection objects,
    and saving them to detection result files.
Args:
detection_files (dict): dictionary that maps class labels to
class result files
trt_inference_wrapper (inference_utils.TRTInference):
            internal Python class wrapping TensorRT inference
            setup/run code
        max_batch_size (int): maximum batch size used for inference
        image_numbers [str]: VOC image numbers to use for inference
        image_path (str): Python string, which stores path to VOC image file,
            when you do image_path.format(voc_image_number)
"""
total_imgs = len(image_numbers)
for idx in range(0, len(image_numbers), max_batch_size):
imgs = image_numbers[idx:idx+max_batch_size]
batch_size = len(imgs)
print("Infering image {}/{}".format(idx+1, total_imgs))
image_paths = [image_path.format(img) for img in imgs]
detections, keep_count = trt_inference_wrapper.infer_batch(image_paths)
prediction_fields = len(TRT_PREDICTION_LAYOUT)
for img_idx, img_number in enumerate(imgs):
img_predictions_start_idx = prediction_fields * keep_count[img_idx] * img_idx
for det in range(int(keep_count[img_idx])):
_, label, confidence, xmin, ymin, xmax, ymax = \
analyze_tensorrt_prediction(detections, img_predictions_start_idx + det * prediction_fields)
if confidence > 0.0:
label_name = voc_utils.coco_label_to_voc_label(COCO_LABELS[label])
if label_name:
det_file = detection_files[label_name]
detection = Detection(
img_number,
confidence,
xmin,
ymin,
xmax,
ymax,
)
detection.write_to_file(det_file)
def produce_tensorflow_detections(detection_files, tf_inference_wrapper, batch_size,
image_numbers, image_path):
"""Fetches output from Tensorflow model, and saves it to results file.
The format of output from Tensorflow is output_dict Python
dictionary containing following fields:
    num_detections: maximum number of detections kept per image
detection_classes: label of classes detected
detection_scores: confidences for detections
detection_boxes: bounding box coordinates for detections,
in format (ymin, xmin, ymax, xmax)
This function iterates over all VOC images, feeding each one
    into the Tensorflow model, fetching object detections
    from each output, converting them to Detection objects,
    and saving them to detection result files.
Args:
detection_files (dict): dictionary that maps class labels to
class result files
tf_inference_wrapper (inference_utils.TensorflowInference):
            internal Python class wrapping Tensorflow inference
setup/run code
batch_size (int): batch size used for inference
image_numbers [str]: VOC image numbers to use for inference
image_path (str): Python string, which stores path to VOC image file,
            when you do image_path.format(voc_image_number)
"""
total_imgs = len(image_numbers)
for idx in range(0, len(image_numbers), batch_size):
print("Infering image {}/{}".format(idx+1, total_imgs))
imgs = image_numbers[idx:idx+batch_size]
image_paths = [image_path.format(img) for img in imgs]
output_dict = tf_inference_wrapper.infer_batch(image_paths)
keep_count = output_dict['num_detections']
for img_idx, img_number in enumerate(imgs):
for det in range(int(keep_count[img_idx])):
label = output_dict['detection_classes'][img_idx][det]
confidence = output_dict['detection_scores'][img_idx][det]
bbox = output_dict['detection_boxes'][img_idx][det]
                # Output bounding boxes are in normalized [0, 1] format;
                # here we rescale them to pixel coordinates of the 300x300 input
ymin, xmin, ymax, xmax = bbox
xmin = float(xmin) * model_utils.ModelData.get_input_width()
ymin = float(ymin) * model_utils.ModelData.get_input_height()
xmax = float(xmax) * model_utils.ModelData.get_input_width()
ymax = float(ymax) * model_utils.ModelData.get_input_height()
# Detection is saved only if confidence is bigger than zero
if confidence > 0.0:
# Model was trained on COCO, so we need to convert label to VOC one
label_name = voc_utils.coco_label_to_voc_label(COCO_LABELS[label])
if label_name: # Checks for label_name correctness
det_file = detection_files[label_name]
detection = Detection(
img_number,
confidence,
xmin,
ymin,
xmax,
ymax,
)
detection.write_to_file(det_file)
def should_skip_inference(parsed_args):
"""Checks if inference should be skipped.
When evaluating on VOC, if results from some earlier run
of the script exist, we can reuse them to evaluate VOC mAP.
The user can overwrite this behavior by supplying -f flag to the script.
Args:
parsed_args (dict): commandline arguments parsed by
parse_commandline_arguments()
Returns:
bool: if True, script skips inference
"""
skip_inference = True
for voc_class in VOC_CLASSES:
voc_class_detection_file = \
os.path.join(parsed_args['results_dir'], 'det_test_{}.txt'.format(voc_class))
if os.path.exists(voc_class_detection_file) and not parsed_args['force_inference']:
continue
else:
skip_inference = False
if skip_inference:
print("Model detections present - skipping inference. To avoid this, use -f flag.")
return skip_inference
def preprocess_voc():
"""Resizes all VOC images to 300x300 and saves them into .ppm files.
This script assumes all images fetched to network in batches have size 300x300,
so in this function we preproceess all VOC images to fit that format.
"""
voc_root = PATHS.get_voc_dir_path()
voc_jpegs = glob.glob(
os.path.join(voc_root, 'JPEGImages', '*.jpg'))
voc_ppms = glob.glob(
os.path.join(voc_root, 'PPMImages', '*.ppm'))
# Check if preprocessing is needed by comparing
# image names between JPEGImages and PPMImages
voc_jpegs_basenames = \
[os.path.splitext(os.path.basename(p))[0] for p in voc_jpegs]
voc_ppms_basenames = \
[os.path.splitext(os.path.basename(p))[0] for p in voc_ppms]
# If lists are not the same, preprocessing is needed
if sorted(voc_jpegs_basenames) != sorted(voc_ppms_basenames):
print("Preprocessing VOC dataset. It may take few minutes.")
# Make PPMImages directory if it doesn't exist
voc_ppms_path = PATHS.get_voc_ppm_img_path()
if not os.path.exists(os.path.dirname(voc_ppms_path)):
os.makedirs(os.path.dirname(voc_ppms_path))
# For each .jpg file, make a resized
# .ppm copy to fit model input expectations
for voc_jpeg_path in voc_jpegs:
voc_jpeg_basename = os.path.basename(voc_jpeg_path)
voc_ppm_path = voc_ppms_path.format(
os.path.splitext(voc_jpeg_basename)[0])
if not os.path.exists(voc_ppm_path):
img_pil = Image.open(voc_jpeg_path)
img_pil = img_pil.resize(
size=(
model_utils.ModelData.get_input_width(),
model_utils.ModelData.get_input_height()),
resample=Image.BILINEAR
)
img_pil.save(voc_ppm_path)
def adjust_paths(args):
"""Adjust all file/directory paths, arguments passed by user.
During script launch, user can pass several arguments to the script
(e.g. --workspace_dir, --voc_dir), that define where script will look
    for the files needed for execution. This function adjusts the internal
    Paths Python data structure to accommodate changes from the defaults
requested by user through appropriate command line arguments.
Args:
args (argparse.Namespace): parsed user arguments
"""
if args.voc_dir:
PATHS.set_voc_dir_path(args.voc_dir)
if args.flatten_concat:
PATHS.set_flatten_concat_plugin_path(args.flatten_concat)
if args.workspace_dir:
PATHS.set_workspace_dir_path(args.workspace_dir)
if not os.path.exists(PATHS.get_workspace_dir_path()):
os.makedirs(PATHS.get_workspace_dir_path())
def parse_commandline_arguments():
"""Parses command line arguments and adjusts internal data structures."""
# Define script command line arguments
parser = argparse.ArgumentParser(description='Run object detection evaluation on VOC2007 dataset.')
parser.add_argument('inference_backend', metavar='INFERENCE_BACKEND',
type=str, choices=['tensorrt', 'tensorflow'], default='tensorrt', nargs='?',
help='inference backend to run evaluation with')
parser.add_argument('-p', '--precision', type=int, choices=[32, 16, 8], default=32,
help='desired TensorRT float precision to build an engine with')
parser.add_argument('-b', '--max_batch_size', type=int, default=64,
help='max TensorRT engine batch size')
parser.add_argument('-f', '--force_inference', action='store_true',
help='force model inference even if detections exist')
parser.add_argument('-w', '--workspace_dir',
help='sample workspace directory')
parser.add_argument('-fc', '--flatten_concat',
help='path of built FlattenConcat plugin')
parser.add_argument('-voc', '--voc_dir',
help='VOC2007 root directory')
# Parse arguments passed
args = parser.parse_args()
# Adjust global Paths data structure
adjust_paths(args)
# Verify Paths after adjustments. This also exits script if verification fails
PATHS.verify_all_paths(should_verify_voc=True)
# Fetch directory to save inference results to, create it if it doesn't exist
trt_engine_datatype = None
trt_engine_path = None
if args.inference_backend == 'tensorrt':
# In case of TensorRT we also fetch engine data type and engine path
trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
trt_engine_path = PATHS.get_engine_path(trt_engine_datatype,
args.max_batch_size)
if not os.path.exists(os.path.dirname(trt_engine_path)):
os.makedirs(os.path.dirname(trt_engine_path))
results_dir = PATHS.get_voc_model_detections_path('tensorrt',
trt_engine_datatype)
elif args.inference_backend == 'tensorflow':
results_dir = PATHS.get_voc_model_detections_path('tensorflow')
if not os.path.exists(results_dir):
os.makedirs(results_dir)
# Return parsed arguments for further functions to use
parsed = {
'inference_backend': args.inference_backend,
'max_batch_size': args.max_batch_size,
'force_inference': args.force_inference,
'results_dir': results_dir,
'trt_engine_path': trt_engine_path,
'trt_engine_datatype': trt_engine_datatype
}
return parsed
if __name__ == '__main__':
# Parse command line arguments
parsed = parse_commandline_arguments()
# Check if inference should be skipped (if model inference
# results are already computed, we don't need to recompute
# them for VOC mAP computation)
skip_inference = should_skip_inference(parsed)
# And if inference will not be skipped, then we
# create files to store its results in
detection_files = {}
if not skip_inference:
for voc_class in VOC_CLASSES:
detection_files[voc_class] = open(
os.path.join(
parsed['results_dir'], 'det_test_{}.txt'.format(voc_class)
), 'w'
)
# Loading FlattenConcat plugin library using CDLL has a side
# effect of loading FlattenConcat plugin into internal TensorRT
# PluginRegistry data structure. This will be needed when parsing
# network into UFF, since some operations will need to use this plugin
try:
ctypes.CDLL(PATHS.get_flatten_concat_plugin_path())
except FileNotFoundError:
print(
"Error: {}\n{}\n{}".format(
"Could not find {}".format(PATHS.get_flatten_concat_plugin_path()),
"Make sure you have compiled FlattenConcat custom plugin layer",
"For more details, check README.md"
)
)
sys.exit(1)
# Fetch frozen model .pb path...
ssd_model_pb_path = PATHS.get_model_pb_path(MODEL_NAME)
# ...and .uff path, if needed (converting .pb to .uff if not already done)
if parsed['inference_backend'] == 'tensorrt':
ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
if not os.path.exists(ssd_model_uff_path):
model_utils.prepare_ssd_model(MODEL_NAME)
# This block of code sets up and performs inference, if needed
if not skip_inference:
# Preprocess VOC dataset if necessary by resizing images
preprocess_voc()
# Fetch image list and input .ppm files path
with open(PATHS.get_voc_image_set_path(), 'r') as f:
voc_image_numbers = f.readlines()
voc_image_numbers = [line.strip() for line in voc_image_numbers]
voc_image_path = PATHS.get_voc_ppm_img_path()
# Tensorflow and TensorRT paths are a little bit different,
# so we must treat each one individually
if parsed['inference_backend'] == 'tensorrt':
# TRTInference initialization initializes
# all TensorRT structures, creates engine if it doesn't
# already exist and finally saves it to file for future uses
trt_inference_wrapper = inference_utils.TRTInference(
parsed['trt_engine_path'], ssd_model_uff_path,
parsed['trt_engine_datatype'], parsed['max_batch_size'])
# Outputs from TensorRT are handled differently than
# outputs from Tensorflow, that's why we use another
# function to produce the detections from them
produce_tensorrt_detections(detection_files,
trt_inference_wrapper, parsed['max_batch_size'],
voc_image_numbers, voc_image_path)
elif parsed['inference_backend'] == 'tensorflow':
# In case of Tensorflow all we need to
# initialize inference is frozen model...
tf_inference_wrapper = \
inference_utils.TensorflowInference(ssd_model_pb_path)
# ...and after initializing it, we can
# proceed to producing detections
produce_tensorflow_detections(detection_files,
tf_inference_wrapper, parsed['max_batch_size'],
voc_image_numbers, voc_image_path)
    # Flush detections to files to make sure evaluation is correct
for key in detection_files:
detection_files[key].flush()
# Do mAP computation based on saved detections
voc_mAP_utils.do_python_eval(parsed['results_dir'])
# Close detection files, they are not needed anymore
for key in detection_files:
detection_files[key].close()
| object-detection-tensorrt-example-master | SSD_Model/utils/voc_evaluation.py |
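# --- Illustrative example (editor's sketch, not part of the sample) ---
# One line of a det_test_<class>.txt results file, matching Detection.__repr__
# above; all values fabricated.
if __name__ == "__main__":
    image_number, confidence = "000012", 0.913
    xmin, ymin, xmax, ymax = 48.0, 60.5, 210.0, 180.0
    print("{} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}".format(
        image_number, confidence, xmin, ymin, xmax, ymax))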
# uff_ssd path management singleton class
import os
import sys
import tensorrt as trt
class Paths(object):
def __init__(self):
self._SAMPLE_ROOT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir
)
self._FLATTEN_CONCAT_PLUGIN_PATH = os.path.join(
self._SAMPLE_ROOT,
'build',
'libflattenconcat.so'
)
self._WORKSPACE_DIR_PATH = os.path.join(
self._SAMPLE_ROOT,
'workspace'
)
self._VOC_DIR_PATH = \
os.path.join(self._SAMPLE_ROOT, 'VOCdevkit', 'VOC2007')
# User configurable paths
def set_workspace_dir_path(self, workspace_dir):
self._WORKSPACE_DIR_PATH = workspace_dir
def get_workspace_dir_path(self):
return self._WORKSPACE_DIR_PATH
def set_flatten_concat_plugin_path(self, plugin_path):
self._FLATTEN_CONCAT_PLUGIN_PATH = plugin_path
def get_flatten_concat_plugin_path(self):
return self._FLATTEN_CONCAT_PLUGIN_PATH
def set_voc_dir_path(self, voc_dir_path):
self._VOC_DIR_PATH = voc_dir_path
def get_voc_dir_path(self):
return self._VOC_DIR_PATH
# Fixed paths
def get_sample_root(self):
return self._SAMPLE_ROOT
def get_models_dir_path(self):
return os.path.join(self.get_workspace_dir_path(), 'models')
def get_engines_dir_path(self):
return os.path.join(self.get_workspace_dir_path(), 'engines')
def get_engine_path(self, inference_type=trt.DataType.FLOAT, max_batch_size=1):
inference_type_to_str = {
trt.DataType.FLOAT: 'FLOAT',
trt.DataType.HALF: 'HALF',
trt.DataType.INT32: 'INT32',
trt.DataType.INT8: 'INT8'
}
return os.path.join(
self.get_engines_dir_path(),
inference_type_to_str[inference_type],
'engine_bs_{}.buf'.format(max_batch_size))
def get_voc_annotation_cache_path(self):
return os.path.join(self.get_workspace_dir_path(), 'annotations_cache')
def get_voc_image_set_path(self):
return os.path.join(self.get_voc_dir_path(), 'ImageSets', 'Main', 'test.txt')
def get_voc_annotation_path(self):
return os.path.join(self.get_voc_dir_path(), 'Annotations', '{}.xml')
def get_voc_ppm_img_path(self):
return os.path.join(self.get_voc_dir_path(), 'PPMImages', '{}.ppm')
def get_voc_jpg_img_path(self):
return os.path.join(self.get_voc_dir_path(), 'JPEGImages', '{}.jpg')
def get_voc_tensorflow_model_detections_path(self):
return os.path.join(self.get_workspace_dir_path(), 'results', 'tensorflow')
def get_voc_tensorrt_model_detections_path(self, trt_engine_datatype=trt.DataType.FLOAT):
trt_results_path = \
os.path.join(self.get_workspace_dir_path(), 'results', 'tensorrt')
if trt_engine_datatype == trt.DataType.HALF:
return os.path.join(trt_results_path, 'HALF')
else:
return os.path.join(trt_results_path, 'FLOAT')
    def get_voc_model_detections_path(self, backend='tensorrt', trt_engine_datatype=trt.DataType.FLOAT):
        # The second argument is a trt.DataType (callers pass the engine datatype),
        # so it is named accordingly rather than the misleading former name use_fp16
        if backend != 'tensorrt':
            return self.get_voc_tensorflow_model_detections_path()
        else:
            return self.get_voc_tensorrt_model_detections_path(trt_engine_datatype)
def get_model_url(self, model_name):
return 'http://download.tensorflow.org/models/object_detection/{}.tar.gz'.format(model_name)
def get_model_dir_path(self, model_name):
return os.path.join(self.get_models_dir_path(), model_name)
def get_model_pb_path(self, model_name):
return os.path.join(
self.get_model_dir_path(model_name),
'frozen_inference_graph.pb'
)
def get_model_uff_path(self, model_name):
return os.path.join(
self.get_model_dir_path(model_name),
'frozen_inference_graph.uff'
)
# Paths correctness verifier
def verify_all_paths(self, should_verify_voc=False):
error = False
if should_verify_voc:
error = self._verify_voc_paths()
if not os.path.exists(self.get_workspace_dir_path()):
error = True
if error:
print("An error occured when running the script.")
sys.exit(1)
def _verify_voc_paths(self):
error = False
voc_dir = self.get_voc_dir_path()
voc_image_list = self.get_voc_image_set_path()
# 1) Check if directory and image list file are present
if not os.path.exists(voc_dir) or \
not os.path.exists(voc_image_list):
self._print_incorrect_voc_error(voc_dir)
error = True
# 2) Check if all images listed in image list are present
with open(voc_image_list, 'r') as f:
image_numbers = f.readlines()
image_numbers = [line.strip() for line in image_numbers]
if not self._verify_voc(image_numbers):
self._print_incorrect_voc_error(voc_dir)
error = True
return error
def _verify_voc(self, voc_image_list):
voc_image_path = self.get_voc_jpg_img_path()
for img_number in voc_image_list:
img = voc_image_path.format(img_number)
if not os.path.exists(img):
return False
return True
# Error printers
def _print_incorrect_voc_error(self, voc_dir):
print(
"Error: {}\n{}\n{}".format(
"Incomplete VOC dataset detected (voc_dir: {})".format(voc_dir),
"Try redownloading VOC or check if --voc_dir is set up correctly",
"For more details, check README.md"
)
)
PATHS = Paths()
| object-detection-tensorrt-example-master | SSD_Model/utils/paths.py |
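# --- Illustrative example (editor's sketch, not part of the sample) ---
# Typical use of the PATHS singleton above; assumes utils.paths is importable
# and TensorRT is installed. The workspace directory is a made-up example.
from utils.paths import PATHS

if __name__ == "__main__":
    PATHS.set_workspace_dir_path('/tmp/uff_ssd_workspace')  # hypothetical path
    print(PATHS.get_engines_dir_path())
    print(PATHS.get_model_uff_path('ssd_inception_v2_coco_2017_11_17'))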
 | object-detection-tensorrt-example-master | SSD_Model/utils/__init__.py |
# Utility functions for drawing bounding boxes on PIL images
import numpy as np
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
def draw_bounding_boxes_on_image(image,
boxes,
color=(255, 0, 0),
thickness=4,
display_str_list=()):
"""Draws bounding boxes on image.
Args:
image (PIL.Image): PIL.Image object
boxes (np.array): a 2 dimensional numpy array
of [N, 4]: (ymin, xmin, ymax, xmax)
The coordinates are in normalized format between [0, 1]
color (int, int, int): RGB tuple describing color to draw bounding box
thickness (int): bounding box line thickness
display_str_list [str]: list of strings.
Contains one string for each bounding box.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('boxes must be of size [N, 4]')
for i in range(boxes_shape[0]):
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list[i])
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color=(255, 0, 0),
thickness=4,
display_str='',
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
The string passed in display_str is displayed above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the string
is displayed below the bounding box.
Args:
image (PIL.Image): PIL.Image object
ymin (float): ymin of bounding box
xmin (float): xmin of bounding box
ymax (float): ymax of bounding box
xmax (float): xmax of bounding box
color (int, int, int): RGB tuple describing color to draw bounding box
thickness (int): line thickness
display_str (str): string to display in box
use_normalized_coordinates (bool): If True, treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=tuple(color))
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display string added to the top of the bounding
# box exceeds the top of the image, move the string below the bounding box
# instead of above
display_str_height = font.getsize(display_str)[1]
# Each display_str has a top and bottom margin of 0.05x
total_display_str_height = (1 + 2 * 0.05) * display_str_height
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=tuple(color))
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
| object-detection-tensorrt-example-master | SSD_Model/utils/boxes.py |
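# --- Illustrative example (editor's sketch, not part of the sample) ---
# Drawing a single normalized (ymin, xmin, ymax, xmax) box on a blank image;
# assumes utils.boxes is importable and an older Pillow where
# ImageFont.getsize is still available.
import numpy as np
from PIL import Image
from utils.boxes import draw_bounding_boxes_on_image

if __name__ == "__main__":
    img = Image.new('RGB', (300, 300))
    boxes = np.array([[0.1, 0.1, 0.6, 0.5]])
    draw_bounding_boxes_on_image(img, boxes, display_str_list=["demo: 90%"])
    img.save('demo_box.png')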
# Model download and UFF convertion utils
import os
import sys
import tarfile
import requests
import tensorflow as tf
import tensorrt as trt
import graphsurgeon as gs
import uff
from utils.paths import PATHS
# UFF conversion functionality
# This class contains converted (UFF) model metadata
class ModelData(object):
# Name of input node
INPUT_NAME = "Input"
# CHW format of model input
INPUT_SHAPE = (3, 300, 300)
# Name of output node
OUTPUT_NAME = "NMS"
@staticmethod
def get_input_channels():
return ModelData.INPUT_SHAPE[0]
@staticmethod
def get_input_height():
return ModelData.INPUT_SHAPE[1]
@staticmethod
def get_input_width():
return ModelData.INPUT_SHAPE[2]
def ssd_unsupported_nodes_to_plugin_nodes(ssd_graph):
"""Makes ssd_graph TensorRT comparible using graphsurgeon.
This function takes ssd_graph, which contains graphsurgeon
DynamicGraph data structure. This structure describes frozen Tensorflow
graph, that can be modified using graphsurgeon (by deleting, adding,
replacing certain nodes). The graph is modified by removing
Tensorflow operations that are not supported by TensorRT's UffParser
and replacing them with custom layer plugin nodes.
Note: This specific implementation works only for
ssd_inception_v2_coco_2017_11_17 network.
Args:
ssd_graph (gs.DynamicGraph): graph to convert
Returns:
gs.DynamicGraph: UffParser compatible SSD graph
"""
# Create TRT plugin nodes to replace unsupported ops in Tensorflow graph
channels = ModelData.get_input_channels()
height = ModelData.get_input_height()
width = ModelData.get_input_width()
Input = gs.create_plugin_node(name="Input",
op="Placeholder",
dtype=tf.float32,
shape=[1, channels, height, width])
PriorBox = gs.create_plugin_node(name="GridAnchor", op="GridAnchor_TRT",
minSize=0.2,
maxSize=0.95,
aspectRatios=[1.0, 2.0, 0.5, 3.0, 0.33],
variance=[0.1,0.1,0.2,0.2],
featureMapShapes=[19, 10, 5, 3, 2, 1],
numLayers=6
)
NMS = gs.create_plugin_node(
name="NMS",
op="NMS_TRT",
shareLocation=1,
varianceEncodedInTarget=0,
backgroundLabelId=0,
confidenceThreshold=1e-8,
nmsThreshold=0.6,
topK=100,
keepTopK=100,
numClasses=91,
inputOrder=[0, 2, 1],
confSigmoid=1,
isNormalized=1
)
concat_priorbox = gs.create_node(
"concat_priorbox",
op="ConcatV2",
dtype=tf.float32,
axis=2
)
concat_box_loc = gs.create_plugin_node(
"concat_box_loc",
op="FlattenConcat_TRT",
dtype=tf.float32,
axis=1,
ignoreBatch=0
)
concat_box_conf = gs.create_plugin_node(
"concat_box_conf",
op="FlattenConcat_TRT",
dtype=tf.float32,
axis=1,
ignoreBatch=0
)
# Create a mapping of namespace names -> plugin nodes.
namespace_plugin_map = {
"MultipleGridAnchorGenerator": PriorBox,
"Postprocessor": NMS,
"Preprocessor": Input,
"ToFloat": Input,
"image_tensor": Input,
"MultipleGridAnchorGenerator/Concatenate": concat_priorbox,
"MultipleGridAnchorGenerator/Identity": concat_priorbox,
"concat": concat_box_loc,
"concat_1": concat_box_conf
}
# Create a new graph by collapsing namespaces
ssd_graph.collapse_namespaces(namespace_plugin_map)
# Remove the outputs, so we just have a single output node (NMS).
# If remove_exclusive_dependencies is True, the whole graph will be removed!
ssd_graph.remove(ssd_graph.graph_outputs, remove_exclusive_dependencies=False)
return ssd_graph
def model_to_uff(model_path, output_uff_path, silent=False):
"""Takes frozen .pb graph, converts it to .uff and saves it to file.
Args:
model_path (str): .pb model path
output_uff_path (str): .uff path where the UFF file will be saved
        silent (bool): if True, suppresses progress messages written to stdout
"""
dynamic_graph = gs.DynamicGraph(model_path)
dynamic_graph = ssd_unsupported_nodes_to_plugin_nodes(dynamic_graph)
uff.from_tensorflow(
dynamic_graph.as_graph_def(),
[ModelData.OUTPUT_NAME],
output_filename=output_uff_path,
text=True
)
# Model download functionality
def maybe_print(should_print, print_arg):
"""Prints message if supplied boolean flag is true.
Args:
should_print (bool): if True, will print print_arg to stdout
print_arg (str): message to print to stdout
"""
if should_print:
print(print_arg)
def maybe_mkdir(dir_path):
"""Makes directory if it doesn't exist.
Args:
dir_path (str): directory path
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def download_file(file_url, file_dest_path, silent=False):
"""Downloads file from supplied URL and puts it into supplied directory.
Args:
file_url (str): URL with file to download
file_dest_path (str): path to save downloaded file in
        silent (bool): if True, suppresses progress messages written to stdout
"""
with open(file_dest_path, "wb") as f:
maybe_print(not silent, "Downloading {}".format(file_dest_path))
response = requests.get(file_url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None or silent: # no content length header or silent, just write file
f.write(response.content)
else: # not silent, print progress
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stdout.write(
"\rDownload progress [{}{}] {}%".format(
'=' * done, ' ' * (50-done),
int(100 * dl / total_length)))
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
def download_model(model_name, silent=False):
"""Downloads model_name from Tensorflow model zoo.
Args:
model_name (str): chosen object detection model
        silent (bool): if True, suppresses progress messages written to stdout
"""
maybe_print(not silent, "Preparing pretrained model")
model_dir = PATHS.get_models_dir_path()
maybe_mkdir(model_dir)
model_url = PATHS.get_model_url(model_name)
model_archive_path = os.path.join(model_dir, "{}.tar.gz".format(model_name))
download_file(model_url, model_archive_path, silent)
maybe_print(not silent, "Download complete\nUnpacking {}".format(model_archive_path))
with tarfile.open(model_archive_path, "r:gz") as tar:
tar.extractall(path=model_dir)
maybe_print(not silent, "Extracting complete\nRemoving {}".format(model_archive_path))
os.remove(model_archive_path)
maybe_print(not silent, "Model ready")
def prepare_ssd_model(model_name="ssd_inception_v2_coco_2017_11_17", silent=False):
"""Downloads pretrained object detection model and converts it to UFF.
The model is downloaded from Tensorflow object detection model zoo.
Currently only ssd_inception_v2_coco_2017_11_17 model is supported
due to model_to_uff() using logic specific to that network when converting.
Args:
model_name (str): chosen object detection model
        silent (bool): if True, suppresses progress messages written to stdout
"""
if model_name != "ssd_inception_v2_coco_2017_11_17":
raise NotImplementedError(
"Model {} is not supported yet".format(model_name))
download_model(model_name, silent)
ssd_pb_path = PATHS.get_model_pb_path(model_name)
ssd_uff_path = PATHS.get_model_uff_path(model_name)
model_to_uff(ssd_pb_path, ssd_uff_path, silent)
| object-detection-tensorrt-example-master | SSD_Model/utils/model.py |
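# --- Illustrative example (editor's sketch, not part of the sample) ---
# Reading the converted-model metadata above; assumes utils.model is importable,
# which requires the TensorFlow/TensorRT/graphsurgeon/uff toolchain.
from utils.model import ModelData

if __name__ == "__main__":
    shape = (ModelData.get_input_channels(),
             ModelData.get_input_height(),
             ModelData.get_input_width())
    print(ModelData.INPUT_NAME, ModelData.OUTPUT_NAME, shape)  # Input NMS (3, 300, 300)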
import tensorrt as trt
import os
import pycuda.driver as cuda
import pycuda.autoinit
from PIL import Image
import numpy as np
# For reading size information from batches
import struct
IMG_H, IMG_W, IMG_CH = 300, 300, 3
class SSDEntropyCalibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, data_dir, cache_file):
# Whenever you specify a custom constructor for a TensorRT class,
# you MUST call the constructor of the parent explicitly.
trt.IInt8EntropyCalibrator2.__init__(self)
self.num_calib_imgs = 100 # the number of images from the dataset to use for calibration
self.batch_size = 10
self.batch_shape = (self.batch_size, IMG_CH, IMG_H, IMG_W)
self.cache_file = cache_file
calib_imgs = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]
self.calib_imgs = np.random.choice(calib_imgs, self.num_calib_imgs)
self.counter = 0 # for keeping track of how many files we have read
self.device_input = cuda.mem_alloc(trt.volume(self.batch_shape) * trt.float32.itemsize)
def get_batch_size(self):
return self.batch_size
# TensorRT passes along the names of the engine bindings to the get_batch function.
# You don't necessarily have to use them, but they can be useful to understand the order of
# the inputs. The bindings list is expected to have the same ordering as 'names'.
def get_batch(self, names):
# if there are not enough calibration images to form a batch,
# we have reached the end of our data set
if self.counter == self.num_calib_imgs:
return None
# debugging
if self.counter % 10 == 0:
print('Running Batch:', self.counter)
batch_imgs = np.zeros((self.batch_size, IMG_H*IMG_W*IMG_CH))
for i in range(self.batch_size):
image = Image.open(self.calib_imgs[self.counter + i])
# Note: Bilinear interpolation used by Pillow is a little bit
# different than the one used by Tensorflow, so if network receives
# an image that is not 300x300, the network output may differ
# from the one output by Tensorflow
image_resized = image.resize(
size=(IMG_H, IMG_W),
resample=Image.BILINEAR
)
img_np = self._load_image_into_numpy_array(image_resized)
# HWC -> CHW
img_np = img_np.transpose((2, 0, 1))
# Normalize to [-1.0, 1.0] interval (expected by model)
img_np = (2.0 / 255.0) * img_np - 1.0
img_np = img_np.ravel()
img_np = np.ascontiguousarray(img_np)
# add this image to the batch array
batch_imgs[i,:] = img_np
# increase the counter for this batch
self.counter += self.batch_size
# Copy to device, then return a list containing pointers to input device buffers.
cuda.memcpy_htod(self.device_input, batch_imgs.astype(np.float32))
return [int(self.device_input)]
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
return f.read()
def write_calibration_cache(self, cache):
print('writing calibration file')
with open(self.cache_file, "wb") as f:
f.write(cache)
def _load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image).reshape(
(im_height, im_width, 3)
).astype(np.uint8)
| object-detection-tensorrt-example-master | SSD_Model/utils/calibrator.py |
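# --- Illustrative example (editor's sketch, not part of the sample) ---
# The per-image preprocessing done inside get_batch above, reduced to numpy:
# HWC -> CHW, then normalize uint8 pixels to [-1.0, 1.0]. Input is random.
import numpy as np

if __name__ == "__main__":
    img_np = np.random.randint(0, 256, size=(300, 300, 3), dtype=np.uint8)
    x = img_np.transpose((2, 0, 1)).astype(np.float32)  # HWC -> CHW
    x = (2.0 / 255.0) * x - 1.0                         # to [-1, 1]
    print(x.shape, x.min() >= -1.0, x.max() <= 1.0)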
# VOC mAP computation, based on https://github.com/amdegroot/ssd.pytorch
import os
import sys
import pickle
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
import utils.voc as voc_utils
from utils.paths import PATHS
def parse_voc_annotation_xml(voc_annotation_xml):
"""Parse VOC annotation XML file.
VOC image annotations are described in XML files
shipped with VOC dataset, with one XML file per each image.
This function reads relevant object detection data from given
file and saves it to Python data structures.
Args:
voc_annotation_xml (str): VOC annotation XML file path
Returns:
Python list of object detections metadata.
"""
    tree = ET.parse(voc_annotation_xml)
size = tree.find('size')
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['image_width'] = size.find('width').text
obj_struct['image_height'] = size.find('height').text
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
        # Coordinates in VOC XMLs are 1-based; convert them to 0-based indexing
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
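# For reference, the relevant shape of a VOC annotation XML parsed above
# (abridged; the values are illustrative, not from the dataset):
#
#   <annotation>
#     <size><width>500</width><height>375</height></size>
#     <object>
#       <name>dog</name>
#       <pose>Left</pose>
#       <truncated>0</truncated>
#       <difficult>0</difficult>
#       <bndbox>
#         <xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax>
#       </bndbox>
#     </object>
#   </annotation>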
def get_voc_results_file_template(cls, results_dir):
"""Fetches inference detection result file path for given class.
During TensorRT/Tensorflow inference, we save class detections into
separate files, for later mAP computation. This function fetches
paths of these files.
Args:
cls (str): VOC class label
results_dir (str): path of directory containing detection results
Returns:
str: Detection results path for given class.
"""
# VOCdevkit/VOC2007/results/det_test_aeroplane.txt
filename = 'det_test_{}.txt'.format(cls)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
path = os.path.join(results_dir, filename)
return path
def do_python_eval(results_dir):
cachedir = PATHS.get_voc_annotation_cache_path()
aps = []
for i, cls in enumerate(voc_utils.VOC_CLASSES_LIST):
filename = get_voc_results_file_template(cls, results_dir)
rec, prec, ap = voc_eval(
filename,
PATHS.get_voc_image_set_path(),
cls, cachedir,
ovthresh=0.5)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
print('Mean AP = {:.4f}'.format(np.mean(aps)))
def voc_ap(rec, prec):
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
return ap
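# Worked toy example (illustrative) of the 11-point interpolation above:
#   rec  = [0.1, 0.4, 0.4, 0.7], prec = [1.0, 0.8, 0.6, 0.5]
#   t in {0.0, 0.1}      -> max prec over rec >= t is 1.0
#   t in {0.2, 0.3, 0.4} -> 0.8
#   t in {0.5, 0.6, 0.7} -> 0.5
#   t in {0.8, 0.9, 1.0} -> 0.0 (no recall that high)
#   AP = (2*1.0 + 3*0.8 + 3*0.5 + 3*0.0) / 11 ~= 0.536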
def read_voc_annotations(annotations_dir, image_numbers):
if not os.path.isdir(annotations_dir):
os.makedirs(annotations_dir)
annotations_file = os.path.join(annotations_dir, 'annots.pkl')
if not os.path.isfile(annotations_file):
# If annotations were not present, compute them
detections = {}
for i, image_num in enumerate(image_numbers):
detections[image_num] = parse_voc_annotation_xml(
PATHS.get_voc_annotation_path().format(image_num))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(image_numbers)))
# Save
print('Saving cached annotations to {:s}'.format(annotations_file))
with open(annotations_file, 'wb') as f:
pickle.dump(detections, f)
else:
# If annotations were present, load them
with open(annotations_file, 'rb') as f:
detections = pickle.load(f)
return detections
def extract_class_detections(voc_detections, classname, image_numbers):
class_detections = {}
for image_num in image_numbers:
R = [obj for obj in voc_detections[image_num] if obj['name'] == classname]
image_bboxes = [x['bbox'] for x in R]
# Transform VOC bboxes to make them describe pre-resized 300x300 images
for idx, bbox in enumerate(image_bboxes):
bbox = np.array(bbox).astype(np.float32)
width = float(R[0]['image_width'])
height = float(R[0]['image_height'])
bbox[0] *= (300.0 / width)
bbox[2] *= (300.0 / width)
bbox[1] *= (300.0 / height)
bbox[3] *= (300.0 / height)
image_bboxes[idx] = bbox
image_bboxes = np.array(image_bboxes)
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
class_detections[image_num] = {
'bbox': image_bboxes,
'difficult': difficult,
'det': det
}
return class_detections
def voc_eval(detpath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5):
with open(imagesetfile, 'r') as f:
lines = f.readlines()
image_numbers = [x.strip() for x in lines]
voc_detections = read_voc_annotations(cachedir, image_numbers)
    class_detections = extract_class_detections(voc_detections, classname,
                                                image_numbers)
is_detection_difficult = np.concatenate(
[class_detections[image_num]['difficult'] for image_num in image_numbers]
)
not_difficult_count = sum(~is_detection_difficult)
    # Read detections output by the model
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
if any(lines):
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
bboxes = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
bboxes = bboxes[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# Go down dets and mark TPs and FPs
num_detections = len(image_ids)
tp = np.zeros(num_detections)
fp = np.zeros(num_detections)
for detection in range(num_detections):
R = class_detections[image_ids[detection]]
bbox = bboxes[detection, :].astype(float)
ovmax = -np.inf
bbox_gt = R['bbox'].astype(float)
if bbox_gt.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(bbox_gt[:, 0], bbox[0])
iymin = np.maximum(bbox_gt[:, 1], bbox[1])
ixmax = np.minimum(bbox_gt[:, 2], bbox[2])
iymax = np.minimum(bbox_gt[:, 3], bbox[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) +
(bbox_gt[:, 2] - bbox_gt[:, 0]) *
(bbox_gt[:, 3] - bbox_gt[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[detection] = 1.
R['det'][jmax] = 1
else:
fp[detection] = 1.
else:
fp[detection] = 1.
# Compute precision and recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(not_difficult_count)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
| object-detection-tensorrt-example-master | SSD_Model/utils/mAP.py |
# Utility functions for building/saving/loading TensorRT Engine
import sys
import os
import cv2
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
from PIL import Image
from utils.model import ModelData
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
from utils.common import HostDeviceMem
import utils.calibrator as calibrator
import struct
def allocate_buffers(engine):
"""Allocates host and device buffer for TRT engine inference.
This function is similair to the one in common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to Python buffer. This is needed, since
TensorRT plugins doesn't support output type description, and
in our particular case, we use NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
    # The current NMS implementation in TRT only supports DataType.FLOAT but
    # it may change in the future, which could break this sample here
    # when using lower precision [e.g. NMS output would not be np.float32
    # anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32}
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = binding_to_type[str(binding)]
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def build_engine(uff_model_path, trt_logger, trt_engine_datatype=trt.DataType.FLOAT, calib_dataset=None, batch_size=1, silent=False):
with trt.Builder(trt_logger) as builder, builder.create_network() as network, trt.UffParser() as parser:
builder.max_workspace_size = 2 << 30
builder.max_batch_size = batch_size
if trt_engine_datatype == trt.DataType.HALF:
builder.fp16_mode = True
elif trt_engine_datatype == trt.DataType.INT8:
builder.fp16_mode = True
builder.int8_mode = True
builder.int8_calibrator = calibrator.SSDEntropyCalibrator(data_dir=calib_dataset, cache_file='INT8CacheFile')
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output("MarkOutput_0")
parser.parse(uff_model_path, network)
if not silent:
print("Building TensorRT engine. This may take few minutes.")
return builder.build_cuda_engine(network)
def save_engine(engine, engine_dest_path):
print('Engine:', engine)
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def load_engine(trt_runtime, engine_path):
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
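# Usage sketch (illustrative, not part of the original sample): build an
# engine from a UFF model, persist it, and reload it through a runtime.
# 'ssd.uff' and 'ssd.engine' are assumed placeholder paths.
if __name__ == "__main__":
    TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
    # Load TensorRT plugins (e.g. NMS) before building/deserializing
    trt.init_libnvinfer_plugins(TRT_LOGGER, '')
    engine = build_engine("ssd.uff", TRT_LOGGER,
                          trt_engine_datatype=trt.DataType.HALF,
                          batch_size=1)
    save_engine(engine, "ssd.engine")
    with trt.Runtime(TRT_LOGGER) as runtime:
        reloaded = load_engine(runtime, "ssd.engine")
        inputs, outputs, bindings, stream = allocate_buffers(reloaded)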
| object-detection-tensorrt-example-master | SSD_Model/utils/engine.py |
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#
import os
import argparse
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit
import tensorrt as trt
try:
    # Python 2 does not have a built-in FileNotFoundError
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def GiB(val):
return val * 1 << 30
def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[]):
'''
Parses sample arguments.
Args:
description (str): Description of the sample.
subfolder (str): The subfolder containing data relevant to this sample
        find_files (list of str): A list of filenames to find. Each filename will be replaced with an absolute path.
Returns:
str: Path of data directory.
Raises:
FileNotFoundError
'''
# Standard command-line arguments for all samples.
kDEFAULT_DATA_ROOT = os.path.join(os.sep, "usr", "src", "tensorrt", "data")
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-d", "--datadir", help="Location of the TensorRT sample data directory.", default=kDEFAULT_DATA_ROOT)
args, unknown_args = parser.parse_known_args()
# If data directory is not specified, use the default.
data_root = args.datadir
    # If the subfolder exists, use it; otherwise fall back to the data root.
subfolder_path = os.path.join(data_root, subfolder)
data_path = subfolder_path
if not os.path.exists(subfolder_path):
print("WARNING: " + subfolder_path + " does not exist. Trying " + data_root + " instead.")
data_path = data_root
# Make sure data directory exists.
if not (os.path.exists(data_path)):
raise FileNotFoundError(data_path + " does not exist. Please provide the correct data path with the -d option.")
# Find all requested files.
for index, f in enumerate(find_files):
find_files[index] = os.path.abspath(os.path.join(data_path, f))
if not os.path.exists(find_files[index]):
raise FileNotFoundError(find_files[index] + " does not exist. Please provide the correct data path with the -d option.")
return data_path, find_files
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
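# Usage sketch (illustrative, not part of the original sample): the typical
# pairing of allocate_buffers() and do_inference() for a deserialized engine.
# 'sample.engine' is an assumed placeholder path and the input is random data.
if __name__ == "__main__":
    TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
    with open("sample.engine", "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    with engine.create_execution_context() as context:
        # Fill the flattened, page-locked input host buffer with dummy data
        np.copyto(inputs[0].host,
                  np.random.random(inputs[0].host.shape).astype(inputs[0].host.dtype))
        results = do_inference(context, bindings=bindings, inputs=inputs,
                               outputs=outputs, stream=stream)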
| object-detection-tensorrt-example-master | SSD_Model/utils/common.py |
# Utility functions for performing image inference
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
import os
import sys
import time
import tensorrt as trt
from PIL import Image
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import utils.engine as engine_utils # TRT Engine creation/save/load utils
import utils.model as model_utils # UFF conversion utils
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
import utils.common as common
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class TRTInference(object):
"""Manages TensorRT objects for model inference."""
def __init__(self, trt_engine_path, uff_model_path, trt_engine_datatype=trt.DataType.FLOAT, calib_dataset=None, batch_size=1):
"""Initializes TensorRT objects needed for model inference.
Args:
trt_engine_path (str): path where TensorRT engine should be stored
uff_model_path (str): path of .uff model
trt_engine_datatype (trt.DataType):
requested precision of TensorRT engine used for inference
batch_size (int): batch size for which engine
should be optimized for
"""
# We first load all custom plugins shipped with TensorRT,
# some of them will be needed during inference
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
# Initialize runtime needed for loading TensorRT engine from file
self.trt_runtime = trt.Runtime(TRT_LOGGER)
# TRT engine placeholder
self.trt_engine = None
# Display requested engine settings to stdout
print("TensorRT inference engine settings:")
print(" * Inference precision - {}".format(trt_engine_datatype))
print(" * Max batch size - {}\n".format(batch_size))
# If engine is not cached, we need to build it
if not os.path.exists(trt_engine_path):
            # This function uses the supplied .uff file
            # together with the UffParser to build the TensorRT
            # engine. For more details, check the implementation.
self.trt_engine = engine_utils.build_engine(
uff_model_path, TRT_LOGGER,
trt_engine_datatype=trt_engine_datatype,
calib_dataset=calib_dataset,
batch_size=batch_size)
# Save the engine to file
engine_utils.save_engine(self.trt_engine, trt_engine_path)
        # If the engine was not built above, a cached engine file exists, so load it
if not self.trt_engine:
print("Loading cached TensorRT engine from {}".format(
trt_engine_path))
self.trt_engine = engine_utils.load_engine(
self.trt_runtime, trt_engine_path)
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = \
engine_utils.allocate_buffers(self.trt_engine)
# Execution context is needed for inference
self.context = self.trt_engine.create_execution_context()
# Allocate memory for multiple usage [e.g. multiple batch inference]
input_volume = trt.volume(model_utils.ModelData.INPUT_SHAPE)
self.numpy_array = np.zeros((self.trt_engine.max_batch_size, input_volume))
def infer(self, image_path):
"""Infers model on given image.
Args:
image_path (str): image to run object detection model on
"""
# Load image into CPU
img = self._load_img(image_path)
# Copy it into appropriate place into memory
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, img.ravel())
        # When inferring on a single image, we measure the inference
        # time to report it to the user
inference_start_time = time.time()
# Fetch output from the model
[detection_out, keepCount_out] = common.do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream)
# Output inference time
print("TensorRT inference time: {} ms".format(
int(round((time.time() - inference_start_time) * 1000))))
# And return results
return detection_out, keepCount_out
def infer_webcam(self, arr):
"""Infers model on given image.
Args:
arr (numpy array): image to run object detection model on
"""
# Load image into CPU
img = self._load_img_webcam(arr)
# Copy it into appropriate place into memory
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, img.ravel())
        # When inferring on a single image, we measure the inference
        # time to report it to the user
inference_start_time = time.time()
# Fetch output from the model
[detection_out, keepCount_out] = do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream)
# Output inference time
print("TensorRT inference time: {} ms".format(
int(round((time.time() - inference_start_time) * 1000))))
# And return results
return detection_out, keepCount_out
def infer_batch(self, image_paths):
"""Infers model on batch of same sized images resized to fit the model.
Args:
image_paths (str): paths to images, that will be packed into batch
and fed into model
"""
        # Verify that the supplied batch size is not too big
max_batch_size = self.trt_engine.max_batch_size
actual_batch_size = len(image_paths)
if actual_batch_size > max_batch_size:
raise ValueError(
"image_paths list bigger ({}) than engine max batch size ({})".format(actual_batch_size, max_batch_size))
# Load all images to CPU...
imgs = self._load_imgs(image_paths)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, imgs.ravel())
# ...fetch model outputs...
[detection_out, keep_count_out] = do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size)
# ...and return results.
return detection_out, keep_count_out
def _load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image).reshape(
(im_height, im_width, model_utils.ModelData.get_input_channels())
).astype(np.uint8)
def _load_imgs(self, image_paths):
batch_size = self.trt_engine.max_batch_size
for idx, image_path in enumerate(image_paths):
img_np = self._load_img(image_path)
self.numpy_array[idx] = img_np
return self.numpy_array
def _load_img_webcam(self, arr):
image = Image.fromarray(np.uint8(arr))
model_input_width = model_utils.ModelData.get_input_width()
model_input_height = model_utils.ModelData.get_input_height()
        # Note: the bilinear interpolation used by Pillow is slightly
        # different from the one used by TensorFlow, so if the network
        # receives an image that is not 300x300, the network output may
        # differ from the TensorFlow output
image_resized = image.resize(
size=(model_input_width, model_input_height),
resample=Image.BILINEAR
)
img_np = self._load_image_into_numpy_array(image_resized)
# HWC -> CHW
img_np = img_np.transpose((2, 0, 1))
# Normalize to [-1.0, 1.0] interval (expected by model)
img_np = (2.0 / 255.0) * img_np - 1.0
img_np = img_np.ravel()
return img_np
def _load_img(self, image_path):
image = Image.open(image_path)
model_input_width = model_utils.ModelData.get_input_width()
model_input_height = model_utils.ModelData.get_input_height()
        # Note: the bilinear interpolation used by Pillow is slightly
        # different from the one used by TensorFlow, so if the network
        # receives an image that is not 300x300, the network output may
        # differ from the TensorFlow output
image_resized = image.resize(
size=(model_input_width, model_input_height),
resample=Image.BILINEAR
)
img_np = self._load_image_into_numpy_array(image_resized)
# HWC -> CHW
img_np = img_np.transpose((2, 0, 1))
# Normalize to [-1.0, 1.0] interval (expected by model)
img_np = (2.0 / 255.0) * img_np - 1.0
img_np = img_np.ravel()
return img_np
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
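# Usage sketch (illustrative, not part of the original sample): end-to-end
# single-image inference through the TRTInference wrapper. 'ssd.engine',
# 'ssd.uff' and 'dog.jpg' are assumed placeholder paths.
if __name__ == "__main__":
    trt_inference = TRTInference(
        trt_engine_path="ssd.engine",
        uff_model_path="ssd.uff",
        trt_engine_datatype=trt.DataType.FLOAT,
        batch_size=1)
    detection_out, keep_count_out = trt_inference.infer("dog.jpg")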
| object-detection-tensorrt-example-master | SSD_Model/utils/inference.py |
# VOC dataset utility functions
import numpy as np
VOC_CLASSES_LIST = [
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor'
]
VOC_CLASSES_SET = set(VOC_CLASSES_LIST)
VOC_CLASS_ID = {
cls_name: idx for idx, cls_name in enumerate(VOC_CLASSES_LIST)
}
# Random RGB colors for each class (useful for drawing bounding boxes)
VOC_COLORS = \
np.random.uniform(0, 255, size=(len(VOC_CLASSES_LIST), 3)).astype(np.uint8)
def convert_coco_to_voc(label):
"""Converts COCO class name to VOC class name, if possible.
COCO classes are a superset of VOC classes, but
some classes have different names (e.g. airplane
    in COCO is aeroplane in VOC). This function takes a COCO
    label and converts it to the corresponding VOC label,
    if conversion is needed.
Args:
label (str): COCO label
Returns:
str: VOC label corresponding to given label if such exists,
otherwise returns original label
"""
COCO_VOC_DICT = {
'airplane': 'aeroplane',
'motorcycle': 'motorbike',
'dining table': 'diningtable',
'potted plant': 'pottedplant',
'couch': 'sofa',
'tv': 'tvmonitor'
}
if label in COCO_VOC_DICT:
return COCO_VOC_DICT[label]
else:
return label
def coco_label_to_voc_label(label):
"""Returns VOC label corresponding to given COCO label.
    COCO classes are a superset of VOC classes; this function
    returns the VOC label corresponding to a given COCO class label,
    or None if no such label exists.
Args:
label (str): COCO class label
Returns:
str: VOC label corresponding to given label or None
"""
label = convert_coco_to_voc(label)
if label in VOC_CLASSES_SET:
return label
else:
return None
def is_voc_label(label):
"""Returns boolean which tells if given label is VOC label.
Args:
label (str): object label
Returns:
bool: is given label a VOC class label
"""
return label in VOC_CLASSES_SET
def get_voc_label_color(label):
"""Returns color corresponding to given VOC label, or None.
Args:
label (str): object label
Returns:
np.array: RGB color described in 3-element np.array
"""
if not is_voc_label(label):
return None
else:
return VOC_COLORS[VOC_CLASS_ID[label]]
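# Usage sketch (illustrative) of the label helpers above:
if __name__ == "__main__":
    assert coco_label_to_voc_label("airplane") == "aeroplane"  # renamed class
    assert coco_label_to_voc_label("zebra") is None            # COCO-only class
    assert is_voc_label("dog")
    print(get_voc_label_color("dog"))  # 3-element RGB np.array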
| object-detection-tensorrt-example-master | SSD_Model/utils/voc.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The setup script."""
from setuptools import setup, find_packages
requirements = [
'Click>=6.0',
'docker',
'contexttimer',
# TODO: put package requirements here
]
setup_requirements = [
'pytest-runner',
# TODO(ryanolson): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
'pytest',
# TODO: put package test requirements here
]
setup(
name='nvidia_deepops',
version='0.4.2',
description="Core Python library for DeepOps services",
author="Ryan Olson",
author_email='[email protected]',
# url='https://github.com/ryanolson/nvidia_deepops',
packages=find_packages(exclude=['tests']),
entry_points={
'console_scripts': [
'deepops-progress=nvidia_deepops.cli:progress_cli'
]
},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
keywords='nvidia_deepops',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
# "Programming Language :: Python :: 2",
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| ngc-container-replicator-master | python/setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import logging
import os
# import pprint
import pytest
import traceback
from click.testing import CliRunner
from docker.errors import APIError
from nvidia_deepops import utils
# from nvidia_deepops import cli
from nvidia_deepops.docker import (BaseClient, DockerClient, registry)
BaseRegistry = registry.BaseRegistry
# DockerRegistry = registry.DockerRegistry
DGXRegistry = registry.DGXRegistry
NGCRegistry = registry.NGCRegistry
dev = utils.get_logger(__name__, level=logging.DEBUG)
try:
from .secrets import ngcpassword, dgxpassword
HAS_SECRETS = True
except Exception:
HAS_SECRETS = False
secrets = pytest.mark.skipif(not HAS_SECRETS, reason="No secrets.py file found")
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
# result = runner.invoke(cli.main)
# assert result.exit_code == 0
# assert 'cloner.cli.main' in result.output
# help_result = runner.invoke(cli.main, ['--help'])
# assert help_result.exit_code == 0
# assert '--help Show this message and exit.' in help_result.output
class FakeClient(BaseClient):
def __init__(self, registries=None, images=None):
self.registries = registries or []
self.images = images or []
def registry_for_url(self, url):
for reg in self.registries:
if url.startswith(reg.url):
return reg
raise RuntimeError("registry not found for %s" % url)
def url_to_name_and_tag(self, url, reg=None):
dev.debug("url: %s" % url)
reg = reg or self.registry_for_url(url)
return reg.url_to_name_and_tag(url)
def should_be_present(self, url):
if url not in self.images:
dev.debug(self.images)
raise ValueError("client does not have an image named %s" % url)
def pull(self, url):
reg = self.registry_for_url(url)
name, tag = self.url_to_name_and_tag(url, reg=reg)
reg.should_be_present(name, tag=tag)
self.images.append(url)
self.should_be_present(url)
def tag(self, src, dst):
self.should_be_present(src)
self.images.append(dst)
def push(self, url):
self.should_be_present(url)
reg = self.registry_for_url(url)
dev.debug("push %s" % url)
name, tag = self.url_to_name_and_tag(url, reg=reg)
reg.images[name].append(tag)
def remove(self, url):
self.should_be_present(url)
self.images.remove(url)
class FakeRegistry(BaseRegistry):
def __init__(self, url, images=None):
self.url = url
self.images = collections.defaultdict(list)
images = images or {}
for name, tags in images.items():
self.images[name] = tags
def docker_url(self, name, tag="latest"):
return "{}/{}:{}".format(self.url, name, tag)
def url_to_name_and_tag(self, url):
name_tag = url.replace(self.url + "/", "").split(":")
dev.debug("name_tag: %s" % name_tag)
        if len(name_tag) == 1:
            return name_tag[0], "latest"
elif len(name_tag) == 2:
return name_tag
else:
raise RuntimeError("bad name_tag")
def should_be_present(self, url_or_name, tag=None):
if url_or_name.startswith(self.url) and tag is None:
name, tag = self.url_to_name_and_tag(url_or_name)
else:
name, tag = url_or_name, tag or "latest"
if tag not in self.images[name]:
dev.debug(self.images)
raise ValueError("%s not found for %s" % (tag, name))
def get_image_tags(self, image_name):
return self.images[image_name]
def get_image_names(self, project=None):
def predicate(name):
if project:
return name.startswith(project + "/")
return True
return [name for name in self.images.keys() if predicate(name)]
def get_state(self, project=None, filter_fn=None):
image_names = self.get_image_names(project=project)
state = collections.defaultdict(dict)
for name in image_names:
for tag in self.images[name]:
if filter_fn is not None and callable(filter_fn):
if not filter_fn(name=name, tag=tag, docker_id=tag):
continue
state[name][tag] = tag
return state
def test_fakeregistry_docker_url():
fqdn = FakeRegistry("nvcr.io")
assert fqdn.docker_url("nvidia/pytorch") == "nvcr.io/nvidia/pytorch:latest"
assert fqdn.docker_url("nvidia/pytorch", "17.05") == \
"nvcr.io/nvidia/pytorch:17.05"
fqdn = FakeRegistry("nvcr.io:5000")
assert fqdn.docker_url("nvidia/pytorch") == \
"nvcr.io:5000/nvidia/pytorch:latest"
assert fqdn.docker_url("nvidia/pytorch", "17.05") == \
"nvcr.io:5000/nvidia/pytorch:17.05"
@pytest.fixture
def nvcr():
return FakeRegistry("nvcr.io", images={
"nvidia/tensorflow": ["17.07", "17.06"],
"nvidia/pytorch": ["17.07", "17.05"],
"nvidia/cuda": ["8.0-devel", "9.0-devel"],
"nvidian_sas/dgxbench": ["16.08"],
"nvidian_sas/dgxdash": ["latest"],
})
@pytest.fixture
def locr():
return FakeRegistry("registry:5000", images={
"nvidia/pytorch": ["17.06", "17.05"],
"nvidia/cuda": ["8.0-devel"],
})
def test_get_state(nvcr):
state = nvcr.get_state(project="nvidia")
assert len(state.keys()) == 3
assert len(state["nvidia/tensorflow"].keys()) == 2
assert state["nvidia/cuda"]["9.0-devel"] == "9.0-devel"
def test_get_state_filter(nvcr):
def filter_on_tag(*, name, tag, docker_id):
try:
val = float(tag)
except Exception:
traceback.print_exc()
return True
return val >= 17.06
state = nvcr.get_state(project="nvidia", filter_fn=filter_on_tag)
assert len(state.keys()) == 3
assert len(state["nvidia/tensorflow"].keys()) == 2
assert len(state["nvidia/pytorch"].keys()) == 1
def test_client_lifecycle(nvcr, locr):
client = FakeClient(registries=[nvcr, locr])
# we should see an exception when the image is not in the registry
with pytest.raises(Exception):
client.pull(nvcr.docker_url("nvidia/pytorch", tag="17.06"))
src = nvcr.docker_url("nvidia/pytorch", tag="17.07")
dst = locr.docker_url("nvidia/pytorch", tag="17.07")
with pytest.raises(Exception):
locr.should_be_present(dst)
client.pull(src)
client.should_be_present(src)
client.tag(src, dst)
client.push(dst)
locr.should_be_present(dst)
client.remove(src)
with pytest.raises(Exception):
client.should_be_present(src)
# client.delete_remote(src)
# with pytest.raises(Exception):
# nvcr.should_be_present(src)
def test_pull_nonexistent_image(nvcr):
client = FakeClient(registries=[nvcr])
with pytest.raises(Exception):
client.pull(nvcr.docker_url("nvidia/tensorflow", "latest"))
def test_push_nonexistent_image(locr):
client = FakeClient(registries=[locr])
with pytest.raises(Exception):
client.push(locr.docker_url("ryan/awesome"))
def docker_client_pull_and_remove(client, url):
client.pull(url)
image = client.get(url=url)
assert image is not None
client.remove(url)
with pytest.raises(APIError):
client.client.images.get(url)
assert client.get(url=url) is None
@pytest.mark.remote
@pytest.mark.dockerclient
@pytest.mark.parametrize("image_name", [
"busybox:latest",
"ubuntu:16.04",
])
def test_pull_and_remove_from_docker_hub(image_name):
client = DockerClient()
docker_client_pull_and_remove(client, image_name)
@secrets
@pytest.mark.nvcr
@pytest.mark.remote
@pytest.mark.dockerclient
@pytest.mark.parametrize("image_name", [
"nvcr.io/nvsa_clone/busybox:latest",
"nvcr.io/nvsa_clone/ubuntu:16.04",
])
def test_pull_and_remove_from_nvcr(image_name):
client = DockerClient()
client.login(
username="$oauthtoken",
password=dgxpassword,
registry="nvcr.io/v2")
docker_client_pull_and_remove(client, image_name)
@secrets
@pytest.mark.remote
@pytest.mark.dockerregistry
def test_get_state_dgx():
dgx_registry = DGXRegistry(dgxpassword)
state = dgx_registry.get_state(project="nvidia")
dev.debug(state)
assert state["nvidia/cuda"]["8.0-cudnn5.1-devel-ubuntu14.04"] == \
"c61f351b591fbfca93b3c0fcc3bd0397e7f3c6c2c2f1880ded2fdc1e5f9edd9e"
@secrets
@pytest.mark.remote
@pytest.mark.dockerregistry
def test_get_state_ngc():
ngc_registry = NGCRegistry(ngcpassword)
state = ngc_registry.get_state(project="nvidia")
dev.debug(state)
assert "9.0-cudnn7-devel-ubuntu16.04" in state["nvidia/cuda"]
@secrets
@pytest.mark.nvcr
@pytest.mark.remote
@pytest.mark.dockerregistry
def test_dgx_registry_list():
dgx_registry = DGXRegistry(dgxpassword)
images_and_tags = dgx_registry.get_images_and_tags(project="nvsa_clone")
dev.debug(images_and_tags)
assert "nvsa_clone/busybox" in images_and_tags
assert "nvsa_clone/ubuntu" in images_and_tags
assert "latest" in images_and_tags["nvsa_clone/busybox"]
assert "16.04" in images_and_tags["nvsa_clone/ubuntu"]
@secrets
@pytest.mark.nvcr
@pytest.mark.remote
@pytest.mark.dockerregistry
def test_ngc_registry_list():
ngc_registry = NGCRegistry(ngcpassword)
images_and_tags = ngc_registry.get_images_and_tags(project="nvidia")
dev.debug(images_and_tags)
images = ["nvidia/tensorflow", "nvidia/pytorch",
"nvidia/mxnet", "nvidia/tensorrt"]
for image in images:
assert image in images_and_tags
assert "17.12" in images_and_tags[image]
@secrets
@pytest.mark.nvcr
@pytest.mark.remote
def test_dgx_markdowns():
dgx_registry = DGXRegistry(dgxpassword)
markdowns = dgx_registry.get_image_descriptions(project="nvidia")
dev.debug(markdowns)
assert "nvidia/cuda" in markdowns
@secrets
@pytest.mark.nvcr
@pytest.mark.remote
def test_ngc_markdowns():
ngc_registry = NGCRegistry(ngcpassword)
markdowns = ngc_registry.get_images_and_tags(project="nvidia")
dev.debug(markdowns)
images = ["nvidia/tensorflow", "nvidia/pytorch",
"nvidia/mxnet", "nvidia/tensorrt"]
for image in images:
assert image in markdowns
@pytest.mark.new
@pytest.mark.remote
@pytest.mark.parametrize("url", [
"busybox:latest",
])
def test_pull_and_save_and_remove(url):
client = DockerClient()
client.pull(url)
filename = client.save(url)
assert os.path.exists(filename)
client.remove(url)
assert client.get(url=url) is None
read_url = client.load(filename)
assert read_url == url
assert client.get(url=url) is not None
client.remove(url)
os.unlink(filename)
assert not os.path.exists(filename)
| ngc-container-replicator-master | python/tests/test_nvidia_deepops.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test package for nvidia_deepops."""
| ngc-container-replicator-master | python/tests/__init__.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Top-level package for NVIDIA DeepOps Python Library."""
__author__ = """Ryan Olson"""
__email__ = '[email protected]'
__version__ = '0.4.2'
from nvidia_deepops.progress import Progress
| ngc-container-replicator-master | python/nvidia_deepops/__init__.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Console script for nvidia_deepops."""
import click
from . import progress
@click.command()
def main(args=None):
"""Console script for nvidia_deepops."""
click.echo("cli coming soon...")
@click.command()
@click.option("--name", required=True)
@click.option("--key", required=True)
@click.option("--status", type=click.Choice(progress.STATES.values()))
@click.option("--header")
@click.option("--subtitle")
@click.option("--fixed/--infinite", default=True)
@click.option("--op", type=click.Choice(["create", "append", "update", "run"]))
def progress_cli(name, key, status, header, subtitle, fixed, op):
op = op or "run"
with progress.load_state(name) as state:
if fixed:
state.set_fixed_progress()
else:
state.set_infinite_progress()
if op == "create":
state.steps.clear()
if op == "create" or op == "append":
state.add_step(key=key, status=status, header=header, subHeader=subtitle)
elif op == "update":
step = state.steps[key]
status = status or step["status"]
header = header or step["header"]
subtitle = subtitle or step["subHeader"]
state.update_step(key=key, status=status, header=header, subHeader=subtitle)
state.post()
elif op == "run":
keys = list(state.steps.keys())
completed_keys = keys[0:keys.index(key)]
for k in completed_keys:
state.update_step(key=k, status="complete")
state.update_step(key=key, status="running")
click.echo("{op} Step: {key}\nHeader: {header}\nSubtitle: {subHeader}".format(
op=op.title(), key=key, **state.steps[key])
)
state.post()
else:
raise RuntimeError("this shouldn't happen")
if __name__ == "__main__":
main()
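# Example invocations (illustrative; names, keys and headers are placeholders):
#   deepops-progress --name replicate --key pull --op create \
#       --header "Pulling images" --status waiting
#   deepops-progress --name replicate --key pull --op run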
| ngc-container-replicator-master | python/nvidia_deepops/cli.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import contextlib
import logging
import os
import shlex
import subprocess
import sys
def get_logger(name, level=None):
level = level or logging.INFO
log = logging.getLogger(name)
log.setLevel(level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
return log
def execute(command, stdout=None, stderr=None):
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
return subprocess.check_call(shlex.split(command), stdout=stdout,
stderr=stderr)
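# Usage sketch (illustrative): execute() combined with the cd() helper
# defined below, e.g. running a command from another working directory:
#   with cd("/tmp"):
#       execute("ls -la")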
@contextlib.contextmanager
def cd(path):
old_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
        os.chdir(old_dir)
| ngc-container-replicator-master | python/nvidia_deepops/utils.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import hashlib
import itertools
import json
import logging
import os
import requests
import yaml
from contextlib import contextmanager
from . import utils
log = utils.get_logger(__name__, level=logging.INFO)
STATES = {
"waiting": "waiting",
"running": "running",
"complete": "complete",
"error": "error",
}
def filename(name, path=None):
path = path or "/tmp"
sha256 = hashlib.sha256()
    sha256.update("/tmp/{}".format(name).encode("utf-8"))
filename = sha256.hexdigest()
return os.path.join(path, filename)
@contextmanager
def load_state(name, progress_uri=None, path=None):
progress_uri = progress_uri or os.environ.get("DEEPOPS_WEBUI_PROGRESS_URI")
_filename = filename(name, path=path)
p = Progress(uri=progress_uri)
if os.path.exists(_filename):
        p.read_progress(_filename)
yield p
p.write_progress(_filename)
class Progress:
def __init__(self, *, uri=None, progress_length_unknown=False):
self.uri = uri
self.steps = collections.OrderedDict()
self.progress_length_unknown = progress_length_unknown
def add_step(self, *, key, status=None, header=None, subHeader=None):
self.steps[key] = {
"status": STATES.get(status, "waiting"),
"header": header or key,
"subHeader": subHeader or ""
}
def set_infinite_progress(self):
self.progress_length_unknown = True
def set_fixed_progress(self):
self.progress_length_unknown = False
def update_step(self, *, key, status, header=None, subHeader=None):
step = self.steps[key]
step["status"] = STATES[status]
if header:
step["header"] = header
if subHeader:
step["subHeader"] = subHeader
def write_progress(self, path):
ordered_data = {
"keys": list(self.steps.keys()),
"vals": list(self.steps.values()),
"length_unknown": self.progress_length_unknown
}
with open(path, "w") as file:
yaml.dump(ordered_data, file)
    def read_progress(self, path):
if not os.path.exists(path):
raise RuntimeError("{} does not exist".format(path))
with open(path, "r") as file:
            ordered_data = yaml.safe_load(file)
if ordered_data is None:
return
steps = collections.OrderedDict()
for key, val in zip(ordered_data["keys"], ordered_data["vals"]):
steps[key] = val
self.steps = steps
self.progress_length_unknown = ordered_data["length_unknown"]
@contextmanager
def run_step(self, *, key, post_on_complete=True, progress_length_unknown=None):
progress_length_unknown = progress_length_unknown or self.progress_length_unknown
step = self.steps[key]
step["status"] = STATES["running"]
self.post(progress_length_unknown=progress_length_unknown)
try:
yield step
step["status"] = STATES["complete"]
except Exception as err:
step["status"] = STATES["error"]
step["subHeader"] = str(err)
post_on_complete = True
raise
finally:
if post_on_complete:
self.post(progress_length_unknown=progress_length_unknown)
def data(self, progress_length_unknown=False):
progress_length_unknown = progress_length_unknown or self.progress_length_unknown
steps = [v for _, v in self.steps.items()]
return {
"percent": -2 if progress_length_unknown else -1,
"steps": steps
}
def post(self, progress_length_unknown=None):
progress_length_unknown = progress_length_unknown or self.progress_length_unknown
data = self.data(progress_length_unknown=progress_length_unknown)
log.debug(data)
if self.uri:
try:
r = requests.post(self.uri, json=data)
r.raise_for_status()
except Exception as err:
log.warn("progress update failed with {}".format(str(err)))
| ngc-container-replicator-master | python/nvidia_deepops/progress.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .client import *
from .registry import *
| ngc-container-replicator-master | python/nvidia_deepops/docker/__init__.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import logging
import contexttimer
import requests
from requests.auth import AuthBase, HTTPBasicAuth
from nvidia_deepops import utils
from nvidia_deepops.docker.registry.base import BaseRegistry
__all__ = ('DockerRegistry',)
log = utils.get_logger(__name__, level=logging.INFO)
class RegistryError(Exception):
def __init__(self, message, code=None, detail=None):
super(RegistryError, self).__init__(message)
self.code = code
self.detail = detail
@classmethod
def from_data(cls, data):
"""
Encapsulate an error response in an exception
"""
errors = data.get('errors')
        if not errors:
return cls('Unknown error!')
# For simplicity, we'll just include the first error.
err = errors[0]
return cls(
message=err.get('message'),
code=err.get('code'),
detail=err.get('detail'),
)
class BearerAuth(AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, req):
req.headers['Authorization'] = 'Bearer {}'.format(self.token)
return req
class DockerRegistry(BaseRegistry):
def __init__(self, *, url, username=None, password=None, verify_ssl=False):
url = url.rstrip('/')
if not (url.startswith('http://') or url.startswith('https://')):
url = 'https://' + url
self.url = url
self.username = username
self.password = password
self.verify_ssl = verify_ssl
self.auth = None
def authenticate(self):
"""
        Force an authentication handshake (useful for testing)
"""
r = requests.head(self.url + '/v2/', verify=self.verify_ssl)
self._authenticate_for(r)
def _authenticate_for(self, resp):
"""
        Authenticate to satisfy the unauthorized response
"""
# Get the auth. info from the headers
scheme, params = resp.headers['Www-Authenticate'].split(None, 1)
assert (scheme == 'Bearer')
info = {k: v.strip('"') for k, v in (i.split('=')
for i in params.split(','))}
# Request a token from the auth server
params = {k: v for k, v in info.items() if k in ('service', 'scope')}
auth = HTTPBasicAuth(self.username, self.password)
r2 = requests.get(info['realm'], params=params,
auth=auth, verify=self.verify_ssl)
if r2.status_code == 401:
raise RuntimeError("Authentication Error")
r2.raise_for_status()
self.auth = BearerAuth(r2.json()['token'])
def _get(self, endpoint):
url = '{0}/v2/{1}'.format(self.url, endpoint)
log.debug("GET {}".format(url))
# Try to use previous bearer token
with contexttimer.Timer() as timer:
r = requests.get(url, auth=self.auth, verify=self.verify_ssl)
log.info("GET {} - took {} sec".format(url, timer.elapsed))
# If necessary, try to authenticate and try again
if r.status_code == 401:
self._authenticate_for(r)
r = requests.get(url, auth=self.auth, verify=self.verify_ssl)
data = r.json()
if r.status_code != 200:
raise RegistryError.from_data(data)
log.debug("GOT {}: {}".format(url, pprint.pformat(data, indent=4)))
return data
def get_image_names(self, project=None):
data = self._get('_catalog')
        return data['repositories']
def get_image_tags(self, image_name):
endpoint = '{name}/tags/list'.format(name=image_name)
return self._get(endpoint)['tags']
def get_manifest(self, name, reference):
data = self._get(
'{name}/manifests/{reference}'.format(name=name,
reference=reference))
        pprint.pprint(data)
        return data
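if __name__ == "__main__":
    # Usage sketch (illustrative only): the URL and credentials below are
    # placeholders for a Docker Registry v2 endpoint.
    registry = DockerRegistry(url="https://registry.example.com",
                              username="user", password="secret")
    for name in registry.get_image_names():
        print(name, registry.get_image_tags(name))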
| ngc-container-replicator-master | python/nvidia_deepops/docker/registry/dockregistry.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import base64
import logging
import pprint
import contexttimer
import requests
from nvidia_deepops import utils
from nvidia_deepops.docker.registry.base import BaseRegistry
log = utils.get_logger(__name__, level=logging.INFO)
dev = utils.get_logger("devel", level=logging.ERROR)
__all__ = ('NGCRegistry',)
class NGCRegistry(BaseRegistry):
def __init__(self, api_key, nvcr_url='nvcr.io',
nvcr_api_url=None,
ngc_auth_url=None):
self.api_key = api_key
self.api_key_b64 = base64.b64encode(
api_key.encode("utf-8")).decode("utf-8")
self.url = nvcr_url
nvcr_api_url = 'https://api.ngc.nvidia.com' if nvcr_api_url is None \
else nvcr_api_url
self._nvcr_api_url = nvcr_api_url
ngc_auth_url = 'https://authn.nvidia.com' if ngc_auth_url is None \
else ngc_auth_url
self._ngc_auth_url = ngc_auth_url
self._token = None
self.orgs = None
self.default_org = None
self._authenticate_for(None)
def _authenticate_for(self, resp):
"""
        Authenticate to satisfy the unauthorized response
"""
# Invalidate current bearer token
self._token = None
# Future-proofing the API so the response from the failed request could
# be evaluated here
# Request a token from the auth server
req = requests.get(
url="{}/token?scope=group/ngc".format(self._ngc_auth_url),
headers={
'Authorization': 'ApiKey {}'.format(self.api_key_b64),
'Accept': 'application/json',
}
)
# Raise error on failed request
req.raise_for_status()
# Set new Bearer Token
self._token = req.json()['token']
        # Unfortunately, NGC requests require an org name, even for requests
        # where the org name is not strictly needed.
# To handle this condition, we will get the list of orgs the user
# belongs to
if not self.orgs:
log.debug("no org list - fetching that now")
data = self._get("orgs")
self.orgs = data['organizations']
self.default_org = self.orgs[0]['name']
log.debug("default_org: {}".format(self.default_org))
@property
def token(self):
if not self._token:
self._authenticate_for(None)
if not self._token:
raise RuntimeError(
"NGC Bearer token is not set; this is unexpected")
return self._token
def _get(self, endpoint):
dev.debug("GET %s" % self._api_url(endpoint))
        # try to use the current bearer token; this could result in a 401 if
        # the token has expired
with contexttimer.Timer() as timer:
req = requests.get(self._api_url(endpoint), headers={
'Authorization': 'Bearer {}'.format(self.token),
'Accept': 'application/json',
})
log.info("GET {} - took {} sec".format(self._api_url(endpoint),
timer.elapsed))
if req.status_code == 401:
# re-authenticate and repeat the request - failure here is final
self._authenticate_for(req)
req = requests.get(self._api_url(endpoint), headers={
'Authorization': 'Bearer {}'.format(self.token),
'Accept': 'application/json',
})
req.raise_for_status()
data = req.json()
dev.debug("GOT {}: {}".format(self._api_url(
endpoint), pprint.pformat(data, indent=4)))
return data
def _api_url(self, endpoint):
return "{}/v2/".format(self._nvcr_api_url) + endpoint
def _get_repo_data(self, project=None):
"""
Returns a list of dictionaries containing top-level details for each
image.
:param project: optional project/namespace; filter on all `nvidia` or
`nvidian_sas` projects
:return: list of dicts with the following format:
{
"requestStatus": {
"statusCode": "SUCCESS",
"requestId": "edbbaccf-f1f0-4107-b2ba-47bda0b4b308"
},
"repositories": [
{
"isReadOnly": true,
"isPublic": true,
"namespace": "nvidia",
"name": "caffe",
"description": "## What is NVCaffe?\n\nCaffe is a deep
learning framework ...",
},
{
"isReadOnly": true,
"isPublic": true,
"namespace": "nvidia",
"name": "caffe2",
"description": "## What is Caffe2?\n\nCaffe2 is a
deep-learning framework ...",
},
...
]
}
"""
def in_project(img):
if project:
return img["namespace"] == project
return True
def update(image):
image["image_name"] = image["namespace"] + "/" + image["name"]
return image
data = self._get(
"org/{}/repos?include-teams=true&include-public=true"
.format(self.default_org))
return [update(image)
for image in data["repositories"] if in_project(image)]
def get_image_names(self, project=None, cache=None):
"""
Returns a list of image names optionally filtered on project. All
names include the base project/namespace.
:param project: optional filter, e.g. project="nvidia" filters all
"nvidia/*" images
:return: ["nvidia/caffe", "nvidia/cuda", ...]
"""
return [image["image_name"]
for image in cache or self._get_repo_data(project=project)]
def get_image_descriptions(self, project=None, cache=None):
return {image['image_name']: image["description"]
for image in cache or self._get_repo_data(project=project)}
def get_image_tags(self, image_name, cache=None):
"""
Returns only the list of tag names similar to how the v2 api behaves.
:param image_name: should consist of `<project>/<repo>`, e.g.
`nvidia/caffe`
:return: list of tag strings: ['17.07', '17.06', ... ]
"""
return [image['tag']
for image in cache or self._get_image_data(image_name)]
def _get_image_data(self, image_name):
"""
Returns tags and other attributes of interest for each version of
`image_name`
:param image_name: should consist of `<project>/<repo>`, e.g.
`nvidia/caffe`
:return: list of dicts for each tag with the following format:
{
"requestStatus": {
"statusCode": "SUCCESS",
"requestId": "49468dff-8cba-4dcf-a841-a8bd43495fb5"
},
"images": [
{
"updatedDate": "2017-12-04T05:56:41.1440512Z",
"tag": "17.12",
"user": {},
"size": 1350502380
},
{
"updatedDate": "2017-11-16T21:19:08.363176299Z",
"tag": "17.11",
"user": {},
"size": 1350349188
},
]
}
"""
org_name, repo_name = image_name.split('/')
endpoint = "org/{}/repos/{}/images".format(org_name, repo_name)
return self._get(endpoint).get('images', [])
def get_state(self, project=None, filter_fn=None):
names = self.get_image_names(project=project)
state = collections.defaultdict(dict)
for name in names:
image_data = self._get_image_data(name)
for image in image_data:
tag = image["tag"]
docker_id = image["updatedDate"]
if filter_fn is not None and callable(filter_fn):
if not filter_fn(name=name, tag=tag, docker_id=docker_id):
# if filter_fn is false, then the image is not added to
# the state
continue
state[name][tag] = {
"docker_id": docker_id,
"registry": "nvcr.io",
}
return state
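if __name__ == "__main__":
    # Usage sketch (illustrative only): requires a valid NGC API key; the
    # NGC_API_KEY environment variable name is this example's convention,
    # not something the class itself reads.
    import os
    ngc = NGCRegistry(os.environ["NGC_API_KEY"])
    state = ngc.get_state(project="nvidia")
    for image_name, tags in state.items():
        print(image_name, sorted(tags))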
| ngc-container-replicator-master | python/nvidia_deepops/docker/registry/ngcregistry.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import base64
import logging
import contexttimer
import requests
from nvidia_deepops import utils
from nvidia_deepops.docker.registry.base import BaseRegistry
log = utils.get_logger(__name__, level=logging.INFO)
dev = utils.get_logger("devel", level=logging.ERROR)
__all__ = ('DGXRegistry',)
class DGXRegistry(BaseRegistry):
def __init__(self, api_key, nvcr_url='nvcr.io',
nvcr_api_url=None):
self.api_key = api_key
self.api_key_b64 = base64.b64encode(api_key.encode("utf-8"))\
.decode("utf-8")
self.url = nvcr_url
nvcr_api_url = 'https://compute.nvidia.com' if nvcr_api_url is None \
else nvcr_api_url
self._nvcr_api_url = nvcr_api_url
def _get(self, endpoint):
dev.debug("GET %s" % self._api_url(endpoint))
with contexttimer.Timer() as timer:
req = requests.get(self._api_url(endpoint), headers={
'Authorization': 'APIKey {}'.format(self.api_key_b64),
'Accept': 'application/json',
})
log.info("GET {} - took {} sec".format(self._api_url(endpoint),
timer.elapsed))
req.raise_for_status()
data = req.json()
# dev.debug("GOT {}: {}".format(self._api_url(endpoint),
# pprint.pformat(data, indent=4)))
return data
def _api_url(self, endpoint):
return "{}/rest/api/v1/".format(self._nvcr_api_url) + endpoint
def _get_repo_data(self, project=None):
"""
Returns a list of dictionaries containing top-level details for each
image.
:param project: optional project/namespace; filter on all `nvidia` or
`nvidian_sas` projects
:return: list of dicts with the following format:
{
"isReadOnly": true,
"isPublic": true,
"namespace": "nvidia",
"name": "caffe2",
"description": "## What is Caffe2?\n\nCaffe2 is a deep-learning
framework ... "
}
"""
def in_project(img):
if project:
return img["namespace"] == project
return True
def update(image):
image["image_name"] = image["namespace"] + "/" + image["name"]
return image
data = self._get("repository?includePublic=true")
return [update(image) for image in data["repositories"]
if in_project(image)]
def get_image_names(self, project=None, cache=None):
"""
Returns a list of image names optionally filtered on project. All
names include the base project/namespace.
:param project: optional filter, e.g. project="nvidia" filters all
"nvidia/*" images
:return: ["nvidia/caffe", "nvidia/cuda", ...]
"""
return [image["image_name"]
for image in cache or self._get_repo_data(project=project)]
def get_image_descriptions(self, project=None, cache=None):
return {image['image_name']: image.get("description", "")
for image in cache or self._get_repo_data(project=project)}
def get_image_tags(self, image_name, cache=None):
"""
Returns only the list of tag names similar to how the v2 api behaves.
:param image_name: should consist of `<project>/<repo>`, e.g.
`nvidia/caffe`
:return: list of tag strings: ['17.07', '17.06', ... ]
"""
return [tag['name']
for tag in cache or self._get_image_data(image_name)]
def _get_image_data(self, image_name):
"""
Returns tags and other attributes of interest for each version of
`image_name`
:param image_name: should consist of `<project>/<repo>`, e.g.
`nvidia/caffe`
:return: list of dicts for each tag with the following format:
{
"dockerImageId": "9c496e628c7d64badd2b587d4c0a387b0db00...",
"lastModified": "2017-03-27T18:48:21.000Z",
"name": "17.03",
"size": 1244439426
}
"""
endpoint = "/".join(["repository", image_name])
return self._get(endpoint)['tags']
def get_state(self, project=None, filter_fn=None):
names = self.get_image_names(project=project)
state = collections.defaultdict(dict)
for name in names:
for tag in self._get_image_data(name):
if filter_fn is not None and callable(filter_fn):
if not filter_fn(name=name, tag=tag["name"],
docker_id=tag["dockerImageId"]):
continue
state[name][tag["name"]] = {
"docker_id": tag["dockerImageId"],
"registry": "nvcr.io",
}
return state
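if __name__ == "__main__":
    # Usage sketch (illustrative only): DGXRegistry expects the legacy
    # 40-character API key; DGX_API_KEY is an example variable name.
    import os
    dgx = DGXRegistry(os.environ["DGX_API_KEY"])
    print(dgx.get_images_and_tags(project="nvidia"))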
| ngc-container-replicator-master | python/nvidia_deepops/docker/registry/dgxregistry.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .base import *
from .dockregistry import *
from .dgxregistry import *
from .ngcregistry import *
| ngc-container-replicator-master | python/nvidia_deepops/docker/registry/__init__.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
__all__ = ('BaseRegistry',)
ABC = abc.ABCMeta('ABC', (object,), {}) # compatible with Python 2 *and* 3
class BaseRegistry(ABC):
@abc.abstractmethod
def get_image_names(self, project=None):
raise NotImplementedError()
@abc.abstractmethod
def get_image_tags(self, image_name):
raise NotImplementedError()
@abc.abstractmethod
def get_state(self, project=None, filter_fn=None):
"""
Returns a unique hash for each image and tag with the ability to filter
on the project/prefix.
:param str project: Filter images on the prefix, e.g. project="nvidia"
filters all `nvidia/*` images
        :param filter_fn: Callable that takes (name, tag, docker_id) kwargs
            and returns True if the image should be included in the returned
            set, False otherwise.
:return: dict of dicts
{
"image_name_A": {
"tag_1": "dockerImageId_1",
"tag_2": "dockerImageId_2",
}, ...
}
"""
raise NotImplementedError()
def docker_url(self, name, tag):
return "{}/{}:{}".format(self.url, name, tag)
def get_images_and_tags(self, project=None):
"""
Returns a dict keyed on image_name with values as a list of tags names
:param project: optional filter on image_name, e.g. project='nvidia'
filters all 'nvidia/*' images
:return: Dict key'd by image names. Dict val are lists of tags. Ex.:
{
"nvidia/pytorch": ["17.07"],
"nvidia/tensorflow": ["17.07", "17.06"],
}
"""
image_names = self.get_image_names(project=project)
return {name: self.get_image_tags(name) for name in image_names}
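if __name__ == "__main__":
    # Sketch of a minimal concrete registry (illustrative only). A subclass
    # must implement the three abstract methods; the `url` attribute feeds
    # `docker_url`.
    class InMemoryRegistry(BaseRegistry):
        url = "registry.example.com"

        def __init__(self, state):
            self._state = state  # {"name": {"tag": {"docker_id": "..."}}}

        def get_image_names(self, project=None):
            return list(self._state)

        def get_image_tags(self, image_name):
            return list(self._state[image_name])

        def get_state(self, project=None, filter_fn=None):
            return self._state

    demo = InMemoryRegistry({"nvidia/cuda": {"11.4": {"docker_id": "abc"}}})
    print(demo.get_images_and_tags())
    print(demo.docker_url("nvidia/cuda", "11.4"))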
| ngc-container-replicator-master | python/nvidia_deepops/docker/registry/base.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .base import *
from .dockercli import *
from .dockerpy import *
| ngc-container-replicator-master | python/nvidia_deepops/docker/client/__init__.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import docker
from nvidia_deepops import utils
from nvidia_deepops.docker.client.base import BaseClient
__all__ = ('DockerPy',)
log = utils.get_logger(__name__, level=logging.INFO)
class DockerPy(BaseClient):
def __init__(self):
self.client = docker.from_env(timeout=600)
def login(self, *, username, password, registry):
self.client.login(username=username,
password=password, registry=registry)
def get(self, *, url):
try:
return self.client.images.get(url)
except docker.errors.ImageNotFound:
return None
def pull(self, url):
log.debug("docker pull %s" % url)
self.client.images.pull(url)
def push(self, url):
log.debug("docker push %s" % url)
self.client.images.push(url)
def tag(self, src_url, dst_url):
log.debug("docker tag %s --> %s" % (src_url, dst_url))
image = self.client.images.get(src_url)
image.tag(dst_url)
def remove(self, url):
log.debug("docker rmi %s" % url)
self.client.images.remove(url)
def url2filename(self, url):
return "docker_image_{}.tar".format(url).replace("/", "%%")
def filename2url(self, filename):
return os.path.basename(filename).replace("docker_image_", "")\
.replace(".tar", "").replace("%%", "/")
def save(self, url, path=None):
filename = self.url2filename(url)
if path:
filename = os.path.join(path, filename)
log.debug("saving %s --> %s" % (url, filename))
        image = self.client.api.get_image(url)
        with open(filename, "wb") as tarfile:
            # get_image returns a stream of raw tar-archive chunks
            for chunk in image:
                tarfile.write(chunk)
return filename
def load(self, filename):
log.debug("loading image from %s" % filename)
with open(filename, "rb") as file:
self.client.images.load(file)
basename = os.path.basename(filename)
if basename.startswith("docker_image_"):
url = self.filename2url(filename)
log.debug("expected url from %s is %s" % (filename, url))
return url
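if __name__ == "__main__":
    # Usage sketch (illustrative only): round-trip a small image through a
    # tarfile. Requires a running Docker daemon; "busybox:latest" is just a
    # convenient example image.
    client = DockerPy()
    client.pull("busybox:latest")
    saved = client.save("busybox:latest", path="/tmp")
    print("saved to", saved)
    print("reloaded", client.load(saved))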
| ngc-container-replicator-master | python/nvidia_deepops/docker/client/dockerpy.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import shlex
import subprocess
import sys
import docker
from nvidia_deepops import utils
from nvidia_deepops.docker.client.base import BaseClient
__all__ = ('DockerClient',)
log = utils.get_logger(__name__, level=logging.INFO)
class DockerClient(BaseClient):
def __init__(self):
self.client = docker.from_env(timeout=600)
def call(self, command, stdout=None, stderr=None, quiet=False):
stdout = stdout or sys.stderr
stderr = stderr or sys.stderr
if quiet:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
log.debug(command)
subprocess.check_call(shlex.split(command), stdout=stdout,
stderr=stderr)
def login(self, *, username, password, registry):
self.call(
"docker login -u {} -p {} {}".format(username, password, registry))
self.client.login(username=username,
password=password, registry=registry)
def get(self, *, url):
try:
return self.client.images.get(url)
except docker.errors.ImageNotFound:
return None
def pull(self, url):
self.call("docker pull %s" % url)
return url
def push(self, url):
self.call("docker push %s" % url)
return url
def tag(self, src_url, dst_url):
self.call("docker tag %s %s" % (src_url, dst_url))
return dst_url
def remove(self, url):
self.call("docker rmi %s" % url)
return url
def url2filename(self, url):
return "docker_image_{}.tar".format(url).replace("/", "%%")
def filename2url(self, filename):
return os.path.basename(filename).replace("docker_image_", "")\
.replace(".tar", "").replace("%%", "/")
def save(self, url, path=None):
filename = self.url2filename(url)
if path:
filename = os.path.join(path, filename)
self.call("docker save -o {} {}".format(filename, url))
return filename
def image_exists(self, url):
try:
self.call("docker image inspect {}".format(url), quiet=True)
return True
except Exception:
return False
def load(self, filename, expected_url=None):
url = expected_url or self.filename2url(filename)
basename = os.path.basename(filename)
if expected_url is None and not basename.startswith("docker_image_"):
raise RuntimeError("Invalid filename")
self.call("docker load -i %s" % filename)
if not self.image_exists(url):
log.error("expected url from %s is %s" % (filename, url))
raise RuntimeError("Image {} not found".format(url))
log.debug("loaded {} from {}".format(url, filename))
return url
def build(self, *, target_image, dockerfile="Dockerfile"):
self.call("docker build -f {} -t {} .".format(dockerfile, target_image)) | ngc-container-replicator-master | python/nvidia_deepops/docker/client/dockercli.py |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
__all__ = ('BaseClient',)
ABC = abc.ABCMeta('ABC', (object,), {}) # compatible with Python 2 *and* 3
class BaseClient(ABC):
@abc.abstractmethod
def pull(self, url):
raise NotImplementedError()
@abc.abstractmethod
def tag(self, src_url, dst_url):
raise NotImplementedError()
@abc.abstractmethod
def push(self, url):
raise NotImplementedError()
@abc.abstractmethod
def remove(self, url):
raise NotImplementedError()
| ngc-container-replicator-master | python/nvidia_deepops/docker/client/base.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
requirements = [
'Click>=6.0',
# TODO: put package requirements here
]
setup_requirements = [
'pytest-runner',
# TODO(ryanolson): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
'pytest',
# TODO: put package test requirements here
]
setup(
name='ngc_replicator',
version='0.4.0',
description="NGC Replication Service",
author="Ryan Olson",
author_email='[email protected]',
url='https://github.com/ryanolson/ngc_replicator',
packages=find_packages(include=['ngc_replicator']),
entry_points={
'console_scripts': [
'ngc_replicator=ngc_replicator.ngc_replicator:main'
]
},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
keywords='ngc_replicator',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| ngc-container-replicator-master | replicator/setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ngc_replicator` package."""
import os
import tempfile

import pytest

from ngc_replicator import ngc_replicator
try:
from .secrets import ngcpassword, dgxpassword
HAS_SECRETS = True
except Exception:
HAS_SECRETS = False
secrets = pytest.mark.skipif(not HAS_SECRETS, reason="No secrets.py file found")
@secrets
def nvsa_replicator(*, output_path):
"""
Instance of the test NGC Registry on compute.nvidia.com (project=nvsa)
"""
return ngc_replicator.Replicator(
project="nvsa_clone",
api_key=dgxpassword,
exporter=True,
output_path=output_path,
min_version="16.04"
)
@secrets
def test_clone():
with tempfile.TemporaryDirectory() as tmpdir:
state_file = os.path.join(tmpdir, "state.yml")
assert not os.path.exists(state_file)
replicator = nvsa_replicator(output_path=tmpdir)
replicator.sync()
assert os.path.exists(state_file)
assert 'nvsa_clone/busybox' in replicator.state
| ngc-container-replicator-master | replicator/tests/test_ngc_replicator.py |
# -*- coding: utf-8 -*-
"""Unit test package for ngc_replicator."""
| ngc-container-replicator-master | replicator/tests/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: replicator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='replicator.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x10replicator.proto\"@\n\x07Request\x12\x10\n\x08org_name\x18\x01 \x01(\t\x12\x13\n\x0bmin_version\x18\x02 \x01(\t\x12\x0e\n\x06images\x18\x03 \x03(\t\"6\n\x11ReplicationStatus\x12\x0f\n\x07message\x18\x01 \x01(\t\x12\x10\n\x08progress\x18\x02 \x01(\x02\";\n\x0b\x44ockerImage\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03tag\x18\x02 \x01(\t\x12\x11\n\tdocker_id\x18\x03 \x01(\t2\x9c\x01\n\nReplicator\x12\x34\n\x10StartReplication\x12\x08.Request\x1a\x12.ReplicationStatus\"\x00\x30\x01\x12(\n\nListImages\x12\x08.Request\x1a\x0c.DockerImage\"\x00\x30\x01\x12.\n\x10\x44ownloadedImages\x12\x08.Request\x1a\x0c.DockerImage\"\x00\x30\x01\x62\x06proto3')
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='org_name', full_name='Request.org_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_version', full_name='Request.min_version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='images', full_name='Request.images', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=84,
)
_REPLICATIONSTATUS = _descriptor.Descriptor(
name='ReplicationStatus',
full_name='ReplicationStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='ReplicationStatus.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='progress', full_name='ReplicationStatus.progress', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=140,
)
_DOCKERIMAGE = _descriptor.Descriptor(
name='DockerImage',
full_name='DockerImage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='DockerImage.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tag', full_name='DockerImage.tag', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='docker_id', full_name='DockerImage.docker_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=142,
serialized_end=201,
)
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['ReplicationStatus'] = _REPLICATIONSTATUS
DESCRIPTOR.message_types_by_name['DockerImage'] = _DOCKERIMAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'replicator_pb2'
# @@protoc_insertion_point(class_scope:Request)
))
_sym_db.RegisterMessage(Request)
ReplicationStatus = _reflection.GeneratedProtocolMessageType('ReplicationStatus', (_message.Message,), dict(
DESCRIPTOR = _REPLICATIONSTATUS,
__module__ = 'replicator_pb2'
# @@protoc_insertion_point(class_scope:ReplicationStatus)
))
_sym_db.RegisterMessage(ReplicationStatus)
DockerImage = _reflection.GeneratedProtocolMessageType('DockerImage', (_message.Message,), dict(
DESCRIPTOR = _DOCKERIMAGE,
__module__ = 'replicator_pb2'
# @@protoc_insertion_point(class_scope:DockerImage)
))
_sym_db.RegisterMessage(DockerImage)
_REPLICATOR = _descriptor.ServiceDescriptor(
name='Replicator',
full_name='Replicator',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=204,
serialized_end=360,
methods=[
_descriptor.MethodDescriptor(
name='StartReplication',
full_name='Replicator.StartReplication',
index=0,
containing_service=None,
input_type=_REQUEST,
output_type=_REPLICATIONSTATUS,
options=None,
),
_descriptor.MethodDescriptor(
name='ListImages',
full_name='Replicator.ListImages',
index=1,
containing_service=None,
input_type=_REQUEST,
output_type=_DOCKERIMAGE,
options=None,
),
_descriptor.MethodDescriptor(
name='DownloadedImages',
full_name='Replicator.DownloadedImages',
index=2,
containing_service=None,
input_type=_REQUEST,
output_type=_DOCKERIMAGE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_REPLICATOR)
DESCRIPTOR.services_by_name['Replicator'] = _REPLICATOR
# @@protoc_insertion_point(module_scope)
| ngc-container-replicator-master | replicator/ngc_replicator/replicator_pb2.py |
# -*- coding: utf-8 -*-
"""Top-level package for NGC Replicator."""
__author__ = """Ryan Olson"""
__email__ = '[email protected]'
__version__ = '0.4.0'
| ngc-container-replicator-master | replicator/ngc_replicator/__init__.py |
# -*- coding: utf-8 -*-
import collections
import json
import logging
import os
import pprint
import re
import time
from concurrent import futures
import click
#import grpc
import yaml
from nvidia_deepops import Progress, utils
from nvidia_deepops.docker import DockerClient, NGCRegistry, DGXRegistry
from . import replicator_pb2
#from . import replicator_pb2_grpc
log = utils.get_logger(__name__, level=logging.INFO)
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Replicator:
def __init__(self, *, api_key, project, **optional_config):
log.info("Initializing Replicator")
self._config = optional_config
self.project = project
self.service = self.config("service")
if len(api_key) == 40:
self.nvcr = DGXRegistry(api_key)
else:
self.nvcr = NGCRegistry(api_key)
self.nvcr_client = DockerClient()
self.nvcr_client.login(username="$oauthtoken", password=api_key, registry="nvcr.io/v2")
self.registry_client = None
self.min_version = self.config("min_version")
self.py_version = self.config("py_version")
self.images = self.config("image") or []
self.progress = Progress(uri=self.config("progress_uri"))
if self.config("registry_url"):
self.registry_url = self.config("registry_url")
self.registry_client = DockerClient()
if self.config("registry_username") and self.config("registry_password"):
self.registry_client.login(username=self.config("registry_username"),
password=self.config("registry_password"),
registry=self.config("registry_url"))
self.output_path = self.config("output_path") or "/output"
self.state_path = os.path.join(self.output_path, "state.yml")
self.state = collections.defaultdict(dict)
if os.path.exists(self.state_path):
with open(self.state_path, "r") as file:
tmp = yaml.load(file, Loader=yaml.UnsafeLoader)
if tmp:
for key, val in tmp.items():
self.state[key] = val
self.export_to_tarfile = self.config("exporter")
self.third_party_images = []
if self.config("external_images"):
self.third_party_images.extend(self.read_external_images_file())
if self.export_to_tarfile:
log.info("tarfiles will be saved to {}".format(self.output_path))
self.export_to_singularity = self.config("singularity")
if self.export_to_singularity:
log.info("singularity images will be saved to {}".format(self.output_path))
log.info("Replicator initialization complete")
def read_external_images_file(self):
with open(self.config("external_images"), "r") as file:
data = yaml.load(file, Loader=yaml.UnsafeLoader)
images = data.get("images", [])
images = [replicator_pb2.DockerImage(name=image["name"], tag=image.get("tag", "latest")) for image in images]
return images
def config(self, key, default=None):
return self._config.get(key, default)
def save_state(self):
with open(self.state_path, "w") as file:
yaml.dump(self.state, file)
def sync(self, project=None):
log.info("Replicator Started")
# pull images
new_images = {image.name: image.tag for image in self.sync_images(project=project)}
# pull image descriptions - new_images should be empty for dry runs
self.progress.update_step(key="markdown", status="running")
self.update_progress()
descriptions = self.nvcr.get_image_descriptions(project=project)
for image_name, _ in new_images.items():
markdown = os.path.join(self.output_path, "description_{}.md".format(image_name.replace('/', '%%')))
with open(markdown, "w") as out:
out.write(descriptions.get(image_name, ""))
self.progress.update_step(key="markdown", status="complete")
self.update_progress()
log.info("Replicator finished")
def sync_images(self, project=None):
project = project or self.project
for image in self.images_to_download(project=project):
if self.config("dry_run"):
click.echo("[dry-run] clone_image({}, {}, {})".format(image.name, image.tag, image.docker_id))
continue
log.info("Pulling {}:{}".format(image.name, image.tag))
self.clone_image(image.name, image.tag, image.docker_id) # independent
self.state[image.name][image.tag] = image.docker_id # dep [clone]
yield image
self.save_state()
def images_to_download(self, project=None):
project = project or self.project
self.progress.add_step(key="query", status="running", header="Getting list of Docker images to clone")
self.update_progress(progress_length_unknown=True)
# determine images and tags (and dockerImageIds) from the remote registry
if self.config("strict_name_match"):
filter_fn = self.filter_on_tag_strict if self.min_version or self.images else None
else:
filter_fn = self.filter_on_tag if self.min_version or self.images else None
remote_state = self.nvcr.get_state(project=project, filter_fn=filter_fn)
# determine which images need to be fetch for the local state to match the remote
to_pull = self.missing_images(remote_state)
# sort images into two buckets: cuda and not cuda
cuda_images = { key: val for key, val in to_pull.items() if key.endswith("cuda") }
other_images = { key: val for key, val in to_pull.items() if not key.endswith("cuda") }
all_images = [image for image in self.images_from_state(cuda_images)]
all_images.extend([image for image in self.images_from_state(other_images)])
if self.config("external_images"):
all_images.extend(self.third_party_images)
for image in all_images:
self.progress.add_step(key="{}:{}".format(image.name, image.tag),
header="Cloning {}:{}".format(image.name, image.tag),
subHeader="Waiting to pull image")
self.progress.add_step(key="markdown", header="Downloading NVIDIA Deep Learning READMEs")
self.progress.update_step(key="query", status="complete")
self.update_progress()
for image in self.images_from_state(cuda_images):
yield image
for image in self.images_from_state(other_images):
yield image
if self.config("external_images"):
for image in self.third_party_images:
yield image
def update_progress(self, progress_length_unknown=False):
self.progress.post(progress_length_unknown=progress_length_unknown)
@staticmethod
def images_from_state(state):
for image_name, tag_data in state.items():
for tag, docker_id in tag_data.items():
yield replicator_pb2.DockerImage(name=image_name, tag=tag, docker_id=docker_id.get("docker_id", ""))
def clone_image(self, image_name, tag, docker_id):
if docker_id:
url = self.nvcr.docker_url(image_name, tag=tag)
else:
url = "{}:{}".format(image_name, tag)
if self.export_to_tarfile:
tarfile = self.nvcr_client.url2filename(url)
if os.path.exists(tarfile):
log.warning("{} exists; removing and rebuilding".format(tarfile))
os.remove(tarfile)
log.info("cloning %s --> %s" % (url, tarfile))
self.progress.update_step(key="{}:{}".format(image_name, tag), status="running", subHeader="Pulling image from Registry")
self.update_progress()
self.nvcr_client.pull(url)
self.progress.update_step(key="{}:{}".format(image_name, tag), status="running", subHeader="Saving image to tarfile")
self.update_progress()
self.nvcr_client.save(url, path=self.output_path)
self.progress.update_step(key="{}:{}".format(image_name, tag), status="complete", subHeader="Saved {}".format(tarfile))
log.info("Saved image: %s --> %s" % (url, tarfile))
if self.export_to_singularity:
sif = os.path.join(self.output_path, "{}.sif".format(url).replace("/", "_"))
if os.path.exists(sif):
log.warning("{} exists; removing and rebuilding".format(sif))
os.remove(sif)
log.info("cloning %s --> %s" % (url, sif))
self.progress.update_step(key="{}:{}".format(image_name, tag), status="running", subHeader="Pulling image from Registry")
self.update_progress()
self.nvcr_client.pull(url)
self.progress.update_step(key="{}:{}".format(image_name, tag), status="running", subHeader="Saving image to singularity image file")
self.update_progress()
utils.execute("singularity build {} docker-daemon://{}".format(sif, url))
self.progress.update_step(key="{}:{}".format(image_name, tag), status="complete", subHeader="Saved {}".format(sif))
log.info("Saved image: %s --> %s" % (url, sif))
if self.registry_client:
push_url = "{}/{}:{}".format(self.registry_url, image_name, tag)
self.nvcr_client.pull(url)
self.registry_client.tag(url, push_url)
self.registry_client.push(push_url)
self.registry_client.remove(push_url)
if not self.config("no_remove") and not image_name.endswith("cuda") and self.nvcr_client.get(url=url):
try:
self.nvcr_client.remove(url)
            except Exception:
                log.warning("tried to remove docker image {}, but unexpectedly failed".format(url))
return image_name, tag, docker_id
def filter_on_tag(self, *, name, tag, docker_id, strict_name_match=False):
"""
Filter function used by the `nvidia_deepops` library for selecting images.
Return True if the name/tag/docker_id combo should be included for consideration.
        Return False to exclude the image from consideration, i.e. it will not be cloned/replicated.
"""
if self.images:
log.debug("filtering on images name, only allow {}".format(self.images))
found = False
for image in self.images:
if (not strict_name_match) and (image in name):
log.debug("{} passes filter; matches {}".format(name, image))
found = True
elif (strict_name_match) and image.strip() == (name.split('/')[-1]).strip():
log.debug("{} passes strict filter; matches {}".format(name, image))
found = True
if not found:
log.debug("{} fails filter by image name".format(name))
return False
# if you are here, you have passed the name test
# now, we check the version of the container by trying to extract the YY.MM details from the tag
if self.py_version:
if tag.find(self.py_version) == -1:
log.debug("tag {} fails py_version {} filter".format(tag, self.py_version))
return False
version_regex = re.compile(r"^(\d\d\.\d\d)")
float_tag = version_regex.findall(tag)
if float_tag and len(float_tag) == 1:
try:
                    # this is a bit ugly, but if for some reason the cast of
                    # float_tag[0] or min_version fails, we fall back to
                    # safety and skip tag filtering
val = float(float_tag[0])
lower_bound = float(self.min_version)
if val < lower_bound:
return False
except Exception:
pass
# if you are here, you have passed the tag test
return True
def filter_on_tag_strict(self, *, name, tag, docker_id):
return self.filter_on_tag(name=name, tag=tag, docker_id=docker_id, strict_name_match=True)
def missing_images(self, remote):
"""
Generates a dict of dicts on a symmetric difference between remote/local which also includes
any image/tag pair in both but with differing dockerImageIds.
:param remote: `image_name:tag:docker_id` of remote content
:param local: `image_name:tag:docker_id` of local content
:return: `image_name:tag:docker_id` for each missing or different entry in remote but not in local
"""
to_pull = collections.defaultdict(dict)
local = self.state
# determine which images are not present
image_names = set(remote.keys()) - set(local.keys())
for image_name in image_names:
to_pull[image_name] = remote[image_name]
# log.debug("remote image names: %s" % remote.keys())
# log.debug("local image names: %s" % local.keys())
log.debug("image names not present: %s" % to_pull.keys())
# determine which tags are not present
for image_name, tag_data in remote.items():
tags = set(tag_data.keys()) - set(local[image_name].keys())
# log.debug("remote %s tags: %s" % (image_name, tag_data.keys()))
# log.debug("local %s tags: %s" % (image_name, local[image_name].keys()))
log.debug("tags not present for image {}: {}".format(image_name, tags))
for tag in tags:
to_pull[image_name][tag] = remote[image_name][tag]
# determine if any name/tag pairs have a different dockerImageId than previously seen
        # this handles the case where someone pushes a new image, overwriting
        # an existing name:tag
for image_name, tag_data in remote.items():
if image_name not in local: continue
for tag, docker_id in tag_data.items():
if tag not in local[image_name]: continue
if docker_id.get("docker_id") != local[image_name][tag]:
log.debug("%s:%s changed on server" % (image_name, tag))
to_pull[image_name][tag] = docker_id
log.info("images to be fetched: %s" % pprint.pformat(to_pull, indent=4))
return to_pull
## class ReplicatorService(replicator_pb2_grpc.ReplicatorServicer):
##
## def __init__(self, *, replicator):
## self.replicator = replicator
## self.replicator.service = True
##
## def StartReplication(self, request, context):
## project = request.org_name or self.replicator.project
## for image in self.replicator.sync_images(project=project):
## yield image
##
## def ListImages(self, request, context):
## project = request.org_name or self.replicator.project
## for image in self.replicator.images_to_download(project=project):
## yield image
## # images_and_tags = self.replicator.nvcr.get_images_and_tags(project=project)
## # for image_name, tags in images_and_tags.items():
## # for tag in tags:
## # yield replicator_pb2.DockerImage(name=image_name, tag=tag)
##
## def DownloadedImages(self, request, context):
## for images in self.replicator.images_from_state(self.replicator.state):
## yield images
@click.command()
@click.option("--api-key", envvar="NGC_REPLICATOR_API_KEY")
@click.option("--project", default="nvidia")
@click.option("--output-path", default="/output")
@click.option("--min-version")
@click.option("--py-version")
@click.option("--image", multiple=True)
@click.option("--registry-url")
@click.option("--registry-username")
@click.option("--registry-password")
@click.option("--dry-run", is_flag=True)
@click.option("--service", is_flag=True)
@click.option("--external-images")
@click.option("--progress-uri")
@click.option("--no-remove", is_flag=True)
@click.option("--exporter/--no-exporter", default=True)
@click.option("--templater/--no-templater", default=False)
@click.option("--singularity/--no-singularity", default=False)
@click.option("--strict-name-match/--no-strict-name-match", default=False)
def main(**config):
"""
NGC Replication Service
"""
if config.get("api_key", None) is None:
click.echo("API key required; use --api-key or NGC_REPLICATOR_API_KEY", err=True)
raise click.Abort
replicator = Replicator(**config)
if replicator.service:
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
# replicator_pb2_grpc.add_ReplicatorServicer_to_server(
# ReplicatorService(replicator=replicator), server
# )
# server.add_insecure_port('[::]:50051')
# log.info("starting GRPC service on port 50051")
# server.start()
# try:
# while True:
# time.sleep(_ONE_DAY_IN_SECONDS)
# except KeyboardInterrupt:
# server.stop(0)
raise NotImplementedError("GPRC Service has been depreciated")
else:
replicator.sync()
if __name__ == "__main__":
main(auto_envvar_prefix='NGC_REPLICATOR')
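# Example invocation (a sketch, not part of the replicator; the flag names come
# from the click options above, while the project, versions, image names, and
# registry URL are hypothetical placeholders):
#
#   NGC_REPLICATOR_API_KEY=<api-key> python ngc_replicator.py \
#       --project nvidia \
#       --min-version 21.02 \
#       --image pytorch --image tensorflow \
#       --registry-url registry.local:5000 \
#       --dry-run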
| ngc-container-replicator-master | replicator/ngc_replicator/ngc_replicator.py |
# Magnum IO Developer Environment container recipe
Stage0 += comment('GENERATED FILE, DO NOT EDIT')
Stage0 += baseimage(image='nvcr.io/nvidia/cuda:11.4.0-devel-ubuntu20.04')
# GDS 1.0 is part of the CUDA base image
Stage0 += nsight_systems(cli=True, version='2021.2.1')
Stage0 += mlnx_ofed(version='5.3-1.0.0.1')
Stage0 += gdrcopy(ldconfig=True, version='2.2')
Stage0 += ucx(version='1.10.1', cuda=True,
gdrcopy='/usr/local/gdrcopy', ldconfig=True,
disable_static=True, enable_mt=True)
Stage0 += nvshmem(version='2.2.1') # See hack in installer.sh for 2.2.1 artifact renaming
Stage0 += nccl(cuda='11.4', version='2.10.3-1')
Stage0 += apt_get(ospackages=['cuda-tools-11-4'])
Stage0 += copy(src=['magnum-io.Dockerfile', 'third_party.txt', 'README.md'], dest='/')
Stage0 += environment(variables={'MAGNUM_IO_VERSION': '21.07'})
Stage0 += raw(docker='SHELL ["/bin/bash", "-c"]\n\
CMD ["/bin/bash" ]')
| MagnumIO-main | dev-env/magnum-io-hpccm.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Basics
import os
import sys
import numpy as np
import time
import subprocess as sp
import psutil
# Torch
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.cuda.amp as amp
import torch.distributed as dist
#MPI
from mpi4py import MPI
#print helper
def printr(msg, comm, rank=0):
if comm.Get_rank() == rank:
print(msg)
# Custom
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#from utils import utils
from utils import losses
from utils import model_handler as mh
from utils import parser
#DALI
from data import cam_numpy_dali_dataset as cam
def main(pargs):
#init MPI
comm = MPI.COMM_WORLD.Dup()
comm_rank = comm.Get_rank()
comm_size = comm.Get_size()
num_devices = torch.cuda.device_count() if pargs.device_count is None else pargs.device_count
comm_local_rank = comm_rank % num_devices
    # parameters for prediction
visualize = pargs.visualize
use_gds = pargs.enable_gds
use_fp16 = pargs.enable_fp16
use_trt = pargs.enable_trt
use_graphs = pargs.enable_graphs
use_nhwc = pargs.enable_nhwc
do_inference = (pargs.mode == "inference")
do_train = (pargs.mode == "train")
drop_fs_cache = pargs.drop_fs_cache
preprocess = pargs.preprocess
channels = [0,1,2,10]
batch_size = pargs.batch_size
local_batch_size = batch_size // comm_size
max_threads = pargs.max_inter_threads
if visualize and pargs.visualization_output_dir is None:
raise ValueError("Please specify a valid --visualization_output_dir if you want to visualize the results.")
# enable distributed training if requested
if do_train:
addrport = os.getenv("PMIX_SERVER_URI2").split("//")[1]
comm_addr = addrport.split(":")[0]
comm_port = "29500"
os.environ["MASTER_ADDR"] = comm_addr
os.environ["MASTER_PORT"] = comm_port
dist.init_process_group(backend = "nccl",
rank = comm_rank,
world_size = comm_size)
# parameters for visualization
if (pargs.visualization_output_dir is not None) and visualize:
predict_dir = os.path.join(pargs.visualization_output_dir, "predict")
os.makedirs(predict_dir, exist_ok=True)
truth_dir = os.path.join(pargs.visualization_output_dir, "true")
os.makedirs(truth_dir, exist_ok=True)
# Initialize run
rng = np.random.RandomState(seed=333)
torch.manual_seed(333)
# Define architecture
if torch.cuda.is_available():
printr("Using GPUs", comm, 0)
if pargs.device_id is not None:
device = torch.device("cuda", pargs.device_id)
else:
device = torch.device("cuda", comm_local_rank)
torch.cuda.set_device(device)
torch.backends.cudnn.benchmark = True
else:
printr("Using CPUs", comm, 0)
device = torch.device("cpu")
# create model handler
model = mh.ModelHandler(pargs,
channels,
local_batch_size,
device,
comm_size,
comm_rank,
run_in_stream = use_gds)
# set up for training
if do_train:
model.net_fw.train()
gscaler = amp.GradScaler(enabled = use_fp16)
optimizer = optim.AdamW(model.net_fw.parameters(), lr = 1.e-4)
criterion = losses.fp_loss
# Get data
data_dirs = pargs.data_dirs
data_loader = cam.CamDaliDataloader(data_dirs,
prefix_data = "data-*.npy",
prefix_label = "label-*.npy" if do_train or visualize else None,
channels = channels,
batchsize = local_batch_size,
num_threads = max_threads,
device = device.index,
num_shards = comm_size,
shard_id = comm_rank,
stick_to_shard = not pargs.global_shuffle,
lazy_init = True,
read_gpu = use_gds,
use_mmap = not pargs.disable_mmap,
shuffle = pargs.shuffle,
preprocess = preprocess)
#create vizc instance
    if visualize:
        # lazy import: pulls in matplotlib/basemap only when visualization is
        # requested (assumes the CamVisualizer shipped in utils/visualizer.py)
        from utils import visualizer as vizc
        viz = vizc.CamVisualizer()
printr("starting benchmark", comm, 0)
    # for cpu utilization measurements
cpu_util = []
#do multiple experiments if requested
for nr in range(-pargs.num_warmup_runs, pargs.num_runs):
# flush the caches
if drop_fs_cache and (comm_rank == 0):
print("Dropping caches")
with open("/proc/sys/vm/drop_caches", "w") as outfile:
sp.run(["echo", "1"], stdout=outfile)
#sync up
printr(f"Running iteration {nr}", comm, 0)
comm.barrier()
#start time
tstart = time.time()
it = 0
# set baseline cpu % interval
psutil.cpu_percent(interval=None)
# do the loop
for inputs, labels, source in data_loader:
#increase iteration count
it += 1
# run model
outputs, labels = model.run(inputs, labels)
#training?
if do_train:
with torch.cuda.stream(model.fw_stream):
with amp.autocast(enabled = use_fp16):
loss = criterion(outputs, labels, [1., 1., 1.])
#BW
gscaler.scale(loss).backward()
gscaler.step(optimizer)
gscaler.update()
if do_inference:
with torch.cuda.stream(model.fw_stream):
with torch.no_grad():
predictions = torch.max(outputs, 1)[1]
#do we want to plot?
if visualize:
#extract tensors as numpy arrays
datatens = inputs.cpu().detach().numpy()
predtens = predictions.cpu().detach().numpy()
labeltens = labels.cpu().detach().numpy()
for i in range(0,len(source)):
print("visualizing " + source[i])
npypath = source[i]
npybase = os.path.basename(npypath)
year = npybase[5:9]
month = npybase[10:12]
day = npybase[13:15]
hour = npybase[16:18]
viz.plot(os.path.join(predict_dir, os.path.splitext(os.path.basename(npybase))[0]),
"Predicted",
np.squeeze(datatens[i,0,...]),
np.squeeze(predtens[i,...]),
year=year,
month=month,
day=day,
hour=hour)
viz.plot(os.path.join(truth_dir, os.path.splitext(os.path.basename(npybase))[0]),
"Ground truth",
np.squeeze(datatens[i,0,...]),
np.squeeze(labeltens[i,...]),
year=year,
month=month,
day=day,
hour=hour)
# cpu %: measure here so that estimate is more conservative (higher)
cpu_util.append(psutil.cpu_percent(interval=None))
#sync up
model.sync()
comm.barrier()
# communicate cpu utilization
cpu_util_arr = np.array(cpu_util, dtype=np.float32)
cpu_util_arr = np.stack(comm.allgather(cpu_util_arr), axis=0)
# compute average per rank:
cpu_util_arr = np.mean(cpu_util_arr, axis=1)
#end time: measure here so that estimate is more conservative (lower)
tend = time.time()
printr("inference complete\n", comm, 0)
printr("total time: {:.2f} seconds for {} samples".format(tend - tstart, it * batch_size), comm, 0)
printr("iteration time: {:.4f} seconds/sample".format((tend - tstart)/float(it * batch_size)), comm, 0)
printr("throughput: {:.2f} samples/second".format(float(it * batch_size)/(tend - tstart)), comm, 0)
data_size, label_size = data_loader.sample_sizes
sample_size = (data_size + label_size) / 1024 / 1024 / 1024
printr("bandwidth: {:.2f} GB/s".format(float(it * batch_size * sample_size) / (tend - tstart)), comm, 0)
printr(f"cpu utilization: {np.mean(cpu_util_arr):.2f}% (min: {np.min(cpu_util_arr):.2f}%, max: {np.max(cpu_util_arr):.2f}%)", comm, 0)
#write results to file
if (nr >= 0) and (comm_rank == 0):
mode = ('a' if nr > 0 else 'w+')
with open(pargs.outputfile, mode) as f:
f.write("run {}:\n".format(nr + 1))
f.write("total time: {:.2f} seconds for {} samples\n".format(tend - tstart, it * batch_size))
f.write("iteration time: {:.4f} seconds/sample\n".format((tend - tstart)/float(it * batch_size)))
f.write("throughput: {:.2f} samples/second\n".format(float(it * batch_size)/(tend - tstart)))
f.write("bandwidth: {:.2f} GB/s\n".format(float(it * batch_size * sample_size) / (tend - tstart)))
f.write("cpu utilization: {:.2f}%\n".format(np.mean(cpu_util_arr)))
f.write("\n")
# wait for everyone to finish
comm.barrier()
if __name__ == "__main__":
main(parser.parse_arguments())
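# Example launch (a sketch; the script derives its rank/GPU mapping from MPI,
# so it is typically started under mpirun -- paths, rank count, and batch size
# are hypothetical placeholders):
#
#   mpirun -np 8 python test_numpy_dali.py \
#       --data_dirs /data/shard0 /data/shard1 \
#       --mode inference --batch_size 64 --enable_fp16 --enable_gds \
#       --num_warmup_runs 1 --num_runs 5 --outputfile results.txt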
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/driver/test_numpy_dali.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse as ap
def parse_arguments():
# set up parser
AP = ap.ArgumentParser()
AP.add_argument("--outputfile", type=str, help="Full path to output file.")
AP.add_argument("--data_dirs", type=str, nargs='+', help="List of directories which hold data. The files will be sharded evenly across all ranks/GPUs.")
AP.add_argument("--trt_model_dir", type=str, default=None, help="Directory where to store and read TRT models to and from.")
AP.add_argument("--num_warmup_runs", type=int, default=1, help="Number of warmup experiments to run.")
AP.add_argument("--num_runs", type=int, default=1, help="Number of experiments to run.")
AP.add_argument("--batch_size", type=int, default=16, help="Global batch size. Make sure it is bigger than the number of ranks.")
AP.add_argument("--max_inter_threads", type=int, default=1, help="Maximum number of concurrent readers")
AP.add_argument("--max_intra_threads", type=int, default=8, help="Maximum degree of parallelism within reader")
AP.add_argument("--device_count", type=int, default=None, help="Number of devices, necessary to override torch default detection.")
AP.add_argument("--device_id", type=int, default=None, help="Select device to run on, if None it is selected automatically.")
AP.add_argument("--global_shuffle", action='store_true')
AP.add_argument("--shuffle", action='store_true')
AP.add_argument("--visualize", action='store_true')
AP.add_argument("--visualization_output_dir", type=str, default=None, help="Path for storing the visualizations (if requested).")
AP.add_argument("--preprocess", action='store_true')
AP.add_argument("--mode", type=str, choices=["train", "inference", "io"], help="Which mode to run the benchmark in")
AP.add_argument("--enable_gds", action='store_true')
AP.add_argument("--enable_fp16", action='store_true')
AP.add_argument("--enable_trt", action='store_true')
AP.add_argument("--enable_graphs", action='store_true')
AP.add_argument("--enable_nhwc", action='store_true')
AP.add_argument("--drop_fs_cache", action='store_true')
AP.add_argument("--disable_mmap", action='store_true')
parsed = AP.parse_args()
# sanitization
if parsed.mode in {"inference", "train"}:
parsed.preprocess = True
if parsed.enable_gds:
# we do not need to drop caches here and disable mmap
parsed.drop_fs_cache = False
parsed.disable_mmap = True
return parsed
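# Note on the sanitization above: --enable_gds reads straight into GPU memory,
# so mmap-based reads are disabled and dropping the page cache is pointless.
# A quick sanity check (a sketch, assuming --mode inference --enable_gds):
#   parsed = parse_arguments()
#   assert parsed.preprocess and parsed.disable_mmap and not parsed.drop_fs_cache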
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/utils/parser.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import io
import requests
import argparse as AP
import numpy as np
import h5py as h5
import concurrent.futures as cf
def process_file(source_path, target_path):
# get file content
    try:
        response = requests.get(source_path)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        print(f"Cannot open file {source_path}")
        return (source_path, False)
# load data
handle = io.BytesIO(response.content)
with h5.File(handle, "r") as f:
data = f["climate"]["data"][...].astype(np.float32)
label = np.stack([f["climate"]["labels_0"][...], f["climate"]["labels_1"][...]], axis=-1)
# get file basename:
basename = os.path.basename(source_path)
# save data and label:
dataname = os.path.join(target_path, basename.replace(".h5", ".npy"))
labelname = os.path.join(target_path, basename.replace(".h5", ".npy").replace("data-", "label-"))
# save stuff
np.save(dataname, data)
np.save(labelname, label)
return (source_path, True)
def download_data(target_dir, num_files, overwrite = False, num_streams = 1, num_retries = 5):
# fetch from here
root_url = "https://portal.nersc.gov/project/dasrepo/deepcam/climate-data/All-Hist/"
# get list of files
response = requests.get(root_url + "validation/files.txt")
filelist = response.content.decode('utf-8').strip().split("\n")
filelist = sorted([x for x in filelist if x.endswith(".h5")])
# create directory if doesn't exist
os.makedirs(target_dir, exist_ok=True)
# only use first n-samples
if num_files > 0:
filelist = filelist[0:num_files]
# check which files are missing
files_downloaded = [x for x in os.listdir(target_dir) if x.endswith(".npy") and x.startswith("data-")]
# filter tags
if not overwrite:
files_missing = list(filter(lambda x: x.replace(".h5", ".npy") not in files_downloaded, filelist))
else:
files_missing = filelist
# perform the loads
    executor = cf.ThreadPoolExecutor(max_workers = num_streams)
# starts the download
print("Starting download")
retry_count=0
    while files_missing and (retry_count < num_retries):
futures = []
for fname in files_missing:
futures.append(executor.submit(process_file, root_url + "validation/" + fname, target_dir))
files_missing = []
for future in cf.as_completed(futures):
fname, status = future.result()
if not status:
print(f"Re-queueing {fname}")
files_missing.append(os.path.basename(fname))
# increment retry counter
retry_count += 1
print("done")
if files_missing:
files_missing_string = "\n".join(files_missing)
print(f"The following files could not be downloaded: {files_missing_string}")
if __name__ == "__main__":
parser = AP.ArgumentParser(description = 'Download and preprocess files for deepcam benchmark')
parser.add_argument('--target-dir', type=str, help="directory where to download the files to")
parser.add_argument('--num-files', type=int, default=-1, help="How many files to download, default will download all files")
parser.add_argument('--num-streams', type=int, default=1, help="How many parallel streams do we want to employ for downloading")
parser.add_argument('--num-retries', type=int, default=5, help="Number of retries scheduled per file before it is aborted")
parser.add_argument('--overwrite', action='store_true')
args = parser.parse_args()
# download data
    download_data(args.target_dir,
                  args.num_files,
                  args.overwrite,
                  num_streams = args.num_streams,
                  num_retries = args.num_retries)
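# Example (a sketch; the target directory is a hypothetical placeholder):
#   python download_data.py --target-dir /data/deepcam --num-files 16 --num-streams 4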
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/utils/download_data.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# base modules
import os
import sys
# torch modules
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.cuda.amp as amp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
# tensorrt modules
import tensorrt as trt
import torch_tensorrt
# custom modules
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from architecture import deeplab_xception
#reload model helper
def reload_model(model_path, model, device_id):
# load checkpoint
checkpoint = torch.load(model_path, map_location = device_id)
# we need to do some key hacking for the model dict
model_dict = {}
for k in checkpoint:
model_dict[k.replace("module.","")] = checkpoint[k]
#load model
model.load_state_dict(model_dict)
class ModelHandler(object):
def __init__(self, pargs, channels, local_batch_size, device, comm_size, comm_rank, run_in_stream=False):
# extract parameters
self.use_fp16 = pargs.enable_fp16
self.use_trt = pargs.enable_trt
self.use_graphs = pargs.enable_graphs
self.use_nhwc = pargs.enable_nhwc
self.do_inference = (pargs.mode == "inference")
self.do_train = (pargs.mode == "train")
model_path = "/share/model.pth"
trt_model_dir = pargs.trt_model_dir
self.device = device
# create stream
self.pyt_stream = torch.cuda.Stream(self.device, -1)
self.fw_stream = self.pyt_stream if run_in_stream else torch.cuda.current_stream()
#init data parallel model
net = deeplab_xception.DeepLabv3_plus(nInputChannels=4, n_classes=3, os=16, pretrained=False).to(self.device)
if self.use_nhwc:
net = net.to(memory_format=torch.channels_last)
if self.do_train:
net = DDP(net, device_ids=[self.device.index])
else:
reload_model(model_path, net, self.device)
#broadcast
net.eval()
if self.use_fp16 and self.do_inference:
net.half()
# load trt model
if self.use_trt and not self.do_train:
# Torch-TensorRT debug level
#torch_tensorrt.logging.set_reportable_log_level(torch_tensorrt.logging.Level.Debug)
# set device
torch_tensorrt.set_device(self.device.index)
# filename
trtfile = f"model_fp16_bs{local_batch_size}_gpu{self.device.index}.trt" \
if self.use_fp16 else f"model_fp32_bs{local_batch_size}_gpu{self.device.index}.trt"
if trt_model_dir is not None:
trtfile = os.path.join(trt_model_dir, trtfile)
if os.path.isfile(trtfile):
if comm_rank == 0:
print("Loading TRT model")
net_trt = torch.jit.load(trtfile, map_location=self.device)
else:
if comm_rank == 0:
print("Compiling TRT model")
# Torch-TensorRT compile settings
input_dtype = torch.half if self.use_fp16 else torch.float
input_shape = (local_batch_size, len(channels), 768, 1152)
                input_format = torch.channels_last if self.use_nhwc else torch.contiguous_format
# JIT the model
net_script = torch.jit.script(net)
# TRT compile the model
net_trt = torch_tensorrt.compile(net_script,
inputs=[torch_tensorrt.Input(input_shape, dtype=input_dtype, format=input_format)],
enabled_precisions={input_dtype},
device=self.device)
if trt_model_dir is not None:
os.makedirs(trt_model_dir, exist_ok=True)
torch.jit.save(net_trt, trtfile)
# switch to model
self.net_fw = net_trt
else:
print("Using PyTorch model")
self.net_fw = net
if self.use_graphs:
print("Capturing Graph")
self.static_inputs = torch.ones((local_batch_size, len(channels), 768, 1152), dtype=torch.float32).to(device)
if self.use_nhwc:
self.static_inputs = self.static_inputs.contiguous(memory_format = torch.channels_last)
if self.use_fp16:
self.static_inputs = self.static_inputs.half()
self.pyt_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.pyt_stream):
with torch.no_grad():
for _ in range(10):
self.static_outputs = self.net_fw(self.static_inputs)
self.pyt_stream.synchronize()
# capture
self.graph = torch.cuda.CUDAGraph()
self.graph.capture_begin()
self.static_outputs = self.net_fw(self.static_inputs)
self.graph.capture_end()
self.pyt_stream.synchronize()
print("Graph Capture Done")
def sync(self):
self.fw_stream.synchronize()
def run(self, inputs, labels):
#training?
if self.do_train:
with torch.cuda.stream(self.fw_stream):
#FW
with amp.autocast(enabled = self.use_fp16):
outputs = self.net_fw(inputs)
result = outputs, labels
#inference?
if self.do_inference:
with torch.no_grad():
#convert to FP16 if requested
with torch.cuda.stream(self.fw_stream):
if self.use_fp16:
inputs = inputs.half()
if self.use_nhwc:
inputs = inputs.to(memory_format=torch.channels_last)
if self.use_graphs:
self.static_inputs.copy_(inputs)
self.graph.replay()
outputs = self.static_outputs.clone()
else:
#pass forward
outputs = self.net_fw.forward(inputs)
result = outputs, None
else:
result = None, None
return result
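# Typical use from the benchmark loop (a sketch; `pargs` is the parsed argument
# namespace and channels/local_batch_size/device are set up as in the driver):
#   handler = ModelHandler(pargs, channels, local_batch_size, device,
#                          comm_size, comm_rank, run_in_stream=pargs.enable_gds)
#   outputs, labels = handler.run(inputs, labels)
#   handler.sync()  # wait on the forward stream before consuming outputs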
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/utils/model_handler.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from torch.autograd import Variable
from torch import nn
import numpy as np
def fp_loss(logit, target, weight, fpw_1=0, fpw_2=0):
n, c, h, w = logit.size()
# logit = logit.permute(0, 2, 3, 1)
target = target.squeeze(1)
    # class weights must live on the same device as the logits (GPU here)
criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(), reduction='none')
losses = criterion(logit, target.long())
preds = torch.max(logit, 1)[1]
    # false positives for class 1: predicted class 1 where the target is not 1
    is_fp_one = (torch.eq(preds, 1) & torch.ne(target, 1)).float()
    fp_matrix_one = (is_fp_one * fpw_1) + 1
    losses = torch.mul(fp_matrix_one, losses)
    # false positives for class 2: predicted class 2 where the target is not 2
    is_fp_two = (torch.eq(preds, 2) & torch.ne(target, 2)).float()
    fp_matrix_two = (is_fp_two * fpw_2) + 1
losses = torch.mul(fp_matrix_two, losses)
loss = torch.mean(losses)
return loss
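# Minimal smoke test for fp_loss (a sketch, not part of the benchmark; shapes
# follow the (N, C, H, W) logit / (N, 1, H, W) label convention used by the
# driver, and CUDA is required because the class weights are moved to the GPU).
if __name__ == "__main__":
    if torch.cuda.is_available():
        logit = torch.randn(2, 3, 8, 8, device="cuda")
        target = torch.randint(0, 3, (2, 1, 8, 8), device="cuda")
        # fpw_1/fpw_2 > 0 up-weight false positives for classes 1 and 2
        print(fp_loss(logit, target, [1., 1., 1.], fpw_1=0.5, fpw_2=0.5))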
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/utils/losses.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import numpy as np
import h5py as h5
import argparse as ap
from tqdm import tqdm
def main(pargs):
#check path
inputpath = pargs.input
outputpath = pargs.output
#check inputs
filenames = [x for x in os.listdir(inputpath) if x.endswith(".h5")]
#create outputpath if doesn't exist:
os.makedirs(outputpath, exist_ok=True)
for filename in tqdm(filenames):
#read input
inputfile = os.path.join(inputpath, filename)
with h5.File(inputfile, 'r') as f:
data = f["climate"]["data"][...].astype(np.float32)
label = np.stack([f["climate"]["labels_0"][...], f["climate"]["labels_1"][...]], axis=-1)
#write output
filenamebase = os.path.splitext(filename)[0]
datafile = os.path.join(outputpath, filenamebase+'_data.npy')
if not os.path.isfile(datafile):
np.save(datafile, data)
labelfile = os.path.join(outputpath, filenamebase+'_label.npy')
if not os.path.isfile(labelfile):
np.save(labelfile, label)
if __name__ == '__main__':
AP = ap.ArgumentParser()
AP.add_argument("--input", type=str, help="input directory with hdf5 files")
AP.add_argument("--output",type=str, help="output directory for the npy files")
parsed = AP.parse_args()
main(parsed)
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/utils/h52npy.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#basic modules
import time
import sys
import os
import numpy as np
#matplotlib
import matplotlib as mpl
mpl.use('agg')
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
#basemap
from mpl_toolkits.basemap import Basemap
class CamVisualizer(object):
def __init__(self):
        # Create figure
lats = np.linspace(-90,90,768)
longs = np.linspace(-180,180,1152)
self.my_map = Basemap(projection='gall', llcrnrlat=min(lats),
llcrnrlon=min(longs), urcrnrlat=max(lats),
urcrnrlon=max(longs), resolution = 'i')
xx, yy = np.meshgrid(longs, lats)
self.x_map, self.y_map = self.my_map(xx,yy)
# Create new colormap
colors_1 = [(252-32*i,252-32*i,252-32*i,i*1/16) for i in np.linspace(0, 1, 32)]
colors_2 = [(220-60*i,220-60*i,220,i*1/16+1/16) for i in np.linspace(0, 1, 32)]
colors_3 = [(160-20*i,160+30*i,220,i*3/8+1/8) for i in np.linspace(0, 1, 96)]
colors_4 = [(140+80*i,190+60*i,220+30*i,i*4/8+4/8) for i in np.linspace(0, 1, 96)]
colors = colors_1 + colors_2 + colors_3 + colors_4
colors = list(map(lambda c: (c[0]/256,c[1]/256,c[2]/256,c[3]), colors))
self.my_cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', colors, N=64)
#print once so that everything is set up
self.my_map.bluemarble()
self.my_map.drawcoastlines()
def plot(self, filename, title_prefix, data, label, year, month, day, hour):
# Get data
tstart = time.time()
data = np.roll(data,[0,int(1152/2)])
# Get labels
label = np.roll(label, [0,int(1152/2)])
l1 = (label == 1)
l2 = (label == 2)
print("extract data: {}".format(time.time() - tstart))
#pdf
#with PdfPages(filename+'.pdf') as pdf:
#get figure
fig = plt.figure(figsize=(100,20), dpi=100)
#draw stuff
tstart = time.time()
self.my_map.bluemarble()
self.my_map.drawcoastlines()
print("draw background: {}".format(time.time() - tstart))
# Plot data
tstart = time.time()
self.my_map.contourf(self.x_map, self.y_map, data, 128, vmin=0, vmax=89, cmap=self.my_cmap, levels=np.arange(0,89,2))
print("draw data: {}".format(time.time() - tstart))
# Plot colorbar
tstart = time.time()
cbar = self.my_map.colorbar(ticks=np.arange(0,89,11))
cbar.ax.set_ylabel('Integrated Water Vapor kg $m^{-2}$',size=32)
cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=28)
print("draw colorbar: {}".format(time.time() - tstart))
# Draw Tropical Cyclones & Atmospheric Rivers
tstart = time.time()
tc_contour = self.my_map.contour(self.x_map, self.y_map, l1, [0.5], linewidths=3, colors='orange')
ar_contour = self.my_map.contour(self.x_map, self.y_map, l2, [0.5], linewidths=3, colors='magenta')
print("draw contours: {}".format(time.time() - tstart))
tstart = time.time()
self.my_map.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,0,1])
self.my_map.drawparallels(np.arange(-90, 90, 30), labels =[1,0,0,0])
print("draw meridians: {}".format(time.time() - tstart))
# Plot legend and title
tstart = time.time()
lines = [tc_contour.collections[0], ar_contour.collections[0]]
labels = ['Tropical Cyclone', "Atmospheric River"]
plt.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2)
plt.setp(plt.gca().get_legend().get_texts(), fontsize='38')
plt.title("{} Extreme Weather Patterns {:04d}-{:02d}-{:02d}".format(title_prefix, int(year), int(month), int(day)), fontdict={'fontsize': 44})
print("draw legend/title: {}".format(time.time() - tstart))
tstart = time.time()
#pdf.savefig(bbox_inches='tight')
#mask_ex = plt.gcf()
#mask_ex.savefig(filename, bbox_inches='tight')
plt.gcf().savefig(filename, format="PNG", bbox_inches='tight')
plt.clf()
print("save plot: {}".format(time.time() - tstart))
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/utils/visualizer.py |
# MIT License
#
# Copyright (c) 2018 Pyjcsx, 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation,
groups=inplanes, bias=bias)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
def compute_padding(kernel_size, rate):
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
return pad_beg, pad_end
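# e.g. kernel_size=3 with rate=2 dilates to an effective 5x5 kernel, so
# pad_total = 4 and compute_padding(3, 2) == (2, 2), keeping the spatial size.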
class SeparableConv2d_same(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
super(SeparableConv2d_same, self).__init__()
pad_beg, pad_end = compute_padding(kernel_size, rate=dilation)
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, (pad_beg, pad_beg), dilation,
groups=inplanes, bias=bias)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
super(Block, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = nn.BatchNorm2d(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
rep.append(nn.BatchNorm2d(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
rep.append(nn.BatchNorm2d(planes))
if not start_with_relu:
rep = rep[1:]
if stride != 1:
rep.append(SeparableConv2d_same(planes, planes, 3, stride=2))
if stride == 1 and is_last:
rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x += skip
return x
class Xception(nn.Module):
"""
    Modified Aligned Xception
"""
def __init__(self, inplanes=3, os=16, pretrained=False):
super(Xception, self).__init__()
if os == 16:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
elif os == 8:
entry_block3_stride = 1
middle_block_rate = 2
exit_block_rates = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
self.block2 = Block(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True,
is_last=True)
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_rates[0],
start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d_same(1024, 1536, 3, stride=1, dilation=exit_block_rates[1])
self.bn3 = nn.BatchNorm2d(1536)
self.conv4 = SeparableConv2d_same(1536, 1536, 3, stride=1, dilation=exit_block_rates[1])
self.bn4 = nn.BatchNorm2d(1536)
self.conv5 = SeparableConv2d_same(1536, 2048, 3, stride=1, dilation=exit_block_rates[1])
self.bn5 = nn.BatchNorm2d(2048)
# Init weights
self.__init_weight()
# Load pretrained model
if pretrained:
self.__load_xception_pretrained()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
low_level_feat = x
x = self.block2(x)
x = self.block3(x)
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return x, low_level_feat
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def __load_xception_pretrained(self):
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
print(k)
if k in state_dict:
if 'pointwise' in k:
v = v.unsqueeze(-1).unsqueeze(-1)
if k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('block11'):
model_dict[k.replace('block11', 'block12')] = v
elif k.startswith('conv3'):
model_dict[k] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, rate):
super(ASPP_module, self).__init__()
if rate == 1:
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = rate
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=rate, bias=False)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.__init_weight()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class TilingNCHW(nn.Module):
def __init__(self, tiles):
super(TilingNCHW, self).__init__()
self.tiles = tiles
# warning, this trick only works for H=W=1
# hacky way to deal with annoying layout changes in tile:
def forward(self, x):
return torch.tile(x, self.tiles)
class TilingNHWC(nn.Module):
def __init__(self, tiles):
super(TilingNHWC, self).__init__()
self.tiles = tiles
def forward(self, x):
N, C, H, W = x.shape
xtmp = torch.as_strided(x, size=[N, 1, 1, C], stride=[C, C, C, 1])
# tiling is in nchw, needs to change to nhwc
NT, CT, HT, WT = self.tiles
xtmp = torch.tile(xtmp, [NT, HT, WT, CT])
x = torch.as_strided(xtmp, size=[N*NT, C*CT, HT, WT], stride=[HT*WT*CT*C, 1, WT*CT*C, CT*C])
return x
class DeepLabv3_plus(nn.Module):
def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True):
if _print:
print("Constructing DeepLabv3+ model...")
print("Number of classes: {}".format(n_classes))
print("Output stride: {}".format(os))
print("Number of Input Channels: {}".format(nInputChannels))
super(DeepLabv3_plus, self).__init__()
# Atrous Conv
self.xception_features = Xception(nInputChannels, os, pretrained)
# ASPP
if os == 16:
rates = [1, 6, 12, 18]
elif os == 8:
rates = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = ASPP_module(2048, 256, rate=rates[0])
self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
self.aspp4 = ASPP_module(2048, 256, rate=rates[3])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(2048, 256, 1, stride=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True))
self.interp_shape1 = (48, 72)
self.interp_shape2 = (192, 288)
self.interp_shape3 = (768, 1152)
#tiles = (1, 1) + self.interp_shape1
#self.tiling = TilingNCHW(tiles)
self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
# adopt [1x1, 48] for channel reduction.
self.conv2 = nn.Conv2d(128, 48, 1, bias=False)
self.bn2 = nn.BatchNorm2d(48)
self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, n_classes, kernel_size=1, stride=1))
def forward(self, input):
x, low_level_features = self.xception_features(input)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
# use this because better compatible with trt
x5 = F.interpolate(x5, size=self.interp_shape1,
mode='bilinear', align_corners=True)
#x5 = self.tiling(x5)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = F.interpolate(x, size=self.interp_shape2,
mode='bilinear', align_corners=True)
low_level_features = self.conv2(low_level_features)
low_level_features = self.bn2(low_level_features)
low_level_features = self.relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.last_conv(x)
x = F.interpolate(x, size=self.interp_shape3,
mode='bilinear', align_corners=True)
return x
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
# torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def get_1x_lr_params(model):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
b = [model.xception_features]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
b = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
if __name__ == "__main__":
model = DeepLabv3_plus(nInputChannels=3, n_classes=21, os=16, pretrained=True, _print=True)
model.eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
output = model.forward(image)
print(output.size())
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/architecture/deeplab_xception.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/architecture/__init__.py |
# MIT License
#
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import glob
import numpy as np
import torch
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
from nvidia.dali.plugin.pytorch import DALIGenericIterator, LastBatchPolicy
class CamDaliDataloader(object):
def get_pipeline(self):
pipeline = Pipeline(self.batchsize, self.num_threads, self.device, seed=333)
with pipeline:
data = fn.readers.numpy(name="data",
device = self.io_device,
files = self.data_files,
num_shards = self.num_shards,
shard_id = self.shard_id,
stick_to_shard = self.stick_to_shard,
shuffle_after_epoch = self.shuffle,
prefetch_queue_depth = 2,
cache_header_information = True,
register_buffers = True,
dont_use_mmap = (not self.use_mmap) or (self.io_device == "gpu")).gpu()
if self.load_label:
label = fn.readers.numpy(name="label",
device = self.io_device,
files = self.label_files,
num_shards = self.num_shards,
shard_id = self.shard_id,
stick_to_shard = self.stick_to_shard,
shuffle_after_epoch = self.shuffle,
prefetch_queue_depth = 2,
cache_header_information = True,
register_buffers = True,
dont_use_mmap = (not self.use_mmap) or (self.io_device == "gpu")).gpu()
data = fn.transpose(data,
device = "gpu",
perm = [2, 0, 1])
if self.load_label:
pipeline.set_outputs(data, label)
else:
pipeline.set_outputs(data)
return pipeline
def init_files(self, root_dirs, prefix_data, prefix_label):
self.root_dirs = root_dirs
self.prefix_data = prefix_data
self.prefix_label = prefix_label
# get files
self.data_files = []
for directory in self.root_dirs:
self.data_files += sorted(glob.glob(os.path.join(directory, self.prefix_data)))
if self.load_label:
self.label_files = []
for directory in self.root_dirs:
self.label_files += sorted(glob.glob(os.path.join(directory, self.prefix_label)))
# shuffle globally if requested
if not self.stick_to_shard and self.shuffle:
self.rng = np.random.default_rng(seed=333)
perm = self.rng.permutation(len(self.data_files))
self.data_files = np.array(self.data_files)[perm].tolist()
if self.load_label:
self.label_files = np.array(self.label_files)[perm].tolist()
# get shapes
self.data_shape = np.load(self.data_files[0]).shape
if self.load_label:
self.label_shape = np.load(self.label_files[0]).shape
# clean up old iterator
if self.iterator is not None:
del(self.iterator)
self.iterator = None
# clean up old pipeline
if self.pipeline is not None:
del(self.pipeline)
self.pipeline = None
# io devices
self.io_device = "gpu" if self.read_gpu else "cpu"
# define pipeline
self.pipeline = self.get_pipeline()
# build pipeline
self.pipeline.build()
# build pipes
self.length = len(self.data_files)
# create iterator but do not prepare first batch
tags = ['data', 'label'] if self.load_label else ['data']
self.iterator = DALIGenericIterator([self.pipeline], tags,
reader_name="data", auto_reset = True,
prepare_first_batch = False,
last_batch_policy = LastBatchPolicy.DROP)
def __init__(self, root_dirs, prefix_data, prefix_label,
channels, batchsize, num_threads = 1, device = -1,
num_shards = 1, shard_id = 0, stick_to_shard = True,
lazy_init = False, read_gpu = False, use_mmap = True,
shuffle = False, preprocess = True):
# read filenames first
self.channels = channels
self.batchsize = batchsize
self.num_threads = num_threads
self.device = device
self.io_device = "gpu" if read_gpu else "cpu"
self.use_mmap = use_mmap
self.shuffle = shuffle
self.preprocess = preprocess
self.read_gpu = read_gpu
self.pipeline = None
self.iterator = None
self.lazy_init = lazy_init
self.load_label = prefix_label is not None
# sharding
self.num_shards = num_shards
self.shard_id = shard_id
self.stick_to_shard = stick_to_shard
# init files
self.init_files(root_dirs, prefix_data, prefix_label)
@property
def shapes(self):
if self.load_label:
return self.data_shape, self.label_shape
else:
return self.data_shape
@property
def sample_sizes(self):
data_size = np.prod(self.data_shape) * 4
label_size = 0 if not self.load_label else np.prod(self.label_shape) * 4
return data_size, label_size
def __iter__(self):
if self.load_label:
for token in self.iterator:
data = token[0]['data']
label = token[0]['label']
if self.preprocess:
data = data[:, self.channels, ...]
label = torch.squeeze(label[..., 0])
yield data, label, ""
else:
for token in self.iterator:
data = token[0]['data']
if self.preprocess:
data = data[:, self.channels, ...]
yield data, None, ""
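# Construction sketch (directory names and sharding values are hypothetical
# placeholders; the prefixes match the driver above):
#   loader = CamDaliDataloader(["/data/shard0"], "data-*.npy", "label-*.npy",
#                              channels=[0, 1, 2, 10], batchsize=16,
#                              num_threads=4, device=0,
#                              num_shards=comm_size, shard_id=comm_rank,
#                              read_gpu=True, use_mmap=False, shuffle=True)
#   for data, label, _ in loader:
#       ...  # data is an NCHW GPU tensor, label the squeezed class map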
| MagnumIO-main | gds/benchmarks/pytorch/deepCam-inference/data/cam_numpy_dali_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, Extension
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(name='numpy_reader',
ext_modules=[CUDAExtension(name='numpy_reader',
sources=['cpp/thread_pool.cpp', 'cpp/numpy_reader.cpp'],
libraries=["cufile"],
extra_compile_args={'cxx': ['-g', '-O2'], 'nvcc': ['-g', '-O2']})
],
cmdclass={'build_ext': BuildExtension})
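
# Typical build (a sketch; assumes a CUDA toolkit with the cuFile/GDS library
# installed so the "cufile" link dependency above resolves):
#   pip install .
# or, for an in-place development build:
#   python setup.py build_ext --inplace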
| MagnumIO-main | gds/readers/pytorch-numpy/setup.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from . import numpy_reader as nr
| MagnumIO-main | gds/readers/pytorch-numpy/python/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from time import sleep
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
#custom reader
import numpy_reader as nr
#dataset class
class SegmentationDataset(Dataset):
#set a new path for files
def init_files(self, source):
self.source = source
self.files = [x.replace("_data.npy", "") for x in sorted(os.listdir(self.source)) if x.endswith("_data.npy")]
if self.shuffle:
np.random.shuffle(self.files)
self.length = len(self.files)
def __init__(self, source, num_intra_threads = 1, device = -1, shuffle = False):
self.shuffle = shuffle
#init files
self.init_files(source)
#init numpy loader
filename = os.path.join(self.source, self.files[0])
#data
self.npr_data = nr.numpy_reader(split_axis = False, device = device)
self.npr_data.num_intra_threads = num_intra_threads
self.npr_data.parse(filename + "_data.npy")
#label
self.npr_label = nr.numpy_reader(split_axis = False, device = device)
self.npr_label.num_intra_threads = num_intra_threads
self.npr_label.parse(filename + "_label.npy")
print("Initialized dataset with ", self.length, " samples.")
def __len__(self):
return self.length
@property
def shapes(self):
return self.npr_data.shape, self.npr_label.shape
def __getitem__(self, idx):
filename = os.path.join(self.source, self.files[idx])
try:
#load data
self.npr_data.init_file(filename + "_data.npy")
X = self.npr_data.get_sample(0)
self.npr_data.finalize_file()
#load label
self.npr_label.init_file(filename + "_label.npy")
Y = self.npr_label.get_sample(0)
self.npr_label.finalize_file()
        except OSError:
            print("Could not open file " + filename)
            # wait briefly (e.g. for a slow filesystem), then re-raise so that
            # X and Y are never used uninitialized below
            sleep(5)
            raise
#preprocess
X = X.permute(2, 0, 1)
return X, Y, filename
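
# Minimal usage sketch (the directory path is a placeholder); DataLoader is
# imported above for exactly this pattern:
#   dataset = SegmentationDataset("/path/to/npy_dir", num_intra_threads=4, device=0)
#   loader = DataLoader(dataset, batch_size=1, num_workers=0)
#   for X, Y, fname in loader:
#       ...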
| MagnumIO-main | gds/readers/pytorch-numpy/python/example.py |
from cucim.clara.filesystem import CuFileDriver
import cucim.clara.filesystem as fs
import os
import cupy as cp
import torch
# Write a file with size 10 (in bytes)
with open("input.raw", "wb") as input_file:
input_file.write(
bytearray([101, 102, 103, 104, 105, 106, 107, 108, 109, 110]))
# Create a CuPy array with size 10 (in bytes)
cp_arr = cp.ones(10, dtype=cp.uint8)
# Create a PyTorch array with size 10 (in bytes)
cuda0 = torch.device('cuda:0')
torch_arr = torch.ones(10, dtype=torch.uint8, device=cuda0)
# Using CuFileDriver
# (Opening a file with O_DIRECT flag is required for GDS)
fno = os.open("input.raw", os.O_RDONLY | os.O_DIRECT)
with CuFileDriver(fno) as fd:
# Read 8 bytes starting from file offset 0 into buffer offset 2
read_count = fd.pread(cp_arr, 8, 0, 2)
# Read 10 bytes starting from file offset 3
read_count = fd.pread(torch_arr, 10, 3)
os.close(fno)
# Another way of opening file with cuFile
with fs.open("output.raw", "w") as fd:
# Write 10 bytes from cp_array to file starting from offset 5
write_count = fd.pwrite(cp_arr, 10, 5)
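
# Sanity check (illustrative): the first pread above copied file bytes
# 101..108 into cp_arr starting at buffer offset 2, so:
#   print(cp_arr)  # expected: [  1   1 101 102 103 104 105 106 107 108]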
| MagnumIO-main | gds/readers/cucim-gds/test_gds.py |
#!/usr/bin/env python
import argparse
import math
import os
import sys
from nvbench_json import reader
import tabulate
# Parse version string into tuple, "x.y.z" -> (x, y, z)
def version_tuple(v):
return tuple(map(int, (v.split("."))))
tabulate_version = version_tuple(tabulate.__version__)
all_devices = []
def format_axis_value(axis_value, axis_type):
if axis_type == "int64":
return "%d" % int(axis_value)
elif axis_type == "float64":
return "%.5g" % float(axis_value)
else:
return axis_value
def format_walltime(seconds_in):
h = math.floor(seconds_in / (60 * 60))
m = math.floor((seconds_in / 60) % 60)
s = math.floor(seconds_in % 60)
ms = math.floor((seconds_in * 1000) % 1000)
return "{}{}{}{}".format(
"{:0>2d}:".format(h) if h > 1e-9 else "",
"{:0>2d}:".format(m) if (h > 1e-9 or m > 1e-9) else "",
"{:0>2d}.".format(s) if (h > 1e-9 or m > 1e-9) else "{:d}.".format(s),
"{:0>3d}".format(ms))
def format_percentage(percentage):
# When there aren't enough samples for a meaningful noise measurement,
# the noise is recorded as infinity. Unfortunately, JSON spec doesn't
# allow for inf, so these get turned into null.
if percentage is None:
return "inf"
return "%0.2f%%" % (percentage * 100.0)
measure_names = ["cold", "batch", "cupti"]
measure_column_names = {"cold": "Isolated", "batch": "Batch", "cupti": "CUPTI"}
def init_measures():
out = {}
for name in measure_names:
out[name] = 0.
return out
def get_measures(state):
summaries = state["summaries"]
times = {}
for name in measure_names:
measure_walltime_tag = "nv/{}/walltime".format(name)
summary = next(filter(lambda s: s["tag"] == measure_walltime_tag,
summaries),
None)
if not summary:
continue
walltime_data = next(filter(lambda d: d["name"] == "value", summary["data"]))
assert(walltime_data["type"] == "float64")
walltime = walltime_data["value"]
walltime = float(walltime)
times[name] = walltime if walltime else 0.
return times
def merge_measures(target, src):
for name, src_val in src.items():
target[name] += src_val
def sum_measures(measures):
total_time = 0.
for time in measures.values():
total_time += time
return total_time
def get_active_measure_names(measures):
names = []
for name, time in measures.items():
if time > 1e-9:
names.append(name)
return names
def append_measure_headers(headers, active=measure_names):
for name in active:
headers.append(measure_column_names[name])
def append_measure_values(row, measures, active=measure_names):
for name in active:
row.append(format_walltime(measures[name]))
def consume_file(filename):
file_root = reader.read_file(filename)
file_out = {}
file_measures = init_measures()
benches = {}
for bench in file_root["benchmarks"]:
bench_data = consume_benchmark(bench, file_root)
merge_measures(file_measures, bench_data["measures"])
benches[bench["name"]] = bench_data
file_out["benches"] = benches
file_out["measures"] = file_measures
return file_out
def consume_benchmark(bench, file_root):
bench_out = {}
# Initialize axis map
axes_out = {}
axes = bench["axes"]
if axes:
for axis in axes:
values_out = {}
axis_name = axis["name"]
axis_type = axis["type"]
for value in axis["values"]:
if axis_type == "type":
value = value["input_string"]
else:
value = format_axis_value(value["value"], axis_type)
values_out[value] = {"measures": init_measures()}
axes_out[axis_name] = values_out
states_out = {}
bench_measures = init_measures()
for state in bench["states"]:
state_name = state["name"]
# Get walltimes for each measurement:
state_measures = get_measures(state)
state_out = {}
state_out["measures"] = state_measures
states_out[state_name] = state_out
# Update the benchmark measures walltimes
merge_measures(bench_measures, state_measures)
# Update the axis measurements:
axis_values = state["axis_values"]
if axis_values:
for axis_value in axis_values:
axis_name = axis_value["name"]
value = format_axis_value(axis_value["value"], axis_value["type"])
merge_measures(axes_out[axis_name][value]["measures"], state_measures)
bench_out["axes"] = axes_out
bench_out["measures"] = bench_measures
bench_out["states"] = states_out
return bench_out
def print_overview_section(data):
print("# Walltime Overview\n")
measures = data["measures"]
active_measures = get_active_measure_names(measures)
headers = ["Walltime"]
append_measure_headers(headers, active_measures)
colalign = ["right"] * len(headers)
rows = []
row = [format_walltime(sum_measures(measures))]
append_measure_values(row, measures, active_measures)
rows.append(row)
# colalign and github format require tabulate 0.8.3
if tabulate_version >= (0, 8, 3):
print(tabulate.tabulate(rows,
headers=headers,
colalign=colalign,
tablefmt="github"))
else:
print(tabulate.tabulate(rows,
headers=headers,
tablefmt="markdown"))
print()
# append_data_row_lambda args: (row_list, name, items[name])
def print_measures_table(headers, colalign, items, total_measures, append_item_row_lambda):
total_time = sum_measures(total_measures)
active_measures = get_active_measure_names(total_measures)
num_user_columns = len(headers)
headers.append("%")
headers.append("Walltime")
append_measure_headers(headers, active_measures)
while len(colalign) < len(headers):
colalign.append("right")
rows = []
for name, item in items.items():
item_measures = item["measures"]
item_time = sum_measures(item_measures)
row = []
append_item_row_lambda(row, name, item)
if total_time > 1e-9:
row.append(format_percentage(item_time / total_time))
else:
row.append(format_percentage(0))
row.append(format_walltime(item_time))
append_measure_values(row, item_measures, active_measures)
rows.append(row)
# Totals:
row = []
if num_user_columns != 0:
row.append("Total")
while len(row) < num_user_columns:
row.append("")
row.append(format_percentage(1))
row.append(format_walltime(total_time))
append_measure_values(row, total_measures, active_measures)
rows.append(row)
# colalign and github format require tabulate 0.8.3
if tabulate_version >= (0, 8, 3):
print(tabulate.tabulate(rows,
headers=headers,
colalign=colalign,
tablefmt="github"))
else:
print(tabulate.tabulate(rows,
headers=headers,
tablefmt="markdown"))
def print_files_section(data):
print("# Files\n")
items = data["files"]
total_measures = data["measures"]
headers = ["Filename"]
colalign = ["left"]
def append_row(row, name, item):
row.append(name)
print_measures_table(headers, colalign, items, total_measures, append_row)
print()
for filename, file in items.items():
print_file_section(filename, file)
def print_file_section(filename, file):
print("## File: {}\n".format(filename))
items = file["benches"]
total_measures = file["measures"]
headers = ["Benchmark"]
colalign = ["left"]
def append_row_name(row, name, item):
row.append(name)
print_measures_table(headers, colalign, items, total_measures, append_row_name)
print()
for bench_name, bench in items.items():
print_bench_section(bench_name, bench)
def print_bench_section(bench_name, bench):
print("### Benchmark: {}\n".format(bench_name))
# TODO split this up so each axis is a column
items = bench["states"]
total_measures = bench["measures"]
headers = ["Configuration"]
colalign = ["left"]
def append_row_name(row, name, item):
row.append(name)
print_measures_table(headers, colalign, items, total_measures, append_row_name)
print()
for axis_name, axis in bench["axes"].items():
total_measures = bench["measures"]
headers = ["Axis: " + axis_name]
colalign = ["left"]
print_measures_table(headers, colalign, axis, total_measures, append_row_name)
print()
def main():
help_text = "%(prog)s [nvbench.out.json | dir/]..."
parser = argparse.ArgumentParser(prog='nvbench_walltime', usage=help_text)
args, files_or_dirs = parser.parse_known_args()
filenames = []
for file_or_dir in files_or_dirs:
if os.path.isdir(file_or_dir):
for f in os.listdir(file_or_dir):
if os.path.splitext(f)[1] != ".json":
continue
filename = os.path.join(file_or_dir, f)
if os.path.isfile(filename) and os.path.getsize(filename) > 0:
filenames.append(filename)
else:
filenames.append(file_or_dir)
filenames.sort()
data = {}
files_out = {}
measures = init_measures()
for filename in filenames:
file_data = consume_file(filename)
merge_measures(measures, file_data["measures"])
files_out[filename] = file_data
data["files"] = files_out
data["measures"] = measures
print_overview_section(data)
print_files_section(data)
if __name__ == '__main__':
sys.exit(main())
| nvbench-main | scripts/nvbench_walltime.py |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import os
import sys
from nvbench_json import reader
def parse_files():
help_text = "%(prog)s [nvbench.out.json | dir/] ..."
parser = argparse.ArgumentParser(prog='nvbench_histogram', usage=help_text)
args, files_or_dirs = parser.parse_known_args()
filenames = []
for file_or_dir in files_or_dirs:
if os.path.isdir(file_or_dir):
for f in os.listdir(file_or_dir):
if os.path.splitext(f)[1] != ".json":
continue
filename = os.path.join(file_or_dir, f)
if os.path.isfile(filename) and os.path.getsize(filename) > 0:
filenames.append(filename)
else:
filenames.append(file_or_dir)
filenames.sort()
if not filenames:
parser.print_help()
        sys.exit(0)
return filenames
def extract_filename(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "filename", summary_data))
assert(value_data["type"] == "string")
return value_data["value"]
def extract_size(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "size", summary_data))
assert(value_data["type"] == "int64")
return int(value_data["value"])
def parse_samples_meta(filename, state):
summaries = state["summaries"]
if not summaries:
return None, None
summary = next(filter(lambda s: s["tag"] == "nv/json/bin:nv/cold/sample_times",
summaries),
None)
if not summary:
return None, None
sample_filename = extract_filename(summary)
# If not absolute, the path is relative to the associated .json file:
if not os.path.isabs(sample_filename):
sample_filename = os.path.join(os.path.dirname(filename), sample_filename)
sample_count = extract_size(summary)
return sample_count, sample_filename
def parse_samples(filename, state):
sample_count, samples_filename = parse_samples_meta(filename, state)
if not sample_count or not samples_filename:
return []
with open(samples_filename, "rb") as f:
samples = np.fromfile(f, "<f4")
assert (sample_count == len(samples))
return samples
def to_df(data):
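    # Wrapping each value in pd.Series lets pandas NaN-pad sample arrays of
    # unequal length instead of raising on mismatched sizes.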
return pd.DataFrame.from_dict(dict([(k, pd.Series(v)) for k, v in data.items()]))
def parse_json(filename):
json_root = reader.read_file(filename)
samples_data = {}
for bench in json_root["benchmarks"]:
print("Benchmark: {}".format(bench["name"]))
for state in bench["states"]:
print("State: {}".format(state["name"]))
samples = parse_samples(filename, state)
if len(samples) == 0:
continue
samples_data["{} {}".format(bench["name"], state["name"])] = samples
return to_df(samples_data)
def main():
filenames = parse_files()
dfs = [parse_json(filename) for filename in filenames]
df = pd.concat(dfs, ignore_index=True)
sns.displot(df, rug=True, kind="kde", fill=True)
plt.show()
if __name__ == '__main__':
sys.exit(main())
| nvbench-main | scripts/nvbench_histogram.py |
#!/usr/bin/env python
import argparse
import math
import os
import sys
from colorama import Fore
import tabulate
from nvbench_json import reader
# Parse version string into tuple, "x.y.z" -> (x, y, z)
def version_tuple(v):
return tuple(map(int, (v.split("."))))
tabulate_version = version_tuple(tabulate.__version__)
all_devices = []
config_count = 0
unknown_count = 0
failure_count = 0
pass_count = 0
def find_matching_bench(needle, haystack):
for hay in haystack:
if hay["name"] == needle["name"] and hay["axes"] == needle["axes"]:
return hay
return None
def find_device_by_id(device_id):
for device in all_devices:
if device["id"] == device_id:
return device
return None
def format_int64_axis_value(axis_name, axis_value, axes):
axis = next(filter(lambda ax: ax["name"] == axis_name, axes))
axis_flags = axis["flags"]
value = int(axis_value["value"])
if axis_flags == "pow2":
value = math.log2(value)
return "2^%d" % value
return "%d" % value
def format_float64_axis_value(axis_name, axis_value, axes):
return "%.5g" % float(axis_value["value"])
def format_type_axis_value(axis_name, axis_value, axes):
return "%s" % axis_value["value"]
def format_string_axis_value(axis_name, axis_value, axes):
return "%s" % axis_value["value"]
def format_axis_value(axis_name, axis_value, axes):
axis = next(filter(lambda ax: ax["name"] == axis_name, axes))
axis_type = axis["type"]
if axis_type == "int64":
return format_int64_axis_value(axis_name, axis_value, axes)
elif axis_type == "float64":
return format_float64_axis_value(axis_name, axis_value, axes)
elif axis_type == "type":
return format_type_axis_value(axis_name, axis_value, axes)
elif axis_type == "string":
return format_string_axis_value(axis_name, axis_value, axes)
def format_duration(seconds):
if seconds >= 1:
multiplier = 1.0
units = "s"
elif seconds >= 1e-3:
multiplier = 1e3
units = "ms"
elif seconds >= 1e-6:
multiplier = 1e6
units = "us"
else:
multiplier = 1e6
units = "us"
return "%0.3f %s" % (seconds * multiplier, units)
def format_percentage(percentage):
# When there aren't enough samples for a meaningful noise measurement,
# the noise is recorded as infinity. Unfortunately, JSON spec doesn't
# allow for inf, so these get turned into null.
if percentage is None:
return "inf"
return "%0.2f%%" % (percentage * 100.0)
def compare_benches(ref_benches, cmp_benches, threshold):
for cmp_bench in cmp_benches:
ref_bench = find_matching_bench(cmp_bench, ref_benches)
if not ref_bench:
continue
print("# %s\n" % (cmp_bench["name"]))
device_ids = cmp_bench["devices"]
axes = cmp_bench["axes"]
ref_states = ref_bench["states"]
cmp_states = cmp_bench["states"]
axes = axes if axes else []
headers = [x["name"] for x in axes]
colalign = ["center"] * len(headers)
headers.append("Ref Time")
colalign.append("right")
headers.append("Ref Noise")
colalign.append("right")
headers.append("Cmp Time")
colalign.append("right")
headers.append("Cmp Noise")
colalign.append("right")
headers.append("Diff")
colalign.append("right")
headers.append("%Diff")
colalign.append("right")
headers.append("Status")
colalign.append("center")
for device_id in device_ids:
rows = []
for cmp_state in cmp_states:
cmp_state_name = cmp_state["name"]
ref_state = next(filter(lambda st: st["name"] == cmp_state_name,
ref_states),
None)
if not ref_state:
continue
axis_values = cmp_state["axis_values"]
if not axis_values:
axis_values = []
row = []
for axis_value in axis_values:
axis_value_name = axis_value["name"]
row.append(format_axis_value(axis_value_name,
axis_value,
axes))
cmp_summaries = cmp_state["summaries"]
ref_summaries = ref_state["summaries"]
if not ref_summaries or not cmp_summaries:
continue
def lookup_summary(summaries, tag):
return next(filter(lambda s: s["tag"] == tag, summaries), None)
cmp_time_summary = lookup_summary(cmp_summaries, "nv/cold/time/gpu/mean")
ref_time_summary = lookup_summary(ref_summaries, "nv/cold/time/gpu/mean")
cmp_noise_summary = lookup_summary(cmp_summaries, "nv/cold/time/gpu/stdev/relative")
ref_noise_summary = lookup_summary(ref_summaries, "nv/cold/time/gpu/stdev/relative")
# TODO: Use other timings, too. Maybe multiple rows, with a
# "Timing" column + values "CPU/GPU/Batch"?
if not all([cmp_time_summary,
ref_time_summary,
cmp_noise_summary,
ref_noise_summary]):
continue
def extract_value(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "value", summary_data))
assert(value_data["type"] == "float64")
return value_data["value"]
cmp_time = extract_value(cmp_time_summary)
ref_time = extract_value(ref_time_summary)
cmp_noise = extract_value(cmp_noise_summary)
ref_noise = extract_value(ref_noise_summary)
# Convert string encoding to expected numerics:
cmp_time = float(cmp_time)
ref_time = float(ref_time)
diff = cmp_time - ref_time
frac_diff = diff / ref_time
if ref_noise and cmp_noise:
ref_noise = float(ref_noise)
cmp_noise = float(cmp_noise)
min_noise = min(ref_noise, cmp_noise)
elif ref_noise:
ref_noise = float(ref_noise)
min_noise = ref_noise
elif cmp_noise:
cmp_noise = float(cmp_noise)
min_noise = cmp_noise
else:
min_noise = None # Noise is inf
global config_count
global unknown_count
global pass_count
global failure_count
config_count += 1
if not min_noise:
unknown_count += 1
status = Fore.YELLOW + "????" + Fore.RESET
elif abs(frac_diff) <= min_noise:
pass_count += 1
status = Fore.GREEN + "PASS" + Fore.RESET
else:
failure_count += 1
status = Fore.RED + "FAIL" + Fore.RESET
if abs(frac_diff) >= threshold:
row.append(format_duration(ref_time))
row.append(format_percentage(ref_noise))
row.append(format_duration(cmp_time))
row.append(format_percentage(cmp_noise))
row.append(format_duration(diff))
row.append(format_percentage(frac_diff))
row.append(status)
rows.append(row)
if len(rows) == 0:
continue
device = find_device_by_id(device_id)
print("## [%d] %s\n" % (device["id"], device["name"]))
# colalign and github format require tabulate 0.8.3
if tabulate_version >= (0, 8, 3):
print(tabulate.tabulate(rows,
headers=headers,
colalign=colalign,
tablefmt="github"))
else:
print(tabulate.tabulate(rows,
headers=headers,
tablefmt="markdown"))
print("")
def main():
help_text = "%(prog)s [reference.json compare.json | reference_dir/ compare_dir/]"
parser = argparse.ArgumentParser(prog='nvbench_compare', usage=help_text)
parser.add_argument('--threshold-diff', type=float, dest='threshold', default=0.0,
help='only show benchmarks where percentage diff is >= THRESHOLD')
args, files_or_dirs = parser.parse_known_args()
print(files_or_dirs)
if len(files_or_dirs) != 2:
parser.print_help()
sys.exit(1)
# if provided two directories, find all the exactly named files
# in both and treat them as the reference and compare
to_compare = []
if os.path.isdir(files_or_dirs[0]) and os.path.isdir(files_or_dirs[1]):
for f in os.listdir(files_or_dirs[1]):
if os.path.splitext(f)[1] != ".json":
continue
r = os.path.join(files_or_dirs[0], f)
c = os.path.join(files_or_dirs[1], f)
if os.path.isfile(r) and os.path.isfile(c) and \
os.path.getsize(r) > 0 and os.path.getsize(c) > 0:
to_compare.append((r, c))
else:
to_compare = [(files_or_dirs[0], files_or_dirs[1])]
for ref, comp in to_compare:
ref_root = reader.read_file(ref)
cmp_root = reader.read_file(comp)
global all_devices
all_devices = cmp_root["devices"]
# This is blunt but works for now:
if ref_root["devices"] != cmp_root["devices"]:
print("Device sections do not match.")
sys.exit(1)
compare_benches(ref_root["benchmarks"], cmp_root["benchmarks"], args.threshold)
print("# Summary\n")
print("- Total Matches: %d" % config_count)
print(" - Pass (diff <= min_noise): %d" % pass_count)
print(" - Unknown (infinite noise): %d" % unknown_count)
print(" - Failure (diff > min_noise): %d" % failure_count)
return failure_count
if __name__ == '__main__':
sys.exit(main())
| nvbench-main | scripts/nvbench_compare.py |
file_version = (1, 0, 0)
file_version_string = "{}.{}.{}".format(file_version[0],
file_version[1],
file_version[2])
def check_file_version(filename, root_node):
try:
version_node = root_node["meta"]["version"]["json"]
except KeyError:
print("WARNING:")
print(" {} is written in an older, unversioned format. ".format(filename))
print(" It may not read correctly.")
print(" Reader expects JSON file version {}.".format(file_version_string))
return
# TODO We could do something fancy here using semantic versioning, but
# for now just warn on mismatch.
if version_node["string"] != file_version_string:
print("WARNING:")
print(" {} was written using a different NVBench JSON file version."
.format(filename))
print(" It may not read correctly.")
print(" (file version: {} reader version: {})"
.format(version_node["string"], file_version_string))
| nvbench-main | scripts/nvbench_json/version.py |
from . import reader
from . import version
| nvbench-main | scripts/nvbench_json/__init__.py |
import json
from . import version
def read_file(filename):
with open(filename, "r") as f:
file_root = json.load(f)
version.check_file_version(filename, file_root)
return file_root
| nvbench-main | scripts/nvbench_json/reader.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Key
"""
from typing import Union, List
from functools import reduce
from .constants import diff_str, NO_OP_SCALE
class Key(object):
"""
Class describing keys used for graph unroll.
    The most basic key is just a simple string;
    however, you can also add dimension information
and even information on how to scale inputs
to networks.
Parameters
----------
name : str
String used to refer to the variable (e.g. 'x', 'y'...).
size : int=1
Dimension of variable.
derivatives : List=[]
        This signifies that this key holds a derivative with
        respect to the listed keys.
scale: (float, float)
Characteristic location and scale of quantity: used for normalisation.
"""
def __init__(self, name, size=1, derivatives=[], base_unit=None, scale=NO_OP_SCALE):
super(Key, self).__init__()
self.name = name
self.size = size
self.derivatives = derivatives
self.base_unit = base_unit
self.scale = scale
@classmethod
def from_str(cls, name):
split_name = name.split(diff_str)
var_name = split_name[0]
diff_names = Key.convert_list(split_name[1:])
return cls(var_name, size=1, derivatives=diff_names)
@classmethod
def from_tuple(cls, name_size):
split_name = name_size[0].split(diff_str)
var_name = split_name[0]
diff_names = Key.convert_list(split_name[1:])
return cls(var_name, size=name_size[1], derivatives=diff_names)
@classmethod
def convert(cls, name_or_tuple):
if isinstance(name_or_tuple, str):
key = Key.from_str(name_or_tuple)
elif isinstance(name_or_tuple, tuple):
key = cls.from_tuple(name_or_tuple)
elif isinstance(name_or_tuple, cls):
key = name_or_tuple
else:
raise ValueError("can only convert string or tuple to key")
return key
@staticmethod
def convert_list(ls):
keys = []
for name_or_tuple in ls:
keys.append(Key.convert(name_or_tuple))
return keys
@staticmethod
def convert_config(key_cfg: Union[List, str]):
"""Converts a config input/output key string/list into a key
This provides a quick alternative method for defining keys in models
Parameters
----------
key_cfg : Union[List, str]
Config list or string
Returns
-------
List[Key]
List of keys generated
Example
-------
The following are some config examples for constructing keys in the YAML file.
Defining input/output keys with size of 1
>>> arch:
>>> full_connected:
>>> input_keys: input
>>> output_keys: output
Defining input/output keys with different sizes
>>> arch:
>>> full_connected:
>>> input_keys: [input, 2] # Key('input',size=2)
>>> output_keys: [output, 3] # Key('output',size=3)
Multiple input/output keys with size of 1
>>> arch:
>>> full_connected:
>>> input_keys: [a, b, c]
>>> output_keys: [u, w, v]
Multiple input/output keys with different sizes
>>> arch:
>>> full_connected:
>>> input_keys: [[a,2], [b,3]] # Key('a',size=2), Key('b',size=3)
>>> output_keys: [[u,3],w] # Key('u',size=3), Key('w',size=1)
"""
# Just single key name
if isinstance(key_cfg, str):
keys = [Key.convert(key_cfg.lstrip())]
# Multiple keys
elif isinstance(key_cfg, list):
keys = []
for cfg_obj in key_cfg:
if isinstance(cfg_obj, str):
key = Key.convert(cfg_obj)
keys.append(key)
elif isinstance(cfg_obj, int) and len(keys) > 0:
keys[-1].size = cfg_obj
elif isinstance(cfg_obj, list):
key_name = cfg_obj[0]
key = Key.convert(key_name)
                    try:
                        key.size = int(cfg_obj[1])
                    except (IndexError, ValueError, TypeError):
                        key.size = 1
keys.append(key)
# Manually provided
elif isinstance(cfg_obj, Key):
keys.append(cfg_obj)
else:
raise ValueError(f"Invalid key parameter set in config {key_cfg}")
else:
raise ValueError(f"Invalid key parameter set in config {key_cfg}")
return keys
    @property
    def unit(self):
        # each derivative divides out one factor of that key's base unit
        return self.base_unit / reduce(
            lambda x, y: x * y, (d.base_unit for d in self.derivatives), 1
        )
def __str__(self):
diff_str = "".join(["__" + x.name for x in self.derivatives])
return self.name + diff_str
def __repr__(self):
return str(self)
def __eq__(self, obj):
return isinstance(obj, Key) and str(self) == str(obj)
def __lt__(self, obj):
assert isinstance(obj, Key)
return str(self) < str(obj)
def __gt__(self, obj):
assert isinstance(obj, Key)
return str(self) > str(obj)
def __hash__(self):
return hash(str(self))
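
# Illustrative diff-string round trip (not from the original source):
#   Key.from_str("u__x__y") -> Key("u", derivatives=[Key("x"), Key("y")])
#   str(Key.from_str("u__x__y")) == "u__x__y"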
def _length_key_list(list_keys):
length = 0
for key in list_keys:
length += key.size
return length
| modulus-sym-main | modulus/sym/key.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Helper functions for unrolling computational graph
"""
from copy import copy
import torch
import logging
from typing import Dict, List, Optional
from .models.arch import Arch, FuncArch
from .node import Node
from .key import Key
from .constants import diff_str
from .eq.derivatives import Derivative
from .manager import JitManager, GraphManager
logger = logging.getLogger(__name__)
class Graph(torch.nn.Module):
"""
Torch Module that is constructed by unrolling a computational graph given
desired inputs, outputs, and evaluatable nodes.
Examples
========
Here is a simple example of using `Graph` to unroll a two node graph.
>>> import torch
>>> from sympy import Symbol
>>> from modulus.sym.node import Node
>>> from modulus.sym.key import Key
>>> from modulus.sym.graph import Graph
>>> node_1 = Node.from_sympy(Symbol('x') + Symbol('y'), 'u')
>>> node_2 = Node.from_sympy(Symbol('u') + 1.0, 'v')
>>> graph = Graph([node_1, node_2], [Key('x'), Key('y')], [Key('v')])
>>> graph.forward({'x': torch.tensor([1.0]), 'y': torch.tensor([2.0])})
{'v': tensor([4.])}
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
invar : List[Key]
List of inputs to graph.
req_names : List[Key]
List of required outputs of graph.
diff_nodes : List[Node]
List of specialty nodes to compute derivatives.
By default this is not needed.
func_arch : bool, Optional
If True, find the computable derivatives that are part of the Jacobian and
Hessian of the neural network. They will be calculated during the forward
pass using FuncArch.
If None (default), will use the GraphManager to get the global flag
(default is False), which could be configured in the hydra config with key
`graph.func_arch`.
func_arch_allow_partial_hessian : bool, Optional
If True, allow evaluating partial hessian to save some unnecessary computations.
For example, when the input is x, outputs are [u, p], and the needed derivatives
are `[u__x, p__x, u__x__x]`, func_arch needs to evaluate the full hessian rows
to be able to extract jacobian `p__x`. When this flag is on, func_arch will
only output `[u__x, u__x__x]`, and `p__x` will be evaluated later by the autograd.
If None (default), will use the GraphManager to get the global flag
(default is True), which could be configured in the hydra config with key
`graph.func_arch_allow_partial_hessian`.
"""
def __init__(
self,
nodes: List[Node],
invar: List[Key],
req_names: List[Key],
diff_nodes: List[Node] = [],
func_arch: Optional[bool] = None,
func_arch_allow_partial_hessian: Optional[bool] = None,
):
super().__init__()
# get configs from the graph manager
graph_manager = GraphManager()
func_arch = func_arch if func_arch is not None else graph_manager.func_arch
func_arch_allow_partial_hessian = (
func_arch_allow_partial_hessian
if func_arch_allow_partial_hessian is not None
else graph_manager.func_arch_allow_partial_hessian
)
self.req_names = req_names
self.computable_names = set(_computable_names(nodes, invar))
# check if graph can be computed
req_names_no_diff = [Key(x.name) for x in req_names]
if not set(req_names_no_diff).issubset(self.computable_names):
_print_graph_unroll_error(nodes, invar, req_names)
raise RuntimeError("Failed Unrolling Graph")
# compute only necessary nodes for req_names
# Walk backwards from the output nodes in the graph and keep adding required inputs
# until all inputs are available in invar
nodes = copy(nodes)
necessary_nodes = []
needed_names = [Key(x.name, derivatives=x.derivatives) for x in req_names] + [
Key(x.name) for x in req_names
]
while True:
finished = True
for i, node in enumerate(nodes):
if not set(node.outputs).isdisjoint(set(needed_names)):
# Make needed names include derivatives!
needed_names += (
node.inputs
+ [
Key(x.name, derivatives=x.derivatives)
for x in node.derivatives
]
+ [Key(x.name) for x in node.derivatives]
)
# needed_names.update(node.inputs() + [Key(x.name) for x in node.derivatives()])
necessary_nodes.append(node)
nodes.pop(i)
finished = False
if finished:
break
        # Convert Arch nodes into FuncArch nodes if we find computable derivatives
        # and the Arch instance has supports_func_arch == True
needed_names = set(needed_names)
if func_arch:
for i, node in enumerate(necessary_nodes):
# `jit_mode_arch` is forced to be `only_activation` when func_arch is enabled,
# so all Arch instances will not be `RecursiveScriptModules` and we are good
# to transform it into FuncArch
if isinstance(node.evaluate, Arch):
if node.evaluate.supports_func_arch:
computable_derivatives = (
node.evaluate._find_computable_deriv_with_func_arch(
needed_names, func_arch_allow_partial_hessian
)
)
if len(computable_derivatives):
node_name = necessary_nodes[i].name
necessary_nodes[i] = FuncArch(
node.evaluate, computable_derivatives
).make_node(node_name)
logger.info(
f"{node_name} has been converted to a FuncArch node."
)
else:
logger.warning(
f"Arch {type(node.evaluate)} currently does not support FuncArch"
)
# unroll graph with only necessary nodes
# Store node evaluation order to use at runtime
self.node_evaluation_order = []
outvar = copy(invar)
while True:
# compute all nodes that don't need derivative calls
while True:
finished = True
for i, node in enumerate(necessary_nodes):
if set(node.inputs + node.derivatives).issubset(set(outvar)):
self.node_evaluation_order.append(node)
outvar += node.outputs
necessary_nodes.pop(i)
finished = False
if finished:
break
# compute derivative calls all at once
needed_derivatives = []
for node in necessary_nodes:
needed_derivatives += node.derivatives
needed_derivatives += [x for x in req_names if x.derivatives]
needed_derivatives = [
diff for diff in needed_derivatives if diff not in outvar
] # remove already computed diffs
if len(needed_derivatives) > 0:
# check if solution in diff nodes
try_auto_diff = True
for dn in diff_nodes:
if (not set(dn.outputs).isdisjoint(set(needed_derivatives))) and (
set(dn.inputs).issubset(set(outvar))
):
# input_variables = Variables.subset(outvar, dn.inputs())
# outvar.update(dn.evaluate(input_variables))
self.node_evaluation_order.append(dn)
outvar += dn.outputs
try_auto_diff = False
# compute first derivatives only
if try_auto_diff:
# Variables.differentiate(outvar, outvar, needed_derivatives)
dnode = Derivative.make_node(
outvar,
needed_derivatives,
jit=(JitManager().enabled and JitManager().autograd_nodes),
)
self.node_evaluation_order.append(dnode)
outvar += dnode.outputs
# check if finished
if set(req_names).issubset(set(outvar)):
# return Variables({key: value for key, value in outvar.items() if key in req_names})
break
self.evaluation_order = torch.nn.ModuleList(
[n.evaluate for n in self.node_evaluation_order]
)
self.node_names: List[str] = [n.name for n in self.node_evaluation_order]
self.optimizer_list = torch.nn.ModuleList(
[n.evaluate for n in self.node_evaluation_order if n.optimize]
)
if graph_manager.debug:
print(self)
def forward(self, invar: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
outvar = invar
for i, e in enumerate(self.evaluation_order):
torch.cuda.nvtx.range_push(self.node_names[i])
outvar.update(e(outvar))
torch.cuda.nvtx.range_pop()
outvar = {
key: value for key, value in outvar.items() if Key(key) in self.req_names
}
return outvar
def __str__(self):
repr = "=" * 100 + "\n"
for node in self.node_evaluation_order:
repr += "-" * 50 + "\n"
repr += str(node) + "\n"
return repr
def _print_graph_unroll_error(nodes, invar, req_names):
print("####################################")
print("could not unroll graph!")
print(
"This is probably because you are asking to compute a value that is not an output of any node"
)
print("####################################")
print("invar: " + str(list(invar)))
print("requested var: " + str(req_names))
print("computable var: " + str(_computable_names(nodes, invar)))
print("####################################")
print("Nodes in graph: ")
for node in nodes:
print(node)
print("####################################")
def _computable_names(nodes, invar):
nodes = copy(nodes)
computable_names = copy(invar)
while True:
finished = True
for i, node in enumerate(nodes):
if set(node.inputs).issubset(set(computable_names)):
computable_names += node.outputs
nodes.pop(i)
finished = False
if finished:
return computable_names
| modulus-sym-main | modulus/sym/graph.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
constant values used by Modulus
"""
import torch
import numpy as np
# string used to determine derivatives
diff_str: str = "__"
# function to apply diff string
def diff(y: str, x: str, degree: int = 1) -> str:
return diff_str.join([y] + degree * [x])
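
# e.g. diff("u", "x", degree=2) == "u__x__x"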
# for changing to float16 or float64
tf_dt = torch.float32
np_dt = np.float32
# tensorboard naming
TF_SUMMARY = False
# Pytorch Version for which JIT will be default on
JIT_PYTORCH_VERSION = "2.1.0a0+4136153"
# No scaling is needed if using NO_OP_SCALE
NO_OP_SCALE = (0.0, 1.0)
# If using NO_OP_NORM, it is effectively doing no normalization
NO_OP_NORM = (-1.0, 1.0)
| modulus-sym-main | modulus/sym/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "1.2.0a0"
from pint import UnitRegistry
from .node import Node
from .key import Key
from .hydra.utils import main, compose
# pint unit registry
ureg = UnitRegistry()
quantity = ureg.Quantity
| modulus-sym-main | modulus/sym/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modulus Solver
"""
import os
import time
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.cuda.amp import GradScaler
import torch.nn as nn
import torch.cuda.profiler as profiler
import torch.distributed as dist
from termcolor import colored, cprint
from copy import copy
from operator import add
from omegaconf import DictConfig, OmegaConf
import hydra
import itertools
from collections import Counter
from typing import Dict, List, Optional
import logging
from contextlib import ExitStack
from .domain.constraint import Constraint
from .domain import Domain
from .loss.aggregator import Sum
from .utils.training.stop_criterion import StopCriterion
from .constants import TF_SUMMARY, JIT_PYTORCH_VERSION
from .hydra import (
instantiate_optim,
instantiate_sched,
instantiate_agg,
add_hydra_run_path,
)
from .distributed.manager import DistributedManager
class AdamMixin:
"""Special functions for training using the standard optimizers
Should be used with ADAM, SGD, RMSProp, etc.
"""
def adam_compute_gradients(
self, aggregator: nn.Module, global_optimizer_model: nn.Module, step: int
):
loss, losses = 0, Counter({})
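        # accumulate over grad_agg_freq micro-batches; each micro-batch loss is
        # scaled by 1/grad_agg_freq so gradients match one large effective batch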
for agg_step in range(self.grad_agg_freq):
with torch.autocast(
self.device_amp, enabled=self.amp, dtype=self.amp_dtype
):
torch.cuda.nvtx.range_push("Loss computation")
losses_minibatch = self.compute_losses(step)
torch.cuda.nvtx.range_pop()
losses_minibatch = {
key: value / self.grad_agg_freq
for key, value in losses_minibatch.items()
}
torch.cuda.nvtx.range_push("Loss aggregator")
loss_minibatch = aggregator(losses_minibatch, step)
torch.cuda.nvtx.range_pop()
loss += loss_minibatch
torch.cuda.nvtx.range_push("Weight gradients")
self.scaler.scale(loss_minibatch).backward()
torch.cuda.nvtx.range_pop()
losses.update(losses_minibatch)
return loss, dict(losses)
def adam_apply_gradients(self):
self.scaler.step(self.optimizer)
self.scaler.update()
class AdaHessianMixin:
"""Special functions for training using the higher-order optimizer AdaHessian"""
def adahess_compute_gradients(
self, aggregator: nn.Module, global_optimizer_model: nn.Module, step: int
):
if self.amp:
raise NotImplementedError("AMP is not supported for this optimizer.")
        # With AdaHessian we need to keep the gradient graph during back-prop in
        # order to approximate the Hessian. The suggested PyTorch way is to use
        # torch.autograd.grad instead of backward.
loss, losses = 0, Counter({})
grads = [
torch.zeros_like(parameter)
for parameter in list(global_optimizer_model.parameters())
]
for agg_step in range(self.grad_agg_freq):
losses_minibatch = self.compute_losses(step)
losses_minibatch = {
key: value / self.grad_agg_freq
for key, value in losses_minibatch.items()
}
loss_minibatch = aggregator(losses_minibatch, step)
grads_step = torch.autograd.grad(
loss_minibatch,
list(global_optimizer_model.parameters()),
create_graph=True,
)
grads = list(map(add, grads, grads_step))
loss += loss_minibatch
losses.update(losses_minibatch)
# Set gradients of models manually
for grad, param in zip(grads, global_optimizer_model.parameters()):
param.grad = grad
return loss, dict(losses)
def adahess_apply_gradients(self):
self.adam_apply_gradients()
class BFGSMixin:
"""Special functions for training using BFGS optimizer"""
def bfgs_compute_gradients(
self, aggregator: nn.Module, global_optimizer_model: nn.Module, step: int
):
        # Dummy function used entirely for logging purposes and for storing
        # objects for internal BFGS updates. Gradients are not calc'd here for BFGS
if self.amp:
raise NotImplementedError("AMP is not supported for this optimizer.")
if self.max_steps != 0:
self.log.warning("lbfgs optimizer selected. Setting max_steps to 0")
self.max_steps = 0
if self.grad_agg_freq != 1:
self.log.warning("lbfgs optimizer selected. Setting grad_agg_freq to 1")
self.grad_agg_freq = 1
losses = self.compute_losses(step)
loss = aggregator(losses, step)
self.bfgs_step = step
self.bfgs_aggregator = aggregator
# Re-zero any gradients
for param in global_optimizer_model.parameters():
param.grad = None
return loss, losses
def bfgs_closure_func(self):
self.optimizer.zero_grad()
loss = 0
losses = self.compute_losses(self.bfgs_step)
loss = self.bfgs_aggregator(losses, self.bfgs_step)
loss.backward()
self.bfgs_optim_steps += 1
return loss
def bfgs_apply_gradients(self):
assert (
not self.bfgs_aggregator is None
), "Call bfgs_compute_gradients prior to this!"
assert not self.bfgs_step is None, "Call bfgs_compute_gradients prior to this!"
self.bfgs_optim_steps = 0
self.log.info(f"[step: {self.bfgs_step:10d}] lbfgs optimization in running")
self.optimizer.step(self.bfgs_closure_func)
self.log.info(
f"lbfgs optimization completed after {self.bfgs_optim_steps} steps"
)
# base class for optimizing networks on loss
class Trainer(AdamMixin, AdaHessianMixin, BFGSMixin):
"""Base class for optimizing networks on losses/constraints"""
def __init__(self, cfg: DictConfig):
super(Trainer, self).__init__()
# Save a local copy of the config
self.cfg = cfg
# set training parameters
self._network_dir = self.cfg.network_dir
self._initialization_network_dir = self.cfg.initialization_network_dir
self.max_steps = self.cfg.training.max_steps
self.grad_agg_freq = self.cfg.training.grad_agg_freq
self.save_network_freq = self.cfg.training.save_network_freq
self.print_stats_freq = self.cfg.training.print_stats_freq
self.summary_freq = self.cfg.training.summary_freq
self.amp = self.cfg.training.amp
self.stop_criterion_metric = self.cfg.stop_criterion.metric
self.stop_criterion_min_delta = self.cfg.stop_criterion.min_delta
self.stop_criterion_patience = self.cfg.stop_criterion.patience
self.stop_criterion_mode = self.cfg.stop_criterion.mode
self.stop_criterion_freq = self.cfg.stop_criterion.freq
self.stop_criterion_strict = self.cfg.stop_criterion.strict
self.save_filetypes = self.cfg.save_filetypes
self.summary_histograms = self.cfg.summary_histograms
self.apply_gradients = self._apply_gradients
self.compute_gradients = self._compute_gradients
# make logger
self.log = logging.getLogger(__name__)
# Set distributed manager
self.manager = DistributedManager()
# set device
self.device = self.manager.device
self.device_amp = "cuda" if self.manager.cuda else "cpu"
# set amp dtype
if self.cfg.training.amp_dtype == "bfloat16" or self.device_amp == "cpu":
self.amp_dtype = torch.bfloat16
if self.device_amp == "cpu" and self.amp:
self.log.warning(
"Switching amp_dtype to bfloat16, AutocastCPU only supports bfloat16"
)
else:
self.amp_dtype = torch.float16
def compute_losses(self, step: int):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def _compute_gradients(self):
raise NotImplementedError("Config should set the compute_gradients function")
def _apply_gradients(self):
raise NotImplementedError("Config should set the apply_gradients function")
def get_saveable_models(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def create_global_optimizer_model(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def load_network(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def save_checkpoint(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def record_constraints(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def record_validators(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
@property
def has_validators(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def record_inferencers(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
@property
def has_inferencers(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def record_monitors(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
@property
def has_monitors(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def get_num_losses(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def _record_constraints(self):
data_parallel_rank = (
self.manager.group_rank("data_parallel") if self.manager.distributed else 0
)
if data_parallel_rank == 0:
rec_inferencer_start = time.time()
self.record_constraints()
self.log.debug(
f"{self.step_str} saved constraint results to {self.network_dir}"
)
self.log.info(
f"{self.step_str} record constraint batch time: {time.time()-rec_inferencer_start:10.3e}s"
)
def _record_validators(self, step):
data_parallel_rank = (
self.manager.group_rank("data_parallel") if self.manager.distributed else 0
)
if data_parallel_rank == 0:
rec_validation_start = time.time()
self.validator_outvar = self.record_validators(step)
self.log.debug(
f"{self.step_str} saved validator results to {self.network_dir}"
)
self.log.info(
f"{self.step_str} record validators time: {time.time()-rec_validation_start:10.3e}s"
)
def _record_inferencers(self, step):
data_parallel_rank = (
self.manager.group_rank("data_parallel") if self.manager.distributed else 0
)
if data_parallel_rank == 0:
rec_inferencer_start = time.time()
self.record_inferencers(step)
self.log.debug(
f"{self.step_str} saved inferencer results to {self.network_dir}"
)
self.log.info(
f"{self.step_str} record inferencers time: {time.time()-rec_inferencer_start:10.3e}s"
)
def _record_monitors(self, step):
data_parallel_rank = (
self.manager.group_rank("data_parallel") if self.manager.distributed else 0
)
if data_parallel_rank == 0:
rec_monitor_start = time.time()
self.monitor_outvar = self.record_monitors(step)
self.log.debug(
f"{self.step_str} saved monitor results to {self.network_dir}"
)
# write parameter histograms to tensorboard
if self.summary_histograms:
for (
name,
parameter,
) in self.global_optimizer_model.named_parameters():
name = name.split(".")
name = ".".join(name[:-1]) + "/" + ".".join(name[-1:])
self.writer.add_histogram(name, parameter.detach().flatten(), step)
if parameter.grad is not None:
self.writer.add_histogram(
name + "_gradient",
parameter.grad.detach().flatten(),
step,
)
self.log.info(
f"{self.step_str} record monitor time: {time.time()-rec_monitor_start:10.3e}s"
)
# check if stopping criterion is met
def _check_stopping_criterion(self, loss, losses, step):
if self.manager.rank == 0:
if self.stop_criterion_metric is None:
return False
elif step % self.stop_criterion_freq == 0:
criterion_metric_dict = {"loss": {"loss": loss.cpu().detach().numpy()}}
criterion_metric_dict["loss"].update(
{key: val.cpu().detach().numpy() for key, val in losses.items()}
)
if self.has_monitors:
criterion_metric_dict.update(
{
"monitor": {
key: val.cpu().detach().numpy()
for key, val in self.monitor_outvar.items()
}
}
)
if self.has_validators:
criterion_metric_dict.update(
{
"validation": {
key: val.cpu().detach().numpy()
for key, val in self.validator_outvar.items()
}
}
)
stop_training = self.stop_criterion.evaluate(criterion_metric_dict)
return stop_training
else:
return False
def _train_loop(
self,
sigterm_handler=None,
    ):  # TODO this train loop may be broken up into methods if needed by future child classes
# make directory if doesn't exist
if self.manager.rank == 0:
# exist_ok=True to skip creating directory that already exists
os.makedirs(self.network_dir, exist_ok=True)
# create global model for restoring and saving
self.saveable_models = self.get_saveable_models()
self.global_optimizer_model = self.create_global_optimizer_model()
# initialize optimizer from hydra
self.compute_gradients = getattr(
self, self.cfg.optimizer._params_.compute_gradients
)
self.apply_gradients = getattr(
self, self.cfg.optimizer._params_.apply_gradients
)
self.optimizer = instantiate_optim(self.cfg, model=self.global_optimizer_model)
# initialize scheduler from hydra
self.scheduler = instantiate_sched(self.cfg, optimizer=self.optimizer)
# initialize aggregator from hydra
self.aggregator = instantiate_agg(
self.cfg,
model=self.global_optimizer_model.parameters(),
num_losses=self.get_num_losses(),
)
if self.cfg.jit:
# Warn user if pytorch version difference
if not torch.__version__ == JIT_PYTORCH_VERSION:
                self.log.warning(
                    f"Installed PyTorch version {torch.__version__} is not the TorchScript"
                    + f" version supported by Modulus. Version {JIT_PYTORCH_VERSION} is officially supported."
                )
self.aggregator = torch.jit.script(self.aggregator)
if self.amp:
torch._C._jit_set_autocast_mode(True)
if len(list(self.aggregator.parameters())) > 0:
self.log.debug("Adding loss aggregator param group. LBFGS will not work!")
self.optimizer.add_param_group(
{"params": list(self.aggregator.parameters())}
)
# create grad scalar for AMP
# grad scaler is only available for float16 dtype on cuda device
enable_scaler = self.amp and self.amp_dtype == torch.float16
self.scaler = GradScaler(enabled=enable_scaler)
# make stop criterion
if self.stop_criterion_metric is not None:
self.stop_criterion = StopCriterion(
self.stop_criterion_metric,
self.stop_criterion_min_delta,
self.stop_criterion_patience,
self.stop_criterion_mode,
self.stop_criterion_freq,
self.stop_criterion_strict,
self.cfg.training.rec_monitor_freq,
self.cfg.training.rec_validation_freq,
)
# load network
self.initial_step = self.load_network()
        # make summary writer
self.writer = SummaryWriter(
log_dir=self.network_dir, purge_step=self.summary_freq + 1
)
self.summary_histograms = self.cfg["summary_histograms"]
# write tensorboard config
if self.manager.rank == 0:
self.writer.add_text(
"config", f"<pre>{str(OmegaConf.to_yaml(self.cfg))}</pre>"
)
# create profiler
try:
self.profile = self.cfg.profiler.profile
self.profiler_start_step = self.cfg.profiler.start_step
self.profiler_end_step = self.cfg.profiler.end_step
if self.profiler_end_step < self.profiler_start_step:
self.profile = False
        except Exception:
            # profiler section missing or malformed in the config
            self.profile = False
self.profiler_start_step = -1
self.profiler_end_step = -1
# Distributed barrier before starting the train loop
if self.manager.distributed:
dist.barrier(device_ids=[self.manager.local_rank])
barrier_flag = False
if self.manager.cuda:
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
else:
t = time.time()
# termination signal handler
if sigterm_handler is None:
self.sigterm_handler = lambda: False
else:
self.sigterm_handler = sigterm_handler
# train loop
with ExitStack() as stack:
if self.profile:
# Add NVTX context if in profile mode
self.log.warning("Running in profiling mode")
stack.enter_context(torch.autograd.profiler.emit_nvtx())
for step in range(self.initial_step, self.max_steps + 1):
if self.sigterm_handler():
if self.manager.rank == 0:
self.log.info(
f"Training terminated by the user at iteration {step}"
)
break
if self.profile and step == self.profiler_start_step:
# Start profiling
self.log.info("Starting profiler at step {}".format(step))
profiler.start()
if self.profile and step == self.profiler_end_step:
# Stop profiling
self.log.info("Stopping profiler at step {}".format(step))
profiler.stop()
torch.cuda.nvtx.range_push("Training iteration")
if self.cfg.cuda_graphs:
# If cuda graphs statically load it into defined allocations
self.load_data(static=True)
loss, losses = self._cuda_graph_training_step(step)
else:
# Load all data for constraints
self.load_data()
self.global_optimizer_model.zero_grad(set_to_none=True)
# compute gradients
loss, losses = self.compute_gradients(
self.aggregator, self.global_optimizer_model, step
)
# take optimizer step
self.apply_gradients()
# take scheduler step
self.scheduler.step()
# check for nans in loss
if torch.isnan(loss):
self.log.error("loss went to Nans")
break
self.step_str = f"[step: {step:10d}]"
# write train loss / learning rate tensorboard summaries
if step % self.summary_freq == 0:
if self.manager.rank == 0:
# add train loss scalars
for key, value in losses.items():
if TF_SUMMARY:
self.writer.add_scalar(
"Train_/loss_L2" + str(key),
value,
step,
new_style=True,
)
else:
self.writer.add_scalar(
"Train/loss_" + str(key),
value,
step,
new_style=True,
)
if TF_SUMMARY:
self.writer.add_scalar(
"Optimzer/loss", loss, step, new_style=True
)
self.writer.add_scalar(
"learning_rate/lr",
self.scheduler.get_last_lr()[0], # TODO: handle list
step,
new_style=True,
)
else:
self.writer.add_scalar(
"Train/loss_aggregated", loss, step, new_style=True
)
self.writer.add_scalar(
"Train/learning_rate",
self.scheduler.get_last_lr()[0], # TODO: handle list
step,
new_style=True,
)
if self.manager.distributed:
barrier_flag = True
# write train / inference / validation datasets to tensorboard and file
if step % self.cfg.training.rec_constraint_freq == 0:
barrier_flag = True
self._record_constraints()
if (step % self.cfg.training.rec_validation_freq == 0) and (
self.has_validators
):
barrier_flag = True
self._record_validators(step)
if (step % self.cfg.training.rec_inference_freq == 0) and (
self.has_inferencers
):
barrier_flag = True
self._record_inferencers(step)
if (step % self.cfg.training.rec_monitor_freq == 0) and (
self.has_monitors
):
barrier_flag = True
self._record_monitors(step)
# save checkpoint
if step % self.save_network_freq == 0:
# Get data parallel rank so all processes in the first model parallel group
# can save their checkpoint. In the case without model parallelism, data_parallel_rank
# should be the same as the process rank itself
data_parallel_rank = (
self.manager.group_rank("data_parallel")
if self.manager.distributed
else 0
)
if data_parallel_rank == 0:
self.save_checkpoint(step)
self.log.info(
f"{self.step_str} saved checkpoint to {add_hydra_run_path(self.network_dir)}"
)
if self.manager.distributed:
barrier_flag = True
if self.manager.distributed and barrier_flag:
dist.barrier(device_ids=[self.manager.local_rank])
barrier_flag = False
# print loss stats
if step % self.print_stats_freq == 0:
# synchronize and get end time
if self.manager.cuda:
end_event.record()
end_event.synchronize()
elapsed_time = start_event.elapsed_time(
end_event
) # in milliseconds
else:
t_end = time.time()
elapsed_time = (t_end - t) * 1.0e3 # in milliseconds
# Reduce loss across all GPUs
if self.manager.distributed:
dist.reduce(loss, 0, op=dist.ReduceOp.AVG)
elapsed_time = torch.tensor(elapsed_time).to(self.device)
dist.reduce(elapsed_time, 0, op=dist.ReduceOp.AVG)
elapsed_time = elapsed_time.cpu().numpy()[()]
# print statement
print_statement = (
f"{self.step_str} loss: {loss.cpu().detach().numpy():10.3e}"
)
if step >= self.initial_step + self.print_stats_freq:
print_statement += f", time/iteration: {elapsed_time/self.print_stats_freq:10.3e} ms"
if self.manager.rank == 0:
self.log.info(print_statement)
if self.manager.cuda:
start_event.record()
else:
t = time.time()
# check stopping criterion
stop_training = self._check_stopping_criterion(loss, losses, step)
if stop_training:
if self.manager.rank == 0:
self.log.info(
f"{self.step_str} stopping criterion is met, finished training!"
)
break
# check max steps
if step >= self.max_steps:
if self.manager.rank == 0:
self.log.info(
f"{self.step_str} reached maximum training steps, finished training!"
)
break
torch.cuda.nvtx.range_pop()
def _cuda_graph_training_step(self, step: int):
# Training step method for using cuda graphs
# Warm up
if (step - self.initial_step) < self.cfg.cuda_graph_warmup:
if (step - self.initial_step) == 0:
# Default stream for warm up
self.warmup_stream = torch.cuda.Stream()
self.warmup_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.warmup_stream):
# zero optimizer gradients
self.global_optimizer_model.zero_grad(set_to_none=True)
# compute gradients
self.loss_static, self.losses_static = self.compute_gradients(
self.aggregator, self.global_optimizer_model, step
)
torch.cuda.current_stream().wait_stream(self.warmup_stream)
# take optimizer step
self.apply_gradients()
# take scheduler step
self.scheduler.step()
# Record graph
elif (step - self.initial_step) == self.cfg.cuda_graph_warmup:
torch.cuda.synchronize()
if self.manager.distributed:
dist.barrier(device_ids=[self.manager.local_rank])
if self.cfg.cuda_graph_warmup < 11:
self.log.warning(
f"Graph warm-up length ({self.cfg.cuda_graph_warmup}) should be at least 11 steps; a higher value is suggested"
)
self.log.info("Attempting cuda graph building, this may take a bit...")
self.g = torch.cuda.CUDAGraph()
self.global_optimizer_model.zero_grad(set_to_none=True)
# TODO: temporary workaround till this issue is fixed:
# https://github.com/pytorch/pytorch/pull/104487#issuecomment-1638665876
delay = os.environ.get("MODULUS_CUDA_GRAPH_CAPTURE_DELAY", "10")
time.sleep(int(delay))
with torch.cuda.graph(self.g):
# compute gradients
self.loss_static, self.losses_static = self.compute_gradients(
self.aggregator, self.global_optimizer_model, step
)
# take optimizer step
# left out of the graph for AMP compatibility; no perf difference
self.apply_gradients()
# take scheduler step
self.scheduler.step()
# Replay
else:
# Graph replay
self.g.replay()
# take optimizer step
self.apply_gradients()
self.scheduler.step()
return self.loss_static, self.losses_static
def _eval(
self,
):
# check the directory exists
if not os.path.exists(self.network_dir):
raise RuntimeError("Network checkpoint is required for eval mode.")
# create global model for restoring and saving
self.saveable_models = self.get_saveable_models()
# set device
if self.device is None:
self.device = self.manager.device
# load model
self.step = self.load_step()
self.step = self.load_model()
self.step_str = f"[step: {self.step:10d}]"
# make summary writer
self.writer = SummaryWriter(
log_dir=self.network_dir, purge_step=self.summary_freq + 1
)
self.summary_histograms = self.cfg["summary_histograms"]
if self.manager.cuda:
torch.cuda.synchronize(self.device)
# write inference / validation datasets to tensorboard and file
if self.has_validators:
self._record_validators(self.step)
if self.has_inferencers:
self._record_inferencers(self.step)
if self.has_monitors:
self._record_monitors(self.step)
def _stream(
self,
):
# check the directory exists
if not os.path.exists(self.network_dir):
raise RuntimeError("Network checkpoint is required for stream mode.")
# create global model for restoring and saving
self.saveable_models = self.get_saveable_models()
# set device
if self.device is None:
self.device = self.manager.device
# load model
self.step = self.load_step()
self.step = self.load_model()
self.step_str = f"[step: {self.step:10d}]"
if self.manager.cuda:
torch.cuda.synchronize(self.device)
# write streamed results to file
return self.record_stream
@staticmethod
def _load_network(
initialization_network_dir: str,
network_dir: str,
models: List[nn.Module],
optimizer: Optimizer,
aggregator: nn.Module,
scheduler: _LRScheduler,
scaler: GradScaler,
log: logging.Logger,
manager: DistributedManager,
device: Optional[torch.device] = None,
):
# set device
if device is None:
device = manager.device
# load optimizer
step = Trainer._load_optimizer(
network_dir,
optimizer,
aggregator,
scheduler,
scaler,
log,
device,
)
# load model
step = Trainer._load_model(
initialization_network_dir,
network_dir,
models,
step,
log,
device,
)
return step
@staticmethod
def _load_optimizer(
network_dir: str,
optimizer: Optimizer,
aggregator: nn.Module,
scheduler: _LRScheduler,
scaler: GradScaler,
log: logging.Logger,
device: torch.device,
):
manager = DistributedManager()
model_parallel_rank = (
manager.group_rank("model_parallel") if manager.distributed else 0
)
# attempt to restore optimizer
optimizer_checkpoint_file = (
network_dir + f"/optim_checkpoint.{model_parallel_rank}.pth"
)
log.info("attempting to restore from: " + add_hydra_run_path(network_dir))
if os.path.exists(optimizer_checkpoint_file):
try:
checkpoint = torch.load(optimizer_checkpoint_file, map_location=device)
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
aggregator.load_state_dict(checkpoint["aggregator_state_dict"])
scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
scaler.load_state_dict(checkpoint["scaler_state_dict"])
step = checkpoint["step"]
success = colored("Success loading optimizer: ", "green")
log.info(success + add_hydra_run_path(optimizer_checkpoint_file))
except Exception:
fail = colored("Fail loading optimizer: ", "red")
step = 0
log.info(
fail + add_hydra_run_path(network_dir + "/optim_checkpoint.pth")
)
else:
log.warning("optimizer checkpoint not found")
step = 0
return step
@staticmethod
def _load_model(
initialization_network_dir: str,
network_dir: str,
models: List[nn.Module],
step: int,
log: logging.Logger,
device: torch.device,
):
manager = DistributedManager()
model_parallel_rank = (
manager.group_rank("model_parallel") if manager.distributed else 0
)
# attempt to restore from the initialization network dir
if initialization_network_dir != "":
for i_dir in initialization_network_dir.split(","):
if os.path.exists(i_dir):
log.info("attempting to initialize network from " + i_dir)
for model in models:
if os.path.exists(i_dir + "/" + model.checkpoint_filename):
try:
model.load(i_dir, map_location=device)
success = colored("Success loading model: ", "green")
log.info(
success + i_dir + "/" + model.checkpoint_filename
)
except Exception:
fail = colored("Fail loading model: ", "red")
step = 0
log.error(
fail + i_dir + "/" + model.checkpoint_filename
)
else:
log.warning(
"model "
+ model.checkpoint_filename
+ " not found for initialization"
)
# attempt to restore models
for model in models:
if os.path.exists(network_dir + "/" + model.checkpoint_filename):
try:
model.load(network_dir, map_location=device)
success = colored("Success loading model: ", "green")
log.info(
success
+ add_hydra_run_path(
network_dir + "/" + model.checkpoint_filename
)
)
except Exception:
fail = colored("Fail loading model: ", "red")
log.info(
fail
+ add_hydra_run_path(
network_dir + "/" + model.checkpoint_filename
)
)
else:
log.warning("model " + model.checkpoint_filename + " not found")
step = 0
return step
@staticmethod
def _load_step(
network_dir: str,
device: Optional[torch.device] = None,
):
manager = DistributedManager()
model_parallel_rank = (
manager.group_rank("model_parallel") if manager.distributed else 0
)
if os.path.exists(network_dir + f"/optim_checkpoint.{model_parallel_rank}.pth"):
try:
checkpoint = torch.load(
network_dir + f"/optim_checkpoint.{model_parallel_rank}.pth",
map_location=device,
)
step = checkpoint["step"]
except Exception:
step = 0
else:
step = 0
return step
@staticmethod
def _save_checkpoint(
network_dir: str,
models: List[nn.Module],
optimizer: Optimizer,
aggregator: nn.Module,
scheduler: _LRScheduler,
scaler: GradScaler,
step: int,
):
# Get model parallel rank so all processes in the first model parallel group
# can save their checkpoint. In the case without model parallelism, model_parallel_rank
# should be the same as the process rank itself and only rank 0 saves
manager = DistributedManager()
model_parallel_rank = (
manager.group_rank("model_parallel") if manager.distributed else 0
)
# save models
for model in models:
model.save(network_dir)
# save step, optimizer, aggregator, and scaler
torch.save(
{
"step": step,
"optimizer_state_dict": optimizer.state_dict(),
"aggregator_state_dict": aggregator.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"scaler_state_dict": scaler.state_dict(),
},
network_dir + f"/optim_checkpoint.{model_parallel_rank}.pth",
)
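# Illustrative sketch (hypothetical helper, not part of the original class):
# the checkpoint dict written by `_save_checkpoint` above can be inspected
# independently of the Trainer. `network_dir` and `model_parallel_rank` are
# hypothetical inputs; the key names mirror the torch.save call above.
def _example_read_optim_checkpoint(network_dir: str, model_parallel_rank: int = 0) -> int:
    checkpoint = torch.load(
        network_dir + f"/optim_checkpoint.{model_parallel_rank}.pth",
        map_location="cpu",
    )
    # expected keys: step, optimizer_state_dict, aggregator_state_dict,
    # scheduler_state_dict, scaler_state_dict
    return checkpoint["step"]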
| modulus-sym-main | modulus/sym/trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modulus nodes
"""
from sympy import Add
import torch
from .constants import diff_str
from .key import Key
class Node:
"""
Base class for all nodes used to unroll computational graph in Modulus.
Parameters
----------
inputs : List[Union[str, Key]]
Names of inputs to node. For example, `inputs=['x', 'y']`.
outputs : List[Union[str, Key]]
Names of outputs to node. For example, `outputs=['u', 'v', 'p']`.
evaluate : PyTorch Function
A PyTorch function that takes in a dictionary of tensors whose keys are the above `inputs`.
name : str
Name of node for print statements and debugging.
optimize : bool
If true then any trainable parameters contained in the node will be optimized by the `Trainer`.
"""
def __init__(self, inputs, outputs, evaluate, name="Node", optimize=False):
super().__init__()
self._inputs = Key.convert_list([x for x in inputs if diff_str not in str(x)])
self._outputs = Key.convert_list(outputs)
self._derivatives = Key.convert_list([x for x in inputs if diff_str in str(x)])
self.evaluate = evaluate
self._name = name
self._optimize = optimize
# set evaluate saveable to False if it doesn't exist
if not hasattr(self.evaluate, "saveable"):
self.evaluate.saveable = False
# check that model has name if optimizable
if self._optimize:
assert hasattr(
self.evaluate, "name"
), "Optimizable nodes require model to have unique name"
@classmethod
def from_sympy(cls, eq, out_name, freeze_terms=[], detach_names=[]):
"""
Generates a Modulus Node from a SymPy equation.
Parameters
----------
eq : Sympy Symbol/Exp
the equation to convert to a Modulus Node. The
inputs to this node consist of all Symbols,
Functions, and derivatives of Functions. For example,
`f(x,y) + f(x,y).diff(x) + k` will be converted
to a node whose input is [`f,f__x,k`].
out_name : str
This will be the name of the output for the node.
freeze_terms : List[int]
The terms that need to be frozen
detach_names : List[str]
This will detach the inputs of the resulting node.
Returns
-------
node : Node
"""
from modulus.sym.utils.sympy.torch_printer import (
torch_lambdify,
_subs_derivatives,
SympyToTorch,
)
# sub all functions and derivatives with symbols
sub_eq = _subs_derivatives(eq)
# construct Modulus node
if bool(freeze_terms):
print(
"the terms "
+ str(freeze_terms)
+ " will be frozen in the equation "
+ str(out_name)
+ ": "
+ str(Add.make_args(sub_eq))
)
print("Verify before proceeding!")
evaluate = SympyToTorch(sub_eq, out_name, freeze_terms, detach_names)
inputs = Key.convert_list(evaluate.keys)
outputs = Key.convert_list([out_name])
node = cls(inputs, outputs, evaluate, name="Sympy Node: " + out_name)
return node
@property
def name(self):
return self._name
@property
def outputs(self):
"""
Returns
-------
outputs : List[str]
Outputs of node.
"""
return self._outputs
@property
def inputs(self):
"""
Returns
-------
inputs : List[str]
Inputs of node.
"""
return self._inputs
@property
def derivatives(self):
"""
Returns
-------
derivatives : List[str]
Derivative inputs of node.
"""
return self._derivatives
@property
def optimize(self):
return self._optimize
def __str__(self):
return (
"node: "
+ self.name
+ "\n"
+ "evaluate: "
+ str(self.evaluate.__class__.__name__)
+ "\n"
+ "inputs: "
+ str(self.inputs)
+ "\n"
+ "derivatives: "
+ str(self.derivatives)
+ "\n"
+ "outputs: "
+ str(self.outputs)
+ "\n"
+ "optimize: "
+ str(self.optimize)
)
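# Illustrative sketch (hypothetical helper, not part of the original module):
# constructing a Node from a SymPy expression, following the `from_sympy`
# docstring above. Assumes the full modulus.sym package (including the
# torch_printer utilities) is importable; the symbol names are hypothetical.
def _example_node_from_sympy() -> "Node":
    from sympy import Function, Symbol

    x, y = Symbol("x"), Symbol("y")
    u = Function("u")(x, y)
    # u + u__x becomes a node with inputs [u, u__x] and output "custom_eq"
    node = Node.from_sympy(u + u.diff(x), "custom_eq")
    print(node)
    return node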
| modulus-sym-main | modulus/sym/node.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modulus Managers
"""
import logging
from typing import Dict, List, Union
from enum import Enum
import torch
from packaging import version
from modulus.sym.constants import JIT_PYTORCH_VERSION
logger = logging.getLogger(__name__)
class JitArchMode(Enum):
ALL = 0
ONLY_ACTIVATION = 1
class JitManager(object):
_shared_state = {}
def __new__(cls):
obj = super(JitManager, cls).__new__(cls)
obj.__dict__ = cls._shared_state
# Set the defaults
if not hasattr(obj, "_enabled"):
obj._enabled = version.parse(torch.__version__) >= version.parse(
JIT_PYTORCH_VERSION
)
if not hasattr(obj, "_arch_mode"):
obj._arch_mode = JitArchMode.ONLY_ACTIVATION
if not hasattr(obj, "_use_nvfuser"):
obj._use_nvfuser = True
if not hasattr(obj, "_autograd_nodes"):
obj._autograd_nodes = False
return obj
@property
def arch_mode(self):
return self._arch_mode
@arch_mode.setter
def arch_mode(self, mode: str):
if mode == "all":
self._arch_mode = JitArchMode.ALL
elif mode == "only_activation":
self._arch_mode = JitArchMode.ONLY_ACTIVATION
else:
raise ValueError(
f"jit arch mode should be all/only_activation, but found {mode}"
)
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, flag):
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/codegen/cuda/README.md
# enable single-node fusion and prevent tiny autodiff graphs from being inlined/reverted
if flag:
torch._C._jit_set_nvfuser_single_node_mode(True)
torch._C._debug_set_autodiff_subgraph_inlining(False)
self._enabled = flag
@property
def use_nvfuser(self):
return self._use_nvfuser
@use_nvfuser.setter
def use_nvfuser(self, flag):
self._use_nvfuser = flag
torch._C._jit_set_nvfuser_enabled(flag)
backend = "NVFuser" if flag else "NNC"
if self.enabled:
logger.info(f"JIT using the {backend} TorchScript backend")
@property
def autograd_nodes(self):
return self._autograd_nodes
@autograd_nodes.setter
def autograd_nodes(self, flag):
self._autograd_nodes = flag
def __repr__(self):
return f"JitManager: {self._shared_state}"
def init(self, enabled, arch_mode, use_nvfuser, autograd_nodes):
self.enabled = enabled
self.arch_mode = arch_mode
self.use_nvfuser = use_nvfuser
self.autograd_nodes = autograd_nodes
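# Illustrative sketch (hypothetical helper, not part of the original module):
# JitManager keeps all state in a class-level `_shared_state` dict (the Borg
# pattern), so configuration set through one instance is visible to every
# other instance.
def _example_jit_manager_shared_state() -> None:
    m1 = JitManager()
    m1.arch_mode = "all"
    m2 = JitManager()
    # m2 observes the mode set through m1 because __dict__ is shared
    assert m2.arch_mode == JitArchMode.ALL
    # restore the default so the example has no lasting side effects
    m1.arch_mode = "only_activation"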
class GraphManager(object):
_shared_state = {}
def __new__(cls):
obj = super(GraphManager, cls).__new__(cls)
obj.__dict__ = cls._shared_state
# Set the defaults
if not hasattr(obj, "_func_arch"):
obj._func_arch = True
# TODO we should have a debug flag in the global ModulusManager
# in the future
if not hasattr(obj, "_debug"):
obj._debug = False
if not hasattr(obj, "_func_arch_allow_partial_hessian"):
obj._func_arch_allow_partial_hessian = True
return obj
@property
def func_arch(self):
return self._func_arch
@func_arch.setter
def func_arch(self, flag):
self._func_arch = flag
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, flag):
self._debug = flag
@property
def func_arch_allow_partial_hessian(self):
return self._func_arch_allow_partial_hessian
@func_arch_allow_partial_hessian.setter
def func_arch_allow_partial_hessian(self, flag):
self._func_arch_allow_partial_hessian = flag
def __repr__(self):
return f"GraphManager: {self._shared_state}"
def init(self, func_arch, func_arch_allow_partial_hessian, debug):
self.func_arch = func_arch
self.func_arch_allow_partial_hessian = func_arch_allow_partial_hessian
self.debug = debug
| modulus-sym-main | modulus/sym/manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import libraries
import torch
import logging
import numpy as np
from torch import nn
from typing import Dict, List, Optional, Callable, Union
# Import from Modulus
from modulus.sym.eq.derivatives import gradient
from modulus.sym.hydra import to_absolute_path, add_hydra_run_path
logger = logging.getLogger(__name__)
class Aggregator(nn.Module):
"""
Base class for loss aggregators
"""
def __init__(self, params, num_losses, weights):
super().__init__()
self.params: List[torch.Tensor] = list(params)
self.num_losses: int = num_losses
self.weights: Optional[Dict[str, float]] = weights
self.device: torch.device
self.device = list(set(p.device for p in self.params))[0]
self.init_loss: torch.Tensor = torch.tensor(0.0, device=self.device)
def weigh_losses_initialize(
weights: Optional[Dict[str, float]]
) -> Callable[
[Dict[str, torch.Tensor], Optional[Dict[str, float]]],
Dict[str, torch.Tensor],
]:
if weights is None:
def weigh_losses(
losses: Dict[str, torch.Tensor], weights: None
) -> Dict[str, torch.Tensor]:
return losses
else:
def weigh_losses(
losses: Dict[str, torch.Tensor], weights: Dict[str, float]
) -> Dict[str, torch.Tensor]:
for key in losses.keys():
if key not in weights.keys():
weights.update({key: 1.0})
losses = {key: weights[key] * losses[key] for key in losses.keys()}
return losses
return weigh_losses
self.weigh_losses = weigh_losses_initialize(self.weights)
class Sum(Aggregator):
"""
Loss aggregation by summation
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Aggregates the losses by summation
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses.
step : int
Optimizer step.
Returns
-------
loss : torch.Tensor
Aggregated loss.
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
# Add losses
for key in losses.keys():
loss += losses[key]
return loss
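# Illustrative sketch (hypothetical names, not part of the original module):
# aggregating two constraint losses with `Sum`. Any iterable of parameters
# on a single device satisfies the `params` argument.
def _example_sum_aggregator() -> torch.Tensor:
    model = nn.Linear(2, 1)
    aggregator = Sum(model.parameters(), num_losses=2)
    losses = {
        "boundary": torch.tensor(0.5),
        "interior": torch.tensor(1.5),
    }
    total = aggregator(losses, step=0)
    # total == 2.0, the plain sum of the (unweighted) losses
    return total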
class GradNorm(Aggregator):
"""
GradNorm for loss aggregation
Reference: "Chen, Z., Badrinarayanan, V., Lee, C.Y. and Rabinovich, A., 2018, July.
Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks.
In International Conference on Machine Learning (pp. 794-803). PMLR."
"""
def __init__(self, params, num_losses, alpha=1.0, weights=None):
super().__init__(params, num_losses, weights)
self.alpha: float = alpha
self.lmbda: torch.nn.Parameter = nn.Parameter(
torch.zeros(num_losses, device=self.device)
)
self.register_buffer(
"init_losses", torch.zeros(self.num_losses, device=self.device)
)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Weights and aggregates the losses using the gradNorm algorithm
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses.
step : int
Optimizer step.
Returns
-------
loss : torch.Tensor
Aggregated loss.
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# get initial losses
if step == 0:
for i, key in enumerate(losses.keys()):
self.init_losses[i] = losses[key].clone().detach()
with torch.no_grad():
normalizer: torch.Tensor = self.num_losses / (torch.exp(self.lmbda).sum())
for i in range(self.num_losses):
self.lmbda[i] = self.lmbda[i].clone() + torch.log(
normalizer.detach()
) # c*exp(x) = exp(log(c)+x)
lmbda_exp: torch.Tensor = torch.exp(self.lmbda)
# compute relative losses, inverse rate, and grad coefficient
losses_stacked: torch.Tensor = torch.stack(list(losses.values()))
with torch.no_grad():
relative_losses: torch.Tensor = torch.div(losses_stacked, self.init_losses)
inverse_rate: torch.Tensor = relative_losses / (relative_losses.mean())
gradnorm_coef: torch.Tensor = torch.pow(inverse_rate, self.alpha)
# compute gradient norm and average gradient norm
grads_norm: torch.Tensor = torch.zeros_like(self.init_losses)
shared_params: torch.Tensor = self.params[-2] # TODO generalize this
for i, key in enumerate(losses.keys()):
grads: torch.Tensor = gradient(losses[key], [shared_params])[0]
grads_norm[i] = torch.norm(lmbda_exp[i] * grads.detach(), p=2)
avg_grad: torch.Tensor = grads_norm.detach().mean()
# compute gradnorm & model losses
loss_gradnorm: torch.Tensor = torch.abs(
grads_norm - avg_grad * gradnorm_coef
).sum()
loss_model: torch.Tensor = (lmbda_exp.detach() * losses_stacked).sum()
loss: torch.Tensor = loss_gradnorm + loss_model
return loss
class ResNorm(Aggregator):
"""
Residual normalization for loss aggregation
Contributors: T. Nandi, D. Van Essendelft, M. A. Nabian
"""
def __init__(self, params, num_losses, alpha=1.0, weights=None):
super().__init__(params, num_losses, weights)
self.alpha: float = alpha
self.lmbda: torch.nn.Parameter = nn.Parameter(
torch.zeros(num_losses, device=self.device)
)
self.register_buffer(
"init_losses", torch.zeros(self.num_losses, device=self.device)
)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Weights and aggregates the losses using the ResNorm algorithm
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses.
step : int
Optimizer step.
Returns
-------
loss : torch.Tensor
Aggregated loss.
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# get initial losses
if step == 0:
for i, key in enumerate(losses.keys()):
self.init_losses[i] = losses[key].clone().detach()
with torch.no_grad():
normalizer: torch.Tensor = self.num_losses / (torch.exp(self.lmbda).sum())
for i in range(self.num_losses):
self.lmbda[i] = self.lmbda[i].clone() + torch.log(
normalizer.detach()
) # c*exp(x) = exp(log(c)+x)
lmbda_exp: torch.Tensor = torch.exp(self.lmbda)
# compute relative losses, inverse rate, and grad coefficient
losses_stacked: torch.Tensor = torch.stack(list(losses.values()))
with torch.no_grad():
relative_losses: torch.Tensor = torch.div(losses_stacked, self.init_losses)
inverse_rate: torch.Tensor = relative_losses / (relative_losses.mean())
resnorm_coef: torch.Tensor = torch.pow(inverse_rate, self.alpha)
# compute residual norm and average residual norm
residuals: torch.Tensor = torch.zeros_like(self.init_losses)
for i, key in enumerate(losses.keys()):
residuals[i] = lmbda_exp[i] * losses[key].detach()
avg_residuals: torch.Tensor = losses_stacked.detach().mean()
# compute ResNorm & model losses
loss_resnorm: torch.Tensor = torch.abs(
residuals - avg_residuals * resnorm_coef
).sum()
loss_model: torch.Tensor = (lmbda_exp.detach() * losses_stacked).sum()
loss: torch.Tensor = loss_resnorm + loss_model
return loss
class HomoscedasticUncertainty(Aggregator):
"""
Homoscedastic task uncertainty for loss aggregation
Reference: "Reference: Kendall, A., Gal, Y. and Cipolla, R., 2018.
Multi-task learning using uncertainty to weigh losses for scene geometry and semantics.
In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 7482-7491)."
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
self.log_var: torch.nn.Parameter = nn.Parameter(
torch.zeros(self.num_losses, device=self.device)
)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Weights and aggregates the losses using homoscedastic task uncertainty
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses.
step : int
Optimizer step.
Returns
-------
loss : torch.Tensor
Aggregated loss.
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
# Compute precision
precision: torch.Tensor = torch.exp(-self.log_var)
# Aggregate losses
for i, key in enumerate(losses.keys()):
loss += precision[i] * losses[key]
loss += self.log_var.sum()
loss /= 2.0
return loss
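# Illustrative sketch (hypothetical names, not part of the original module):
# with `log_var` initialized to zero every precision term equals 1, so the
# aggregated loss reduces to half the sum of the individual losses.
def _example_homoscedastic_uncertainty() -> torch.Tensor:
    model = nn.Linear(2, 1)
    aggregator = HomoscedasticUncertainty(model.parameters(), num_losses=2)
    losses = {"boundary": torch.tensor(1.0), "interior": torch.tensor(4.0)}
    # 0.5 * (1.0 + 4.0) == 2.5 at initialization
    return aggregator(losses, step=0)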
class LRAnnealing(Aggregator):
"""
Learning rate annealing for loss aggregation
References: "Wang, S., Teng, Y. and Perdikaris, P., 2020.
Understanding and mitigating gradient pathologies in physics-informed
neural networks. arXiv preprint arXiv:2001.04536.", and
"Jin, X., Cai, S., Li, H. and Karniadakis, G.E., 2021.
NSFnets (Navier-Stokes flow nets): Physics-informed neural networks for the
incompressible Navier-Stokes equations. Journal of Computational Physics, 426, p.109951."
"""
def __init__(
self,
params,
num_losses,
update_freq=1,
alpha=0.01,
ref_key=None,
eps=1e-8,
weights=None,
):
super().__init__(params, num_losses, weights)
self.update_freq: int = update_freq
self.alpha: float = alpha
self.ref_key: Union[str, None] = ref_key
self.eps: float = eps
self.register_buffer(
"lmbda_ema", torch.ones(self.num_losses, device=self.device)
)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Weights and aggregates the losses using the learning rate annealing algorithm
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses.
step : int
Optimizer step.
Returns
-------
loss : torch.Tensor
Aggregated loss.
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
# Determine reference loss
if self.ref_key is None:
ref_idx = 0
else:
for i, key in enumerate(losses.keys()):
if self.ref_key in key:
ref_idx = i
break
# Update loss weights and aggregate losses
if step % self.update_freq == 0:
grads_mean: List[torch.Tensor] = []
# Compute the mean of each loss gradients
for key in losses.keys():
grads: List[torch.Tensor] = gradient(losses[key], self.params)
grads_flattened: List[torch.Tensor] = []
for i in range(len(grads)):
if grads[i] is not None:
grads_flattened.append(torch.abs(torch.flatten(grads[i])))
grads_mean.append((torch.mean(torch.cat(grads_flattened))))
# Compute the exponential moving average of weights and aggregate losses
for i, key in enumerate(losses.keys()):
with torch.no_grad():
self.lmbda_ema[i] *= 1.0 - self.alpha
self.lmbda_ema[i] += (
self.alpha * grads_mean[ref_idx] / (grads_mean[i] + self.eps)
)
loss += self.lmbda_ema[i].clone() * losses[key]
# Aggregate losses without update to loss weights
else:
for i, key in enumerate(losses.keys()):
loss += self.lmbda_ema[i] * losses[key]
return loss
class SoftAdapt(Aggregator):
"""
SoftAdapt for loss aggregation
Reference: "Heydari, A.A., Thompson, C.A. and Mehmood, A., 2019.
Softadapt: Techniques for adaptive loss weighting of neural networks with multi-part loss functions.
arXiv preprint arXiv:1912.12355."
"""
def __init__(self, params, num_losses, eps=1e-8, weights=None):
super().__init__(params, num_losses, weights)
self.eps: float = eps
self.register_buffer(
"prev_losses", torch.zeros(self.num_losses, device=self.device)
)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Weights and aggregates the losses using the original variant of the softadapt algorithm
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses.
step : int
Optimizer step.
Returns
-------
loss : torch.Tensor
Aggregated loss.
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
# Aggregate losses by summation at step 0
if step == 0:
for i, key in enumerate(losses.keys()):
loss += losses[key]
self.prev_losses[i] = losses[key].clone().detach()
# Aggregate losses using SoftAdapt for step > 0
else:
lmbda: torch.Tensor = torch.ones_like(self.prev_losses)
lmbda_sum: torch.Tensor = torch.zeros_like(self.init_loss)
losses_stacked: torch.Tensor = torch.stack(list(losses.values()))
normalizer: torch.Tensor = (losses_stacked / self.prev_losses).max()
for i, key in enumerate(losses.keys()):
with torch.no_grad():
lmbda[i] = torch.exp(
losses[key] / (self.prev_losses[i] + self.eps) - normalizer
)
lmbda_sum += lmbda[i]
loss += lmbda[i].clone() * losses[key]
self.prev_losses[i] = losses[key].clone().detach()
loss *= self.num_losses / (lmbda_sum + self.eps)
return loss
class Relobralo(Aggregator):
"""
Relative loss balancing with random lookback
Reference: "Bischof, R. and Kraus, M., 2021.
Multi-Objective Loss Balancing for Physics-Informed Deep Learning.
arXiv preprint arXiv:2110.09813."
"""
def __init__(
self, params, num_losses, alpha=0.95, beta=0.99, tau=1.0, eps=1e-8, weights=None
):
super().__init__(params, num_losses, weights)
self.alpha: float = alpha
self.beta: float = beta
self.tau: float = tau
self.eps: float = eps
self.register_buffer(
"init_losses", torch.zeros(self.num_losses, device=self.device)
)
self.register_buffer(
"prev_losses", torch.zeros(self.num_losses, device=self.device)
)
self.register_buffer(
"lmbda_ema", torch.ones(self.num_losses, device=self.device)
)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Weights and aggregates the losses using the ReLoBRaLo algorithm
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses.
step : int
Optimizer step.
Returns
-------
loss : torch.Tensor
Aggregated loss.
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
# Aggregate losses by summation at step 0
if step == 0:
for i, key in enumerate(losses.keys()):
loss += losses[key]
self.init_losses[i] = losses[key].clone().detach()
self.prev_losses[i] = losses[key].clone().detach()
# Aggregate losses using ReLoBRaLo for step > 0
else:
losses_stacked: torch.Tensor = torch.stack(list(losses.values()))
normalizer_prev: torch.Tensor = (
losses_stacked / (self.tau * self.prev_losses)
).max()
normalizer_init: torch.Tensor = (
losses_stacked / (self.tau * self.init_losses)
).max()
rho: torch.Tensor = torch.bernoulli(torch.tensor(self.beta))
with torch.no_grad():
lmbda_prev: torch.Tensor = torch.exp(
losses_stacked / (self.tau * self.prev_losses + self.eps)
- normalizer_prev
)
lmbda_init: torch.Tensor = torch.exp(
losses_stacked / (self.tau * self.init_losses + self.eps)
- normalizer_init
)
lmbda_prev *= self.num_losses / (lmbda_prev.sum() + self.eps)
lmbda_init *= self.num_losses / (lmbda_init.sum() + self.eps)
# Compute the exponential moving average of weights and aggregate losses
for i, key in enumerate(losses.keys()):
with torch.no_grad():
self.lmbda_ema[i] = self.alpha * (
rho * self.lmbda_ema[i].clone() + (1.0 - rho) * lmbda_init[i]
)
self.lmbda_ema[i] += (1.0 - self.alpha) * lmbda_prev[i]
loss += self.lmbda_ema[i].clone() * losses[key]
self.prev_losses[i] = losses[key].clone().detach()
return loss
class NTK(nn.Module):
def __init__(self, run_per_step: int = 1000, save_name: Union[str, None] = None):
super(NTK, self).__init__()
self.run_per_step = run_per_step
self.if_csv_head = True
self.save_name = (
to_absolute_path(add_hydra_run_path(save_name)) if save_name else None
)
if self.save_name:
logger.warning(
"Cuda graphs does not work when saving NTK values to file! Set `cuda_graphs` to false."
)
def group_ntk(self, model, losses):
# The items in `losses` should be scalar loss values (after MSE, etc.)
ntk_value = dict()
for key, loss in losses.items():
grad = torch.autograd.grad(
torch.sqrt(torch.abs(loss)),
model.parameters(),
retain_graph=True,
allow_unused=True,
)
ntk_value[key] = torch.sqrt(
torch.sum(
torch.stack(
[torch.sum(t.detach() ** 2) for t in grad if t is not None],
dim=0,
)
)
)
return ntk_value
def save_ntk(self, ntk_dict, step):
import pandas as pd # TODO: Remove
output_dict = {}
for key, value in ntk_dict.items():
output_dict[key] = value.cpu().numpy()
df = pd.DataFrame(output_dict, index=[step])
df.to_csv(self.save_name + ".csv", mode="a", header=self.if_csv_head)
self.if_csv_head = False
def forward(self, constraints, ntk_weights, step):
losses = dict()
dict_constraint_losses = dict()
ntk_sum = 0
# Execute constraint forward passes
for key, constraint in constraints.items():
# TODO: Test streaming here
torch.cuda.nvtx.range_push(f"Running Constraint {key}")
constraint.forward()
torch.cuda.nvtx.range_pop()
for key, constraint in constraints.items():
# compute losses
constraint_losses = constraint.loss(step)
if (step % self.run_per_step == 0) and (step > 0):
ntk_dict = self.group_ntk(constraint.model, constraint_losses)
else:
ntk_dict = None
if ntk_dict is not None:
ntk_weights[key] = ntk_dict
if ntk_weights.get(key) is not None:
ntk_sum += torch.sum(
torch.stack(list(ntk_weights[key].values()), dim=0)
)
dict_constraint_losses[key] = constraint_losses
if step == 0: # May not work on restarts
ntk_sum = 1.0
if self.save_name and (step % self.run_per_step == 0) and (step > 0):
self.save_ntk(
{
d_key + "_" + k: v
for d_key, d in ntk_weights.items()
for k, v in d.items()
},
step,
)
for key, constraint_losses in dict_constraint_losses.items():
# add together losses of like kind
for loss_key, value in constraint_losses.items():
if (
ntk_weights.get(key) is None
or ntk_weights[key].get(loss_key) is None
):
ntk_weight = ntk_sum / 1.0
else:
ntk_weight = ntk_sum / ntk_weights[key][loss_key]
if loss_key not in list(losses.keys()):
losses[loss_key] = ntk_weight * value
else:
losses[loss_key] += ntk_weight * value
return losses, ntk_weights
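# Illustrative sketch (hypothetical names, not part of the original module):
# `group_ntk` measures the parameter-gradient magnitude of each scalar loss,
# which `forward` then uses to balance the constraint losses.
def _example_group_ntk() -> Dict[str, torch.Tensor]:
    model = nn.Linear(2, 1)
    ntk = NTK(run_per_step=1)
    x = torch.randn(8, 2)
    loss = (model(x) ** 2).mean()
    values = ntk.group_ntk(model, {"interior": loss})
    # values["interior"] is the l2 norm of d sqrt(|loss|) / d(parameters)
    return values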
| modulus-sym-main | modulus/sym/loss/aggregator.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .loss import (
Loss,
PointwiseLossNorm,
IntegralLossNorm,
DecayedPointwiseLossNorm,
DecayedIntegralLossNorm,
)
| modulus-sym-main | modulus/sym/loss/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pathlib
import torch.nn as nn
from torch import Tensor
from typing import Dict, Tuple, List, Union
from torch.autograd import Function
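# NOTE: `LossL2` below calls into `pde_cpp`, a compiled extension that is not
# imported in this module; the binding is assumed to be provided elsewhere in
# the build.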
class LossL2(Function):
@staticmethod
def forward(
ctx,
pred_outvar: Tensor,
true_outvar: Tensor,
lambda_weighting: Tensor,
area: Tensor,
):
ctx.save_for_backward(pred_outvar, true_outvar, lambda_weighting, area)
loss = pde_cpp.l2_loss_forward(pred_outvar, true_outvar, lambda_weighting, area)
return loss
@staticmethod
def backward(ctx, grad_output):
pred_outvar, true_outvar, lambda_weighting, area = ctx.saved_tensors
outputs = pde_cpp.l2_loss_backward(
grad_output, pred_outvar, true_outvar, lambda_weighting, area
)
return outputs[0], None, None, None
class Loss(nn.Module):
"""
Base class for all loss functions
"""
def __init__(self):
super().__init__()
def forward(
self,
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
) -> Dict[str, Tensor]:
raise NotImplementedError("Subclass of Loss needs to implement this")
class PointwiseLossNorm(Loss):
"""
L-p loss function for pointwise data
Computes the p-th order loss of each output tensor
Parameters
----------
ord : int
Order of the loss. For example, `ord=2` would be the L2 loss.
"""
def __init__(self, ord: int = 2):
super().__init__()
self.ord: int = ord
@staticmethod
def _loss(
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
ord: float,
) -> Dict[str, Tensor]:
losses = {}
for key, value in pred_outvar.items():
l = lambda_weighting[key] * torch.abs(
pred_outvar[key] - true_outvar[key]
).pow(ord)
if "area" in invar.keys():
l *= invar["area"]
losses[key] = l.sum()
return losses
def forward(
self,
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
) -> Dict[str, Tensor]:
return PointwiseLossNorm._loss(
invar, pred_outvar, true_outvar, lambda_weighting, step, self.ord
)
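# Illustrative sketch (hypothetical names, not part of the original module):
# for ord=2, the pointwise loss is the lambda-weighted sum of squared errors,
# optionally scaled by a per-point "area" entry in `invar`.
def _example_pointwise_loss() -> Dict[str, Tensor]:
    loss_fn = PointwiseLossNorm(ord=2)
    invar = {"x": torch.zeros(4, 1)}
    pred_outvar = {"u": torch.ones(4, 1)}
    true_outvar = {"u": torch.zeros(4, 1)}
    lambda_weighting = {"u": torch.ones(4, 1)}
    losses = loss_fn(invar, pred_outvar, true_outvar, lambda_weighting, step=0)
    # losses["u"] == 4.0: four unit errors, each squared and weighted by 1
    return losses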
class IntegralLossNorm(Loss):
"""
L-p loss function for integral data
Computes the p-th order loss of each output tensor
Parameters
----------
ord : int
Order of the loss. For example, `ord=2` would be the L2 loss.
"""
def __init__(self, ord: int = 2):
super().__init__()
self.ord: int = ord
@staticmethod
def _loss(
list_invar: List[Dict[str, Tensor]],
list_pred_outvar: List[Dict[str, Tensor]],
list_true_outvar: List[Dict[str, Tensor]],
list_lambda_weighting: List[Dict[str, Tensor]],
step: int,
ord: float,
) -> Dict[str, Tensor]:
# compute integral losses
losses = {key: 0 for key in list_pred_outvar[0].keys()}
for invar, pred_outvar, true_outvar, lambda_weighting in zip(
list_invar, list_pred_outvar, list_true_outvar, list_lambda_weighting
):
for key in pred_outvar.keys():
losses[key] += (
lambda_weighting[key]
* torch.abs(
true_outvar[key] - (invar["area"] * pred_outvar[key]).sum()
).pow(ord)
).sum()
return losses
def forward(
self,
list_invar: List[Dict[str, Tensor]],
list_pred_outvar: List[Dict[str, Tensor]],
list_true_outvar: List[Dict[str, Tensor]],
list_lambda_weighting: List[Dict[str, Tensor]],
step: int,
) -> Dict[str, Tensor]:
return IntegralLossNorm._loss(
list_invar,
list_pred_outvar,
list_true_outvar,
list_lambda_weighting,
step,
self.ord,
)
class DecayedLossNorm(Loss):
"""
Base class for decayed loss norm
"""
def __init__(
self,
start_ord: int = 2,
end_ord: int = 1,
decay_steps: int = 1000,
decay_rate: float = 0.95,
):
super().__init__()
self.start_ord: int = start_ord
self.end_ord: int = end_ord
self.decay_steps: int = decay_steps
self.decay_rate: int = decay_rate
def ord(self, step):
return self.start_ord - (self.start_ord - self.end_ord) * (
1.0 - self.decay_rate ** (step / self.decay_steps)
)
class DecayedPointwiseLossNorm(DecayedLossNorm):
"""
Loss function for pointwise data where the norm of
the loss is decayed from a start value to an end value.
Parameters
----------
start_ord : int
Order of the loss when current iteration is zero.
end_ord : int
Order of the loss to decay to.
decay_steps : int
Number of steps to take for each `decay_rate`.
decay_rate :
The rate of decay from `start_ord` to `end_ord`. The current ord
will be given by `ord = start_ord - (start_ord - end_ord) * (1.0 - decay_rate**(current_step / decay_steps))`.
"""
def forward(
self,
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
) -> Dict[str, Tensor]:
return PointwiseLossNorm._loss(
invar, pred_outvar, true_outvar, lambda_weighting, step, self.ord(step)
)
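# Illustrative sketch (hypothetical helper, not part of the original module):
# the decay schedule defined by `DecayedLossNorm.ord` moves the norm order
# smoothly from `start_ord` toward `end_ord`.
def _example_decayed_ord() -> float:
    norm = DecayedPointwiseLossNorm(
        start_ord=2, end_ord=1, decay_steps=1000, decay_rate=0.95
    )
    assert norm.ord(0) == 2.0
    # ord(1000) = 2 - (2 - 1) * (1 - 0.95**1) = 1.95
    return norm.ord(1000)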
class DecayedIntegralLossNorm(DecayedLossNorm):
"""
Loss function for integral data where the norm of
the loss is decayed from a start value to an end value.
Parameters
----------
start_ord : int
Order of the loss when current iteration is zero.
end_ord : int
Order of the loss to decay to.
decay_steps : int
Number of steps to take for each `decay_rate`.
decay_rate :
The rate of decay from `start_ord` to `end_ord`. The current ord
will be given by `ord = start_ord - (start_ord - end_ord) * (1.0 - decay_rate**(current_step / decay_steps))`.
"""
def forward(
self,
list_invar: List[Dict[str, Tensor]],
list_pred_outvar: List[Dict[str, Tensor]],
list_true_outvar: List[Dict[str, Tensor]],
list_lambda_weighting: List[Dict[str, Tensor]],
step: int,
) -> Dict[str, Tensor]:
return IntegralLossNorm._loss(
list_invar,
list_pred_outvar,
list_true_outvar,
list_lambda_weighting,
step,
self.ord(step),
)
class CausalLossNorm(Loss):
"""
Causal loss function for pointwise data
Computes the p-th order loss of each output tensor
Parameters
----------
ord : int
Order of the loss. For example, `ord=2` would be the L2 loss.
eps : float
Causal parameter determining the steepness of the temporal weights. `eps=1.0` is the default value.
n_chunks : int
Number of chunks that split the temporal domain evenly.
"""
def __init__(self, ord: int = 2, eps: float = 1.0, n_chunks=10):
super().__init__()
self.ord: int = ord
self.eps: float = eps
self.n_chunks: int = n_chunks
@staticmethod
def _loss(
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
ord: float,
eps: float,
n_chunks: int,
) -> Dict[str, Tensor]:
losses = {}
for key, value in pred_outvar.items():
l = lambda_weighting[key] * torch.abs(
pred_outvar[key] - true_outvar[key]
).pow(ord)
if "area" in invar.keys():
l *= invar["area"]
# batch size should be divided by the number of chunks
if l.shape[0] % n_chunks != 0:
raise ValueError(
"The batch size must be divided by the number of chunks"
)
# divide the loss values into chunks
l = l.reshape(n_chunks, -1)
l = l.sum(dim=-1)
# compute causal temporal weights
with torch.no_grad():
w = torch.exp(-eps * torch.cumsum(l, dim=0))
w = w / w[0]
l = w * l
losses[key] = l.sum()
return losses
def forward(
self,
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
) -> Dict[str, Tensor]:
return CausalLossNorm._loss(
invar,
pred_outvar,
true_outvar,
lambda_weighting,
step,
self.ord,
self.eps,
self.n_chunks,
)
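# Illustrative sketch (hypothetical names, not part of the original module):
# the causal loss splits the batch into `n_chunks` temporal chunks and
# down-weights later chunks by exp(-eps * cumulative loss of earlier chunks),
# so the batch size must be divisible by `n_chunks`.
def _example_causal_loss() -> Dict[str, Tensor]:
    loss_fn = CausalLossNorm(ord=2, eps=1.0, n_chunks=2)
    invar = {}
    pred_outvar = {"u": torch.ones(4, 1)}
    true_outvar = {"u": torch.zeros(4, 1)}
    lambda_weighting = {"u": torch.ones(4, 1)}
    return loss_fn(invar, pred_outvar, true_outvar, lambda_weighting, step=0)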
| modulus-sym-main | modulus/sym/loss/loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .manager import DistributedManager
| modulus-sym-main | modulus/sym/distributed/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import torch.distributed as dist
def get_memory_format(tensor):
if tensor.is_contiguous(memory_format=torch.channels_last):
return torch.channels_last
else:
return torch.contiguous_format
def pad_helper(tensor, dim, new_size, mode="zero"):
ndim = tensor.ndim
dim = (dim + ndim) % ndim
ndim_pad = ndim - dim
output_shape = [0 for _ in range(2 * ndim_pad)]
orig_size = tensor.shape[dim]
output_shape[1] = new_size - orig_size
tensor_pad = F.pad(tensor, output_shape, mode="constant", value=0.0)
if mode == "conj":
lhs_slice = [
slice(0, x) if idx != dim else slice(orig_size, new_size)
for idx, x in enumerate(tensor.shape)
]
rhs_slice = [
slice(0, x) if idx != dim else slice(1, output_shape[1] + 1)
for idx, x in enumerate(tensor.shape)
]
tensor_pad[lhs_slice] = torch.flip(
torch.conj(tensor_pad[rhs_slice]), dims=[dim]
)
return tensor_pad
def truncate_helper(tensor, dim, new_size):
input_format = get_memory_format(tensor)
ndim = tensor.ndim
dim = (dim + ndim) % ndim
output_slice = [
slice(0, x) if idx != dim else slice(0, new_size)
for idx, x in enumerate(tensor.shape)
]
tensor_trunc = tensor[output_slice].contiguous(memory_format=input_format)
return tensor_trunc
def split_tensor_along_dim(tensor, dim, num_chunks):
assert (
dim < tensor.dim()
), f"Error, tensor dimension is {tensor.dim()} which cannot be split along {dim}"
assert (
tensor.shape[dim] % num_chunks == 0
), f"Error, cannot split dim {dim} evenly. Dim size is \
{tensor.shape[dim]} and requested number of splits is {num_chunks}"
chunk_size = tensor.shape[dim] // num_chunks
tensor_list = torch.split(tensor, chunk_size, dim=dim)
return tensor_list
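# Illustrative sketch (hypothetical helper, not part of the original module):
# splitting a tensor evenly along one dimension; the dimension size must be
# divisible by `num_chunks`.
def _example_split_tensor_along_dim():
    t = torch.arange(8).reshape(2, 4)
    chunks = split_tensor_along_dim(t, dim=1, num_chunks=2)
    # two chunks of shape (2, 2)
    return chunks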
# distributed primitives
def _transpose(tensor, dim0, dim1, group=None, async_op=False):
# get input format
input_format = get_memory_format(tensor)
# get comm params
comm_size = dist.get_world_size(group=group)
# split and local transposition
split_size = tensor.shape[dim0] // comm_size
x_send = [
y.contiguous(memory_format=input_format)
for y in torch.split(tensor, split_size, dim=dim0)
]
x_recv = [torch.empty_like(x_send[0]) for _ in range(comm_size)]
# global transposition
req = dist.all_to_all(x_recv, x_send, group=group, async_op=async_op)
return x_recv, req
def _reduce(input_, use_fp32=True, group=None):
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# All-reduce.
if use_fp32:
dtype = input_.dtype
inputf_ = input_.float()
dist.all_reduce(inputf_, group=group)
input_ = inputf_.to(dtype)
else:
dist.all_reduce(input_, group=group)
return input_
def _split(input_, dim_, group=None):
"""Split the tensor along its last dimension and keep the corresponding slice."""
# get input format
input_format = get_memory_format(input_)
# Bypass the function if we are using only 1 GPU.
comm_size = dist.get_world_size(group=group)
if comm_size == 1:
return input_
# Split along last dimension.
input_list = split_tensor_along_dim(input_, dim_, comm_size)
# Note: torch.split does not create contiguous tensors by default.
rank = dist.get_rank(group=group)
output = input_list[rank].contiguous(memory_format=input_format)
return output
def _gather(input_, dim_, group=None):
"""Gather tensors and concatinate along the last dimension."""
# get input format
input_format = get_memory_format(input_)
comm_size = dist.get_world_size(group=group)
# Bypass the function if we are using only 1 GPU.
if comm_size == 1:
return input_
# sanity checks
assert (
dim_ < input_.dim()
), f"Error, cannot gather along {dim_} for tensor with {input_.dim()} dimensions."
# Size and dimension.
comm_rank = dist.get_rank(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(comm_size)]
tensor_list[comm_rank] = input_
dist.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=dim_).contiguous(memory_format=input_format)
return output
| modulus-sym-main | modulus/sym/distributed/helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import logging
import os
import time
import numpy as np
logger = logging.getLogger("__name__")
# Create singleton DistributedManager class
class DistributedManager(object):
_shared_state = {}
def __new__(cls):
obj = super(DistributedManager, cls).__new__(cls)
obj.__dict__ = cls._shared_state
# Set the defaults
if not hasattr(obj, "_rank"):
obj._rank = 0
if not hasattr(obj, "_world_size"):
obj._world_size = 1
if not hasattr(obj, "_local_rank"):
obj._local_rank = 0
if not hasattr(obj, "_distributed"):
obj._distributed = False
if not hasattr(obj, "_device"):
obj._device = torch.device(
f"cuda:0" if torch.cuda.is_available() else "cpu"
)
if not hasattr(obj, "_cuda"):
obj._cuda = torch.cuda.is_available()
if not hasattr(obj, "_broadcast_buffers"):
obj._broadcast_buffers = False
if not hasattr(obj, "_find_unused_parameters"):
obj._find_unused_parameters = False
if not hasattr(obj, "_cuda_graphs"):
obj._cuda_graphs = False
return obj
@property
def rank(self):
return self._rank
@property
def local_rank(self):
return self._local_rank
@property
def world_size(self):
return self._world_size
@property
def device(self):
return self._device
@property
def distributed(self):
return self._distributed
@property
def cuda(self):
return self._cuda
@property
def group_names(self):
"""
Returns a list of all named process groups created
"""
return self._groups.keys()
def group(self, name=None):
"""
Returns a process group with the given name.
If name is None, returns None, indicating the default process group.
If the named group does not exist, returns None as well.
"""
if name in self._groups.keys():
return self._groups[name]
else:
return None
def group_size(self, name=None):
"""
Returns the size of named process group
"""
if name is None:
return self._world_size
group = self.group(name)
return dist.get_world_size(group=group)
def group_rank(self, name=None):
"""
Returns the rank in named process group
"""
if name is None:
return self._rank
group = self.group(name)
return dist.get_rank(group=group)
def group_name(self, group=None):
"""
Returns the name of process group
"""
if group is None:
return None
return self._group_names[group]
@property
def broadcast_buffers(self):
return self._broadcast_buffers
@broadcast_buffers.setter
def broadcast_buffers(self, broadcast: bool):
self._broadcast_buffers = broadcast
@property
def find_unused_parameters(self):
return self._find_unused_parameters
@find_unused_parameters.setter
def find_unused_parameters(self, find_params: bool):
if find_params:
# Logger may not be config'd here yet
logger.warning(
"Setting `find_unused_parameters` in DDP to true, use only if necessary."
)
self._find_unused_parameters = find_params
@property
def cuda_graphs(self):
return self._cuda_graphs
@cuda_graphs.setter
def cuda_graphs(self, graphs: bool):
# Function for any modifications needed for DDP using cuda graphs
if graphs and self._find_unused_parameters:
# Logger may not be config'd here yet
logger.warning(
"DDP `find_unused_parameters` must be false for CUDA graphs."
)
raise ValueError(
"`cuda_graphs` and `find_unused_parameters` cannot both be true"
)
self._cuda_graphs = graphs
@staticmethod
def get_available_backend():
if torch.cuda.is_available() and torch.distributed.is_nccl_available():
return "nccl"
else:
return "gloo"
@staticmethod
def initialize_env():
rank = int(os.environ.get("RANK"))
world_size = int(os.environ.get("WORLD_SIZE"))
if "LOCAL_RANK" in os.environ:
local_rank = int(os.environ.get("LOCAL_RANK"))
else:
local_rank = rank % torch.cuda.device_count()
addr = os.environ.get("MASTER_ADDR")
port = os.environ.get("MASTER_PORT")
DistributedManager.setup(
rank=rank,
world_size=world_size,
local_rank=local_rank,
addr=addr,
port=port,
backend=DistributedManager.get_available_backend(),
)
@staticmethod
def initialize_open_mpi(addr, port):
rank = int(os.environ.get("OMPI_COMM_WORLD_RANK"))
world_size = int(os.environ.get("OMPI_COMM_WORLD_SIZE"))
local_rank = int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK"))
DistributedManager.setup(
rank=rank,
world_size=world_size,
local_rank=local_rank,
addr=addr,
port=port,
backend=DistributedManager.get_available_backend(),
method="openmpi",
)
@staticmethod
def initialize_slurm(port):
rank = int(os.environ.get("SLURM_PROCID"))
world_size = int(os.environ.get("SLURM_NPROCS"))
local_rank = int(os.environ.get("SLURM_LOCALID"))
addr = os.environ.get("SLURM_LAUNCH_NODE_IPADDR")
DistributedManager.setup(
rank=rank,
world_size=world_size,
local_rank=local_rank,
addr=addr,
port=port,
backend=DistributedManager.get_available_backend(),
method="slurm",
)
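    # Example launch under SLURM (an assumption): srun exports SLURM_PROCID,
    # SLURM_NPROCS, SLURM_LOCALID and SLURM_LAUNCH_NODE_IPADDR, so only a
    # free port needs to be chosen:
    #
    #     srun --ntasks=4 python train.py   # inside train.py:
    #     DistributedManager.initialize_slurm("12355")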
@staticmethod
def initialize():
addr = os.getenv("MASTER_ADDR", "localhost")
port = os.getenv("MASTER_PORT", "12355")
# https://pytorch.org/docs/master/notes/cuda.html#id5
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
try:
DistributedManager.initialize_env()
        except TypeError:
            # RANK/WORLD_SIZE not set; fall back to scheduler-specific env vars
if "SLURM_PROCID" in os.environ:
DistributedManager.initialize_slurm(port)
elif "OMPI_COMM_WORLD_RANK" in os.environ:
DistributedManager.initialize_open_mpi(addr, port)
# Set per rank numpy random seed for data sampling
np.random.seed(seed=DistributedManager().rank)
manager = DistributedManager()
if manager.distributed:
            print(
                f"Initialized process {manager.rank} of {manager.world_size} "
                f'using method "{manager._initialization_method}". '
                f"Device set to {manager.device}"
            )
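    # Minimal end-to-end sketch (an assumption, not from the source):
    # initialize once at program start, then query the singleton anywhere:
    #
    #     DistributedManager.initialize()
    #     manager = DistributedManager()
    #     model = model.to(manager.device)
    #     if manager.distributed:
    #         model = torch.nn.parallel.DistributedDataParallel(
    #             model, device_ids=[manager.local_rank]
    #         )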
@staticmethod
def setup(
rank=0,
world_size=1,
local_rank=None,
addr="localhost",
port="12355",
backend="nccl",
method="env",
):
os.environ["MASTER_ADDR"] = addr
os.environ["MASTER_PORT"] = str(port)
manager = DistributedManager()
manager._distributed = (world_size > 1) and torch.distributed.is_available()
if manager._distributed:
# Update rank and world_size if using distributed
manager._rank = rank
manager._world_size = world_size
if local_rank is None:
manager._local_rank = rank % torch.cuda.device_count()
else:
manager._local_rank = local_rank
# Setup distributed process group
dist.init_process_group(
backend, rank=manager.rank, world_size=manager.world_size
)
manager._groups = {}
manager._group_ranks = {}
manager._group_names = {}
manager._device = torch.device(
f"cuda:{manager.local_rank}" if torch.cuda.is_available() else "cpu"
)
# Needed for cuda graphs
if torch.cuda.is_available():
torch.cuda.set_device(manager.local_rank)
manager._initialization_method = method
        # Device selection already happened via torch.cuda.set_device above;
        # empty the cache to release memory held by prior allocations
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
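    # Manual-setup sketch (an assumption): useful for tests or single-process
    # debugging, bypassing the environment autodetection in `initialize`:
    #
    #     DistributedManager.setup(rank=0, world_size=1, backend="gloo")
    #     manager = DistributedManager()
    #     assert not manager.distributed  # world_size == 1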
@staticmethod
def create_process_subgroup(name: str, size: int, group_name=None, verbose=False):
manager = DistributedManager()
if not manager.distributed:
return None
assert name not in manager._groups, f"Group with name {name} already exists"
        # Get the parent group's parameters
        group = manager._groups[group_name] if group_name else None
group_size = dist.get_world_size(group=group)
group_rank = dist.get_rank(group=group)
num_groups = manager.world_size // group_size
# Get number of sub-groups per parent group
assert (
group_size % size == 0
), f"Cannot divide group size {group_size} evenly into subgroups of size {size}"
num_subgroups = group_size // size
# Create all the sub-groups
# Note: all ranks in the job need to create all sub-groups in
# the same order even if a rank is not part of a sub-group
manager._group_ranks[name] = []
for g in range(num_groups):
for i in range(num_subgroups):
# Get global ranks that are part of this sub-group
start = i * size
end = start + size
if group_name:
ranks = manager._group_ranks[group_name][g][start:end]
else:
ranks = list(range(start, end))
# Create sub-group and keep track of ranks
tmp_group = dist.new_group(ranks=ranks)
manager._group_ranks[name].append(ranks)
if manager.rank in ranks:
# Set group in manager only if this rank is part of the group
manager._groups[name] = tmp_group
manager._group_names[tmp_group] = name
if verbose and manager.rank == 0:
print(f"Process group '{name}':")
for grp in manager._group_ranks[name]:
print(" ", grp)
@staticmethod
def create_orthogonal_process_group(name: str, group_name: str, verbose=False):
manager = DistributedManager()
if not manager.distributed:
return None
assert (
group_name in manager._groups
), f"Group with name {group_name} does not exist"
assert name not in manager._groups, f"Group with name {name} already exists"
group_ranks = manager._group_ranks[group_name]
orthogonal_ranks = [list(i) for i in zip(*group_ranks)]
for ranks in orthogonal_ranks:
tmp_group = dist.new_group(ranks=ranks)
if manager.rank in ranks:
# Set group in manager only if this rank is part of the group
manager._groups[name] = tmp_group
manager._group_names[tmp_group] = name
manager._group_ranks[name] = orthogonal_ranks
if verbose and manager.rank == 0:
print(f"Process group '{name}':")
for grp in manager._group_ranks[name]:
print(" ", grp)
@staticmethod
def cleanup():
dist.destroy_process_group()
| modulus-sym-main | modulus/sym/distributed/manager.py |