python_code | repo_name | file_path
---|---|---
"""Adapted from https://github.com/NVIDIA/pix2pixHD/blob/master/models/networks.py
with modifications made for this work.
Original copyright:
Copyright (C) 2019 NVIDIA Corporation. Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu.
"""
import torch
import torch.nn as nn
import functools
import numpy as np
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer='instance',
use_sigmoid=False, num_D=3, getIntermFeat=False, use_cond=False):
# norm_layer is a string ('batch' or 'instance') resolved via get_norm_layer
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
self.use_cond = use_cond
norm_layer = get_norm_layer(norm_layer)
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
if getIntermFeat:
for j in range(n_layers+2):
setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))
else:
setattr(self, 'layer'+str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input_img, landmarks=None):
if self.use_cond:
input = torch.cat((input_img, landmarks), dim=1)
else:
input = input_img
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
else:
model = getattr(self, 'layer'+str(num_D-1-i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D-1):
input_downsampled = self.downsample(input_downsampled)
return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
super(NLayerDiscriminator, self).__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
norm_layer(nf), nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
if use_sigmoid:
sequence += [[nn.Sigmoid()]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, input):
if self.getIntermFeat:
res = [input]
for n in range(self.n_layers+2):
model = getattr(self, 'model'+str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(input)
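# Hypothetical smoke test (not part of the original module): run the
# multiscale discriminator on a dummy batch and print per-scale output shapes.
if __name__ == '__main__':
    netD = MultiscaleDiscriminator(input_nc=3, num_D=3)
    with torch.no_grad():
        outputs = netD(torch.randn(2, 3, 256, 256))
    for scale_outputs in outputs:
        print([tuple(t.shape) for t in scale_outputs])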
| UnsupervisedLandmarkLearning-master | models/discriminator.py |
"""
This script should exactly replicate the MATLAB-based evaluation provided in the BBCPose
eval toolkit, thresholded at a 6 pixel radius
"""
import scipy.io as sio
import numpy as np
import argparse
import os
def eval_kpts(my_preds, anno_gt):
# rescale 1-indexed coordinates to half resolution (as in the MATLAB eval)
preds = 0.5 * (my_preds['my_pred'] - 1) + 1
gt = 0.5 * (anno_gt - 1) + 1
err = np.sqrt(np.sum((preds - gt) ** 2, axis=0))
# mark predictions within a 6-pixel radius of ground truth as correct (1),
# and those farther away as incorrect (0)
err[err <= 6] = 1
err[err > 6] = 0
return err
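# Hypothetical sanity check (made-up values): rows are (x, y), columns are
# keypoints. Distances are computed after the half-resolution rescaling
# above, so a prediction 2 raw pixels away scores 1 and one 30 raw pixels
# away scores 0:
#   eval_kpts({'my_pred': np.array([[10., 50.], [10., 50.]])},
#             np.array([[12., 50.], [10., 80.]]))  # -> array([1., 0.])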
def main(results_path, test_anno_path=None, val_anno_path=None):
if test_anno_path is not None: # if test annotations are provided
test_pred_results = os.path.join(results_path, 'preds_test.mat')
test_gt = sio.loadmat(test_anno_path)
test_preds = sio.loadmat(test_pred_results)
err = eval_kpts(test_preds, test_gt['results']['gt'][0, 0])
print('Test', err.mean())
if val_anno_path is not None: # if validation set annotations are provided
val_pred_results = os.path.join(results_path, 'preds_val.mat')
val_gt = sio.loadmat(val_anno_path)
val_preds = sio.loadmat(val_pred_results)
err = eval_kpts(val_preds, val_gt['my_pred'])
print('Val', err.mean())
if __name__ == '__main__':
# parse command-line arguments
parser = argparse.ArgumentParser(description='')
parser.add_argument('--results_path', type=str, help="Path to where the regressed landmark results are stored")
parser.add_argument('--test_anno_path', type=str, default=None, help="Location of the results.mat file provided by the BBCPose evaluation toolkit")
parser.add_argument('--val_anno_path', type=str, default=None, help="Location of the validation set annotations stored in the same format as results.mat")
args = parser.parse_args()
main(args.results_path, args.test_anno_path, args.val_anno_path)
| UnsupervisedLandmarkLearning-master | scripts/eval_supervised_landmarks_bbc.py |
"""Script for fitting the regression paramaters mapping the landmarks to the
annotated keypoints on BBC. Regression results are saved in a .mat file.
Run eval_supervised_landmarks_bbc.py to evaluate the regressed keypoints.
"""
import numpy as np
import sklearn.linear_model
import argparse
import scipy.io as sio
import pickle
import os
def save_results(y_predict, all_bboxes, output_path):
box_w = all_bboxes[:, 2] - all_bboxes[:, 0]
box_h = all_bboxes[:, 3] - all_bboxes[:, 1]
box_w = np.expand_dims(np.expand_dims(box_w, axis=1), axis=2)
box_h = np.expand_dims(np.expand_dims(box_h, axis=1), axis=2)
# B x 2 x 1
box_wh = np.concatenate((box_w, box_h), 1)
box_x_min = all_bboxes[:, 0]
box_y_min = all_bboxes[:, 1]
box_x_min = np.expand_dims(np.expand_dims(box_x_min, axis=1), axis=2)
box_y_min = np.expand_dims(np.expand_dims(box_y_min, axis=1), axis=2)
box_mins = np.concatenate((box_x_min, box_y_min), 1)
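# map normalized coordinates (assumed to lie in [-0.5, 0.5] relative to the
# box) back to absolute pixel coordinates via the box size and top-left corner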
y_predict = y_predict * box_wh + box_wh/2
y_predict += box_mins
y_predict = np.transpose(y_predict, (1, 2, 0))
predictions = {}
predictions['my_pred'] = y_predict
print(output_path)
sio.savemat(output_path + '.mat', predictions)
def load_encoding(path):
print(path)
with open(path, 'rb') as handle:
files = pickle.load(handle)
return files
def main(gaussian_path, output_path):
training_data = load_encoding(os.path.join(gaussian_path, 'train', 'gaussians.pkl3'))
testing_data = load_encoding(os.path.join(gaussian_path, 'test', 'gaussians.pkl3'))
val_data = load_encoding(os.path.join(gaussian_path, 'val', 'gaussians.pkl3'))
X_train, Y_train = training_data['predictions_mean'], training_data['gt']
X_val = val_data['predictions_mean']
X_test = testing_data['predictions_mean']
# Following the same procedure as https://github.com/tomasjakab/imm/blob/dev/scripts/test.py
# from Tomas Jakab
regr = sklearn.linear_model.Ridge(alpha=0.0, fit_intercept=False)
print(X_train.shape, Y_train.shape)
print("Fitting...")
_ = regr.fit(X_train, Y_train)
print("Predicting on test...")
y_predict_test = regr.predict(X_test)
print("Predicting on Validation...")
y_predict_val = regr.predict(X_val)
n_keypoints = 7
y_predict_test_rshp = y_predict_test.reshape(-1, 2, n_keypoints)
y_predict_val_rshp = y_predict_val.reshape(-1, 2, n_keypoints)
save_results(y_predict_test_rshp, testing_data['bboxes'], os.path.join(output_path, 'preds_test'))
save_results(y_predict_val_rshp, val_data['bboxes'], os.path.join(output_path, 'preds_val'))
if __name__ == '__main__':
# parse command-line arguments
parser = argparse.ArgumentParser(description='')
parser.add_argument('--gaussian_path', type=str)
parser.add_argument('--out_path', type=str)
args = parser.parse_args()
main(args.gaussian_path, args.out_path)
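# Hypothetical end-to-end usage (paths are placeholders):
#   python scripts/map_to_supervised_landmarks_bbc.py \
#       --gaussian_path <encodings_dir> --out_path <results_dir>
#   python scripts/eval_supervised_landmarks_bbc.py \
#       --results_path <results_dir> --test_anno_path <path/to/results.mat>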
| UnsupervisedLandmarkLearning-master | scripts/map_to_supervised_landmarks_bbc.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from setuptools import setup, find_packages
import os
distribution_name = 'framework-reproducibility'
package_name = 'fwr13y'
# This file needs to be executed during installation. It's not possible to
# import the full package during installation because it will fail to import if
# not all of the supported frameworks have been installed. By temporarily
# appending to sys.path, it's possible to just import from the version module.
import sys
sys.path.append(package_name)
from version import __version__ as version
sys.path.remove(package_name)
packages = (
['fwr13y'] + ['fwr13y.' +
subpackage for subpackage in find_packages(where='fwr13y')])
readme = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"pypi_description.md")
with open(readme, "r") as fp:
long_description = fp.read()
description = ("Providing reproducibility in deep learning frameworks")
url = "https://github.com/NVIDIA/%s" % distribution_name
install_requires = [] # intentionally not including the framework packages
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python'
]
keywords = ("framework tensorflow gpu deep-learning determinism "
"reproducibility pytorch seed seeder noise noise-reduction "
"variance-reduction atomics ngc gpu-determinism deterministic-ops "
"frameworks gpu-support d9m r13y fwr13y")
setup(
name = distribution_name,
version = version,
packages = packages,
url = url,
license = 'Apache 2.0',
author = 'NVIDIA',
author_email = '[email protected]',
description = description,
long_description = long_description,
long_description_content_type = 'text/markdown',
install_requires = install_requires,
classifiers = classifiers,
keywords = keywords,
platforms = ['TensorFlow', 'PyTorch', 'PaddlePaddle']
)
| framework-reproducibility-master | setup.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import csv
import numpy as np
from os.path import abspath, dirname
import sys
sys.path.append(dirname(abspath(__file__))+'/../../')
import fwr13y.seeder.pyt as seeder
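# Minimal generator stand-in: seeder.register_generator() is given a callable
# that receives a seed on every reseed(); GetSeed just records the last one.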
class GetSeed:
def __init__(self):
self.seed = None
def get_seed(self, seed):
self.seed = seed
def test_seeder():
csv_data = []
# header
csv_data.append(["master_seed", "local_rank", "epoch", "task", "gen seed"])
def generate_seeds():
for local_rank in range(8):
seeder.init(master_seed=master_seed, ngpus=8, local_rank=local_rank)
gs = GetSeed()
seeder.register_generator(gs.get_seed)
seeder.reseed(0, 0)
seed = gs.seed
csv_data.append([master_seed, local_rank, 0, 0, seed])
for epoch in range(10):
seeder.reseed(1, epoch)
seed = gs.seed
csv_data.append([master_seed, local_rank, epoch, 1, seed])
for master_seed in np.random.randint(low=0, high=100000, size=10):
generate_seeds()
for master_seed in range(10):
generate_seeds()
with open("generated_seeds.csv", "w") as csvfile:
writer = csv.writer(csvfile)
for row in csv_data:
writer.writerow(row)
def main():
test_seeder()
if __name__ == "__main__":
main()
| framework-reproducibility-master | test/seeder/test_seeder.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import argparse
import pandas as pd
from print_stats import print_stats
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--log-file",
type=str,
help="Input log file",
)
return parser
def main():
parser = parse_args()
args = parser.parse_args()
pdata = pd.read_csv(args.log_file, index_col=0)
print_stats(pdata)
if __name__ == "__main__":
main()
| framework-reproducibility-master | test/seeder/test_print_stats.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import argparse
import pandas as pd
from convergence_stats import get_convergence_stats
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--log-file",
type=str,
help="Input log file",
)
return parser
def main():
parser = parse_args()
args = parser.parse_args()
pdata = pd.read_csv(args.log_file, index_col=0)
get_convergence_stats(pdata)
if __name__ == "__main__":
main()
| framework-reproducibility-master | test/seeder/test_convergence_stats.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import numpy as np
from scipy import stats
import pandas as pd
def get_convergence_stats(pdata: pd.DataFrame):
# assuming the first column contains the Hopper val loss and
# columns 2, 3, ... contain the Ampere val losses
ds = pdata.iloc[:, 0] - np.mean(pdata.iloc[:, 1:], axis=1)
mean_loss = np.mean(ds)
std_loss = np.std(ds)
# ll - lower_limit; ul - upper_limit
ll95, ul95 = stats.t.interval(0.95, len(ds) - 1, mean_loss, std_loss/np.sqrt(len(ds)))
print(f"Range for 95% CI: ( {ll95:.8f}, {ul95:.8f} )")
print(f"95% CI contains 0:", "yes" if ll95 < 0 < ul95 else "no")
ll99, ul99 = stats.t.interval(0.99, len(ds) - 1, mean_loss, std_loss/np.sqrt(len(ds)))
print(f"Range for 99% CI: ( {ll99:.8f}, {ul99:.8f} )")
print(f"99% CI contains 0:", "yes" if ll99 < 0 < ul99 else "no")
| framework-reproducibility-master | test/seeder/convergence_stats.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import random
import csv
import numpy as np
import torch
from os.path import abspath, dirname
import sys
sys.path.append(dirname(abspath(__file__)) + "/../../")
import fwr13y.seeder.pyt as seeder
class GetSeed:
def __init__(self):
self.seed = None
def get_seed(self, seed):
self.seed = seed
def call_torch_rand():
return torch.rand(1).item()
def call_np_rand():
return np.random.rand(1)[0]
def call_random():
return random.random()
def generate_random_numbers(
seeds, csv_filename, num_iterations_per_seed, seed_fn, random_fn
):
csv_data = []
# header
if len(seeds) == 1:
csv_data.append(["random value"])
else:
csv_data.append(["seed", "random value"])
for s in seeds:
seed_fn(s)
for i in range(num_iterations_per_seed):
if len(seeds) > 1:
csv_data.append([s, random_fn()])
else:
csv_data.append([random_fn()])
csvfile = csv_filename
with open(csvfile, "w") as f:
writer = csv.writer(f)
for row in csv_data:
writer.writerow(row)
def test_randomness(master_seed):
# seeding functions for random number generators
seed_functions = (torch.manual_seed, np.random.seed, random.seed)
random_functions = (call_torch_rand, call_np_rand, call_random)
rng_names = ("torch", "numpy", "random")
# num_seeds corresponds to the number of epochs
# testing random number generation for one seed
num_iters = 1000000
for seed_fn, random_fn, name in zip(seed_functions, random_functions, rng_names):
csv_filename = (
"rand_"
+ name
+ "_one_seed_"
+ str(master_seed)
+ "_"
+ str(num_iters)
+ "_"
+ str(master_seed)
+ ".csv"
)
generate_random_numbers(
[master_seed], csv_filename, num_iters, seed_fn, random_fn
)
# testing random number generation for 1000 seeds
num_seeds = 1000
num_iters_per_seed = 1000
# seeds are generated:
# - from sequence 0 .. num_seeds-1
seeds_from_seq = []
for s in range(num_seeds):
seeds_from_seq.append(s)
for seed_fn, random_fn, name in zip(seed_functions, random_functions, rng_names):
filename = (
"rand_"
+ name
+ "_1000seeds_from_seq_1000iters_"
+ str(master_seed)
+ ".csv"
)
generate_random_numbers(
seeds_from_seq, filename, num_iters_per_seed, seed_fn, random_fn
)
# - from seedGen in seeder.py
seeds_from_seedGen = []
sg = seeder.SeedGen(master_seed=master_seed, ngpus=1, local_rank=0)
for s in range(num_seeds):
seeds_from_seedGen.append(sg(1, s))
for seed_fn, random_fn, name in zip(seed_functions, random_functions, rng_names):
csv_filename = (
"rand_"
+ name
+ "_1000seeds_from_seedGen_1000iters_"
+ str(master_seed)
+ ".csv"
)
generate_random_numbers(
seeds_from_seedGen, csv_filename, num_iters_per_seed, seed_fn, random_fn
)
def main():
test_randomness(int(sys.argv[1]))
if __name__ == "__main__":
main()
| framework-reproducibility-master | test/seeder/test_generated_random_numbers.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from pathlib import Path
import json
import argparse
import glob
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def get_train_losses(logs, log_data, limits):
metric = "train_loss"
for logfile in logs:
with Path(logfile).open() as f:
data = []
global_steps = []
lines = f.readlines()
for line in lines:
# discarding the "DLL " prefix from each line gives us a valid json string
record = json.loads(line[len("DLL ") :])
# step with format [x,y,z] is a training step
if "step" in record and len(record["step"]) == 3:
s = record["step"]
if metric in record["data"]:
m = record["data"][metric]
data.append(m)
# record global step
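# (assumed DLL step layout: s = [epoch (1-based), step_within_epoch,
# steps_per_epoch])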
gs = (s[0] - 1) * s[2] + s[1]
global_steps.append(gs)
if not limits[0] or np.min(gs) < limits[0]:
limits[0] = np.min(gs)
if not limits[1] or np.max(gs) > limits[1]:
limits[1] = np.max(gs)
if not limits[2] or np.min(m) < limits[2]:
limits[2] = np.min(m)
if not limits[3] or np.max(m) > limits[3]:
limits[3] = np.max(m)
log_data.append((global_steps, data))
def plot_losses_from_logs(all_logs):
train_losses = {}
limits = [None, None, None, None]
for run_type, file_pattern in all_logs.items():
log_filelist = sorted(glob.glob(file_pattern))
train_losses[run_type] = []
get_train_losses(log_filelist, train_losses[run_type], limits)
plt.figure(1, figsize=(8, 10), dpi=300)
idx = 1
for run_type, loss_data in train_losses.items():
ax = plt.subplot(2, 2, idx)
ax.set_title(run_type)
ax.set_xlim(limits[0], limits[1])
ax.set_ylim(limits[2], limits[3])
ax.set_xlabel("training steps")
ax.set_ylabel("training loss")
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
for steps, losses in loss_data:
losses = np.array(losses)
plt.plot(steps, losses)
idx += 1
train_stats = {}
for run_type, loss_data in train_losses.items():
train_stats[run_type] = {
"steps": loss_data[0][0],
"mean": np.mean(loss_data, axis=0)[1],
"std": np.std(loss_data, axis=0)[1],
}
plt.figure(figsize=(8, 4), dpi=300)
ax = plt.subplot(1, 1, 1)
ax.set_xlabel("training steps")
ax.set_ylabel("training loss")
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
colors = [(0, 0, 1, 0.2), (0, 1, 0, 0.2), (0, 0.5, 0, 0.2), (1, 0, 0, 0.2)]
idx = 0
for run_type, loss_data in train_stats.items():
plt.plot(loss_data["steps"], loss_data["mean"], label=run_type)
s = loss_data["std"]
m = loss_data["mean"]
plt.fill_between(
loss_data["steps"], y1=m - s, y2=m + s, color=colors[idx], label=f"σ error"
)
idx += 1
ax.legend()
plt.show()
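# Hypothetical usage (glob patterns are placeholders): keys label the run
# configuration, values are glob patterns matching the DLL JSON log files of
# repeated runs.
# plot_losses_from_logs({
#     "baseline": "logs/baseline_rank0_run*.json",
#     "seeder": "logs/seeder_rank0_run*.json",
# })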
| framework-reproducibility-master | test/seeder/plot_losses/plot_utils.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import sys
import unittest
sys.path.insert(0, '../..')
from fwr13y.d9m.utils import _Version as Version
class TestUtils(unittest.TestCase):
def test_version_attributes(self):
major = 1
minor = 2
version_string = "%d.%d.3" % (major, minor)
v = Version(version_string)
self.assertEqual(v.original_version_string, version_string)
self.assertEqual(v.major_minor_version_string, version_string[0:3])
self.assertEqual(v.major, major)
self.assertEqual(v.minor, minor)
def test_version_class(self):
v = Version('23.45.26')
self.assertTrue (v.in_list(['1.2', '2.8', '23.45']))
self.assertFalse(v.in_list(['4.5', '2.9', '99.4']))
self.assertTrue (v.at_least('23.45'))
self.assertFalse(v.at_least('23.46'))
self.assertTrue (v.at_most('23.45'))
self.assertFalse(v.at_most('23.44'))
self.assertTrue (v.between('23.44', '23.47'))
self.assertFalse(v.between('1.2', '2.4'))
self.assertFalse(v.between('100.2', '2.4'))
self.assertTrue (v.between('1.0', '200.4'))
v = Version('1.2')
self.assertTrue (v.between('0.9', '1.4'))
v = Version('10.09-tf3')
self.assertTrue (v.in_list(['10.02', '10.09', '09.12']))
self.assertTrue (v.at_least('10.09'))
self.assertFalse(v.at_least('10.10'))
self.assertTrue (v.at_most('10.09'))
self.assertFalse(v.at_most('10.08'))
def test_version_class_exceptions(self):
self.assertRaises(ValueError, Version, '10')
self.assertRaises(TypeError, Version, None)
v = Version('2.3')
self.assertRaises(ValueError, v.at_least, '1')
self.assertRaises(ValueError, v.at_least, '1.2.3')
self.assertRaises(ValueError, v.at_most, '012')
self.assertRaises(ValueError, v.at_most, '012.004.435')
self.assertRaises(ValueError, v.between, '10', '2.2')
self.assertRaises(ValueError, v.between, '1.3', '20')
# self.assertRaises(ValueError, v.between, '10.2', '2.26.2') # short-circuit
self.assertRaises(ValueError, v.between, '1.2', '2.26.2')
self.assertRaises(ValueError, v.between, '1.3.6', '20.5')
if __name__ == '__main__':
unittest.main()
| framework-reproducibility-master | test/d9m/test_utils.py |
# Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import sys
import unittest
sys.path.insert(0, '../..')
import fwr13y
import get_version
class TestMisc(unittest.TestCase):
def test_version(self):
expected_version = get_version.get_version()
self.assertEqual(fwr13y.__version__, expected_version)
if __name__ == '__main__':
unittest.main()
| framework-reproducibility-master | test/d9m/test_misc.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for unsorted segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from segment_reduction_helper import SegmentReductionHelper
sys.path.insert(0, '../..')
import fwr13y.d9m.tensorflow as tf_determinism
import utils as local_test_utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Simplifies logging
# The tests in the following class were originally copied from
# https://github.com/tensorflow/tensorflow/blob/1e9b9b1568d550e6779d2ddd5d193968254d3029/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
# and were then enhanced.
# NOTE: gen_math_ops.unsorted_segment_sum has GPU kernels for the following
# data types, float16/32/64, complex64/128. The dynamic patch adopts a
# "super-accumulator" approach which does the operation in higher precision with
# necessary pre-conversion and post-conversion. Also note that integer operation
# generally has no issue with the non-associativity of floating-point rounding
# errors. Therefore the patch will not provide determinism for float64,
# complex128 or integer operands. For bfloat16, no GPU kernel is available for
# TF version less than(and equal to) 2.3. But it is likely that the patched ops
# will operate, in any given configuration, faster using float32 on GPU than
# using bfloat16 on a CPU. Therefore, we demonstrate a proof-of-concept for
# rapidly providing accelerated GPU support in frameworks for new data formats
# before they are implemented natively in hardware.
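# A minimal, hypothetical sketch of the pre-/post-conversion described in the
# note above (illustration only, not the actual patch; the real patch also
# makes the reduction itself deterministic):
def _sketch_higher_precision_unsorted_segment_sum(data, segment_ids,
                                                  num_segments):
    orig_dtype = data.dtype
    if orig_dtype in (tf.float16, tf.bfloat16):
        # accumulate in float32, then cast the result back to the input dtype
        data = tf.cast(data, tf.float32)
    result = tf.math.unsorted_segment_sum(data, segment_ids, num_segments)
    return tf.cast(result, orig_dtype)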
# Upstream class name: UnsortedSegmentTest
class UnsortedSegmentSumTest(SegmentReductionHelper):
def __init__(self, methodName='runTest'):
# Each item is np_op1, np_op2, tf_op, initial_value functor
self.ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0)]
# A subset of ops has been enabled for complex numbers
self.complex_ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0)]
self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32,
dtypes_lib.float64]
self.all_dtypes = (self.differentiable_dtypes +
[dtypes_lib.bfloat16,
dtypes_lib.int64, dtypes_lib.int32,
dtypes_lib.complex64, dtypes_lib.complex128])
super(UnsortedSegmentSumTest, self).__init__(methodName=methodName)
def testValues(self):
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
tf_x, np_x = self._input(shape, dtype=dtype)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
for np_op1, np_op2, tf_op, init_op in ops_list:
# sqrt_n doesn't support integers
if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
continue
# todo(philjd): enable this test once real_div supports bfloat16
if (np_op2 in [self._sqrt_n_reduce_op, self._mean_reduce_op] and
dtype == dtypes_lib.bfloat16):
continue
np_ans = self._segmentReduce(
indices, np_x, np_op1, np_op2, num_segments=num_segments,
initial_value=init_op(dtype))
s = tf_op(tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
if dtype is dtypes_lib.bfloat16:
tf_ans = tf_ans.astype(np.float32)
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testNumSegmentsTypes(self):
dtypes = [dtypes_lib.int32, dtypes_lib.int64]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.cached_session(use_gpu=True):
tf_x, np_x = self._input(shape)
num_segments_constant = constant_op.constant(
num_segments, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
s = math_ops.unsorted_segment_sum(
data=tf_x,
segment_ids=indices,
num_segments=num_segments_constant)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
@tf_test_util.run_deprecated_v1
def testGradientsTFGradients(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
# test CPU and GPU as tf.gather behaves differently on each device
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
for _, _, tf_op, _ in ops_list:
tf_x, np_x = self._input(shape, dtype=dtype)
s = tf_op(tf_x, indices, num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x,
delta=1.)
self.assertAllCloseAccordingToType(jacob_t, jacob_n,
half_atol=1e-2)
def _computeGradient(self, tf_op, indices, num_segments,
shape, num_cols, dtype):
tf_x, np_x = self._input(shape, dtype=dtype)
if context.executing_eagerly():
def f(x):
return tf_op(x, indices, num_segments)
gradient_tape_jacob_t, jacob_n = gradient_checker_v2.compute_gradient(
f, [tf_x], delta=1.0)
self.assertAllClose(jacob_n, gradient_tape_jacob_t)
else:
with self.cached_session():
s = tf_op(tf_x, indices, num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(jacob_t, jacob_n)
# This method has been enhanced to run on older versions of TensorFlow
@tf_test_util.run_in_graph_and_eager_modes
def testGradientsGradientTape(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
# test CPU and GPU as tf.gather behaves differently on each device
# fwr13y.d9m note: the upstream test uses tf_test_util.use_gpu, which
# seems to suffer from the same problem, and presumably does the same
# thing, as self.session(force_gpu=true). So we replaced
# tf_test_util.use_gpu with local_test_utils.force_gpu_session(self).
for use_gpu in [local_test_utils.force_gpu_session(self),
tf_test_util.force_cpu()]:
with use_gpu:
# with local_test_utils.force_gpu_session(self):
for _, _, tf_op, _ in ops_list:
self._computeGradient(tf_op, indices, num_segments, shape,
num_cols, dtype)
# Method removed because it only tests math_ops.unsorted_segment_prod
# def testProdGrad(self):
# ...
@tf_test_util.run_deprecated_v1
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
# NB: for this test to work, indices must be valid for SegmentSum, namely
# it must be sorted, the indices must be contiguous, and num_segments
# must be max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
for dtype in self.differentiable_dtypes:
with self.cached_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
unsorted_jacob_t, unsorted_jacob_n = (
gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
[num_segments, num_cols],
x_init_value=np_x, delta=1))
# Results from SegmentSum
sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
sorted_s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)
@tf_test_util.run_deprecated_v1
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
# Note: With PR #13055 a negative index will be ignored silently.
with self.session(use_gpu=False):
for bad in [[2]], [[7]]:
unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
self.evaluate(unsorted)
@tf_test_util.run_deprecated_v1
def testEmptySecondDimension(self):
dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
np.complex64, np.complex128]
with self.session(use_gpu=True):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
def testDropNegatives(self):
# Note: the test works by replacing segment id 8 with -1 in the indices
# and zeroing the corresponding values in the numpy reference output.
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
with self.session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
# Zero out segment 8 in the reference (segments 9-11 are already zero)
np_ans[8:] = 0
# Replace 8 with -1 in indices
np.place(indices, indices == 8, [-1])
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
class UnsortedSegmentSumDeterministicTest(SegmentReductionHelper):
def __init__(self, methodName='runTest'):
# Each item is np_op1, np_op2, tf_op, initial_value functor
self.ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(np.add, None,
tf.math.unsorted_segment_sum, lambda t: 0)]
# A subset of ops has been enabled for complex numbers
self.complex_ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(np.add, None,
tf.math.unsorted_segment_sum, lambda t: 0)]
self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32]
self.all_dtypes = (self.differentiable_dtypes +
[dtypes_lib.complex64, dtypes_lib.bfloat16])
self.repeat_count = 5
super(
UnsortedSegmentSumDeterministicTest, self).__init__(
methodName=methodName)
def _conditionally_skip_test(self):
if local_test_utils.tf_version_at_least('2.7'):
self.skipTest("Not testing this in TF 2.7 and onward")
def _testBackwardCase(self, dtype, indices, num_segments, op_binding, shape):
numpy_seed = 123
_, _, tf_op, _ = op_binding
input_val = self._randomDataOp(shape, dtype, seed=None)
if context.executing_eagerly():
def op_gradients(local_seed):
with backprop.GradientTape() as tape:
tape.watch(input_val)
op_output = tf_op(input_val, indices, num_segments)
upstream_gradients = self._randomDataOp(op_output.shape,
dtype, local_seed)
gradient_injector_output = op_output * upstream_gradients
return tape.gradient(gradient_injector_output, input_val)
for i in range(self.repeat_count):
local_seed = numpy_seed + i # select different upstream gradients
result_a = op_gradients(local_seed)
result_b = op_gradients(local_seed)
self.assertAllEqual(result_a, result_b)
else:
op_output = tf_op(input_val, indices, num_segments)
output_shape = op_output.shape
upstream_gradients = array_ops.placeholder(dtype, shape=output_shape,
name='upstream_gradients')
gradient_injector_output = op_output * upstream_gradients
op_gradients = gradients_impl.gradients(
gradient_injector_output,
input_val,
grad_ys=None,
colocate_gradients_with_ops=True)[0]
for i in range(self.repeat_count):
feed_dict = {upstream_gradients:np.random.random(output_shape)}
result_a = op_gradients.eval(feed_dict=feed_dict)
result_b = op_gradients.eval(feed_dict=feed_dict)
self.assertAllEqual(result_a, result_b)
# The backward operation is not known or expected to introduce nondeterminism
# but we're testing it for completeness.
@tf_test_util.run_in_graph_and_eager_modes
def testBackward(self):
num_cols = 2
num_rows = 64
num_segments = 64
segment_size = num_cols * num_rows
indices_flat = np.random.randint(low=-1, high=num_segments,
size=(segment_size,))
with local_test_utils.force_gpu_session(self):
for dtype in self.differentiable_dtypes:
for indices in indices_flat, indices_flat.reshape(num_rows, num_cols):
ops_list = self.complex_ops_list if dtype.is_complex \
else self.ops_list
for op_binding in ops_list:
shape = indices.shape + (num_cols,)
self._testBackwardCase(dtype, indices, num_segments,
op_binding, shape)
@tf_test_util.run_in_graph_and_eager_modes
def testForward(self):
# We don't patch TF version 2.7 or later, so it's not imperative that we
# test determinism of this op in those versions of TensorFlow. However,
# this test should theoretically pass on TF 2.7+ and is currently failing
# for unknown reasons.
# TODO: Get this test working/passing on TF 2.7+
self._conditionally_skip_test()
num_cols = 2
num_rows = 64
num_segments = 64
segment_size = num_cols * num_rows
indices_flat = np.random.randint(low=-1, high=num_segments,
size=(segment_size,))
with local_test_utils.force_gpu_session(self):
for dtype in self.all_dtypes:
for indices in indices_flat, indices_flat.reshape(num_rows, num_cols):
shape = indices.shape + (num_cols,)
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
x, _ = self._random_input(shape, dtype=dtype)
for _, _, tf_op, _ in ops_list:
for _ in range(self.repeat_count):
result_a = self.evaluate(tf_op(x, indices, num_segments))
result_b = self.evaluate(tf_op(x, indices, num_segments))
self.assertAllEqual(result_a, result_b)
# Prior to TF 2.7 (when we patch), op `gen_math_ops.segment_sum()` is not
# patched for data type float64 and complex128 on GPU. A warning will be
# thrown to indicate to users float64/complex128 is still exposed to
# GPU-nondeterminism.
@tf_test_util.run_deprecated_v1
def testNonSupportedDataTypes(self):
self._conditionally_skip_test()
non_supported_types = (dtypes_lib.float64, dtypes_lib.complex128)
indices = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
shape = indices.shape + (2,)
with local_test_utils.force_gpu_session(self):
for dtype in non_supported_types:
ops_list = self.complex_ops_list if dtype.is_complex \
else self.ops_list
tf_x, _ = self._input(shape, dtype)
for _, _, tf_op, _ in ops_list:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
s = tf_op(tf_x, indices, num_segments)
self.evaluate(s)
# In NGC TF1 containers from 22.03 onwards, this op generates an
# extra warning ["tostring() is deprecated"]. In all other
# containers, only the expected warning is generated.
self.assertGreater(len(w), 0)
self.assertIsInstance(w[-1].message, UserWarning)
self.assertTrue("GPU-determinism" in str(w[-1].message))
class SegmentReductionTestMisc(test.TestCase):
def testSDocstring(self):
op = tf.math.unsorted_segment_sum
docstring = op.__doc__
if not docstring: # falsy (None or "")
self.fail("The patched op %s has no docstring" % op.__name__)
if docstring.startswith('ERROR'):
self.fail("The docstring for the patched op %s has not been assigned"
% op.__name__)
if __name__ == "__main__":
tf_determinism.enable_determinism()
test.main()
| framework-reproducibility-master | test/d9m/test_unsorted_segment_sum_d9m.py |
# Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
sys.path.insert(0, '../..')
expected_exception = None
if len(sys.argv) > 2 and sys.argv[1] == "--expected-exception":
expected_exception_string = sys.argv[2]
if expected_exception_string == "TypeError":
expected_exception = TypeError
from fwr13y.d9m.tensorflow import patch
try:
patch()
except Exception as e:
if type(e) == expected_exception:
print("Expected exception (%s) caught: " % expected_exception_string + str(e))
sys.exit(0)
else:
print("Unexpected exception: %s" % str(e))
sys.exit(1)
if expected_exception is not None:
print("Expected exception (%s) didn't occur!" % expected_exception_string)
sys.exit(1)
| framework-reproducibility-master | test/d9m/test_patch_apply.py |
# Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import sys
sys.path.insert(0,'../../fwr13y')
from version import __version__
def get_version():
return __version__
if __name__ == "__main__":
print(__version__)
| framework-reproducibility-master | test/d9m/get_version.py |
# Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
sys.path.insert(0, '../..')
from fwr13y.d9m.tensorflow import enable_determinism
enable_determinism()
| framework-reproducibility-master | test/d9m/test_enable_determinism_apply.py |
import tensorflow as tf
from fwr13y.d9m.utils import _Version as Version
# Notes about force_gpu_session:
#
# In TF1.15 and TF2.0, an apparent bug in tf.test.TestCase::session prevents us
# from throwing an exception when a GPU is not available (by setting the
# force_gpu argument of session to True). On those versions of TensorFlow, if
# force_gpu is set to True when there are two GPUs in the machine, including
# a GPU such as a GeForce GT 710 (for display only), the TensorFlow code that
# checks for the presence of GPUs produces an exception (that includes a
# mention of XLA). We assume that this bug went unnoticed in the TensorFlow CI
# test suite at least partly because those CI machines are, presumably,
# headless, single-GPU machines. If tests were run only on those TensorFlow
# versions, the responsibility would therefore fall on the user to run the
# tests on a machine that contains a GPU. Luckily, our test
# suite runs on other versions of TensorFlow, and will throw an exception if
# there is no GPU present.
#
# In TF1.15 and TF2.0, setting the config keyword argument of
# tf.test.TestCase::session also activates the above bug related to detecting
# the presence of a GPU. So the config keyword argument must not be set on those
# versions of TensorFlow.
def force_gpu_session(test_object):
"""A work-around for a bug in TensorFlow versions 1.15 and 2.0
If you want to use tf.test.TestCase::session with force_gpu=True on versions
of TensorFlow including version 1.15 or 2.0, then call this function instead
to gracefully work-around a bug in which an XLA-related exception is thrown
(when there are GPUs available) on some machines. On TensorFlow 1.15 and 2.0
it will return tf.test.TestCase::session(use_gpu=True). On other versions of
TensorFlow, it will return tf.test.TestCase::session(force_gpu=True).
Typical usage is as follows:
with force_gpu_session(test_object):
# ... eager-mode or graph-mode code
Args:
test_object:
A reference to the test object, an instance of tf.test.TestCase.
Returns:
A session context manager (the return value of tf.test.TestCase::session).
Raises:
ValueError if test_object is not an instance of tf.test.TestCase.
If a GPU is not available, this function is expected to raise an exception
on all versions of TensorFlow except 1.15 and 2.0.
"""
if not isinstance(test_object, tf.test.TestCase):
raise ValueError("test_object must be an instance of tf.test.TestCase")
if tf_version_in_list(['1.15', '2.0']):
print("WARNING:"
"an exception will not be thrown if there is no GPU present.")
# The same bug that makes force_gpu=True throw an exception on some machines
# containing more than one GPU also prevents us from checking for the
# presence of a GPU using tf.test.is_gpu_available so that we can throw
# an exception if one isn't.
return test_object.session(use_gpu=True)
else:
return test_object.session(force_gpu=True)
def tf_version_in_list(list_of_versions):
return Version(tf.version.VERSION).in_list(list_of_versions)
def tf_version_at_least(version):
return Version(tf.version.VERSION).at_least(version)
| framework-reproducibility-master | test/d9m/utils.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from segment_reduction_helper import SegmentReductionHelper
sys.path.insert(0, '../..')
import fwr13y.d9m.tensorflow as tf_determinism
import utils as local_test_utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Simplifies logging
# The tests in the following class were originally copied from
# https://github.com/tensorflow/tensorflow/blob/1e9b9b1568d550e6779d2ddd5d193968254d3029/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
# and were then enhanced.
# NOTE: Op `gen_math_ops.segment_sum` has GPU kernels for the following data
# types float16/32/64. The dynamic patch adopts a "super-accumulator" approach
# which does the operation in higher precision with necessary pre-conversion
# and post-conversion. Also note that integer operation generally has no issue
# with the non-associativity of floating-point rounding errors. Therefore the
# patch will not provide determinism for float64 or integer operands. For
# bfloat16, no GPU kernel is available for TF version less than(and equal to)
# 2.3. But it is likely that the patched ops will operate, in any given
# configuration, faster using float32 on GPU than using bfloat16 on a CPU.
# Therefore, we demonstrate a proof-of-concept for rapidly providing accelerated
# GPU support in frameworks for new data formats before they are implemented
# natively in hardware.
# Upstream class name: SegmentReductionOpTest
class SegmentSumTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.segment_sum)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, math_ops.segment_sum)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
# `tf_ans.shape` can only infer that sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
@tf_test_util.run_deprecated_v1
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
math_ops.segment_sum(data=tf_x, segment_ids=indices)
@tf_test_util.run_deprecated_v1
def testSegmentIdsSize(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape)
indices = [0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
self.evaluate(s)
@tf_test_util.run_deprecated_v1
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, 1]
result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsGreaterThanZero(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [1, 1, 2, 2]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsHole(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 3, 3]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
@tf_test_util.run_deprecated_v1
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted."):
self.evaluate(s)
@tf_test_util.run_deprecated_v1
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing"):
self.evaluate(s)
@tf_test_util.run_deprecated_v1
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly "
"because 'segment_ids' input is not sorted."):
self.evaluate(s)
@tf_test_util.run_deprecated_v1
def testSegmentIdsInvalid4(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@tf_test_util.run_deprecated_v1
def testSegmentIdsInvalid5(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -2]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@tf_test_util.run_deprecated_v1
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
    for tf_op in [math_ops.segment_sum]:
with self.cached_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
# Method removed because it only tests math_ops.segment_mean
# def testDataInvalid(self):
# ...
class SegmentSumDeterministicTest(SegmentReductionHelper):
def __init__(self, methodName='runTest'):
# Each item is np_op1, np_op2, tf_op, initial_value functor
self.ops_list = [(np.add, None,
math_ops.segment_sum, lambda t: 0),
(np.add, None,
tf.math.segment_sum, lambda t: 0)]
# A subset of ops has been enabled for complex numbers
self.complex_ops_list = [(np.add, None,
math_ops.segment_sum, lambda t: 0),
(np.add, None,
tf.math.segment_sum, lambda t: 0)]
self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32]
self.all_dtypes = (self.differentiable_dtypes + [dtypes_lib.bfloat16])
self.repeat_count = 5
super(SegmentSumDeterministicTest,
self).__init__(methodName=methodName)
def _conditionally_skip_test(self):
if local_test_utils.tf_version_at_least('2.7'):
self.skipTest("Not testing this in TF 2.7 and onward")
def _testBackwardCase(self, dtype, indices, tf_op, shape):
numpy_seed = 123
input_val = self._randomDataOp(shape, dtype, seed=None)
output_shape = [indices[-1]+1, shape[1]]
if context.executing_eagerly():
def op_gradients(local_seed):
with backprop.GradientTape() as tape:
tape.watch(input_val)
op_output = tf_op(input_val, indices)
upstream_gradients = self._randomDataOp(output_shape, dtype, local_seed)
gradient_injector_output = op_output * upstream_gradients
return tape.gradient(gradient_injector_output, input_val)
for i in range(self.repeat_count):
local_seed = numpy_seed + i # select different upstream gradients
result_a = op_gradients(local_seed)
result_b = op_gradients(local_seed)
self.assertAllEqual(result_a, result_b)
else:
op_output = tf_op(input_val, indices)
upstream_gradients = array_ops.placeholder(dtype, shape=output_shape,
name='upstream_gradients')
gradient_injector_output = op_output * upstream_gradients
op_gradients = gradients_impl.gradients(
gradient_injector_output,
input_val,
grad_ys=None,
colocate_gradients_with_ops=True)[0]
for i in range(self.repeat_count):
        feed_dict = {upstream_gradients: np.random.random(output_shape)}
result_a = op_gradients.eval(feed_dict=feed_dict)
result_b = op_gradients.eval(feed_dict=feed_dict)
self.assertAllEqual(result_a, result_b)
# The backward operation is not known or expected to introduce nondeterminism
# but we're testing it for completeness.
@tf_test_util.run_in_graph_and_eager_modes
def testBackward(self):
num_cols = 8
num_segments = 32
segment_size = 256
shape = [segment_size, num_cols]
indices = np.random.randint(low=0, high=num_segments, size=(segment_size,))
indices = np.sort(indices)
with local_test_utils.force_gpu_session(self):
      # Note: using self.session(force_gpu=True) here leads to an XLA issue.
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex \
else self.ops_list
for _, _, tf_op, _ in ops_list:
self._testBackwardCase(dtype, indices, tf_op, shape)
@tf_test_util.run_in_graph_and_eager_modes
def testForward(self):
# We don't patch TF version 2.7 or later, so it's not imperative that we
# test determinism of this op in those versions of TensorFlow. However,
# this test should theoretically pass on TF 2.7+ and is currently failing
# for unknown reasons.
# TODO: Get this test working/passing on TF 2.7+
self._conditionally_skip_test()
num_cols = 8
num_segments = 32
segment_size = 256
shape = [segment_size, num_cols]
indices = np.random.randint(low=0, high=num_segments, size=(segment_size,))
indices = np.sort(indices)
with local_test_utils.force_gpu_session(self):
      for dtype in self.all_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex \
else self.ops_list
tf_x, _ = self._random_input(shape, dtype=dtype)
        # Floating-point types must be used to exercise the nondeterminism.
for _, _, tf_op, _ in ops_list:
for _ in range(self.repeat_count):
            result_a = tf_op(data=tf_x, segment_ids=indices)
            result_b = tf_op(data=tf_x, segment_ids=indices)
self.assertAllEqual(result_a, result_b)
  # Prior to TF 2.7 (the versions in which we apply the patch), op
  # `gen_math_ops.segment_sum()` is not patched for data type float64 on GPU.
  # A warning is issued to indicate to users that float64 is still exposed to
  # GPU-nondeterminism.
@tf_test_util.run_in_graph_and_eager_modes
def testNonSupportedDataTypes(self):
self._conditionally_skip_test()
shape = [10, 2]
indices = [i // 3 for i in range(10)]
non_supported_types = (dtypes_lib.float64,)
with local_test_utils.force_gpu_session(self):
for dtype in non_supported_types:
ops_list = self.complex_ops_list if dtype.is_complex \
else self.ops_list
tf_x, _ = self._input(shape, dtype)
for _, _, tf_op, _ in ops_list:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
s = tf_op(data=tf_x, segment_ids=indices)
self.evaluate(s)
# In NGC TF1 containers from 22.03 onwards, this op generates an
# extra warning ["tostring() is deprecated"]. In all other
# containers, only the expected warning is generated.
self.assertGreater(len(w), 0)
self.assertIsInstance(w[-1].message, UserWarning)
self.assertTrue("GPU-determinism" in str(w[-1].message))
class SegmentReductionTestMisc(test.TestCase):
def testSDocstring(self):
op = tf.math.segment_sum
docstring = op.__doc__
if not docstring: # falsy (None or "")
self.fail("The patched op %s has no docstring" % op.__name__)
if docstring.startswith('ERROR'):
self.fail("The docstring for the patched op %s has not been assigned"
% op.__name__)
if __name__ == "__main__":
tf_determinism.enable_determinism()
test.main()
| framework-reproducibility-master | test/d9m/test_segment_sum_d9m.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import sys
import unittest
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Simplifies logging
# Notes:
# 0. These notes are relevant to this current file and also
# test_patch_segment_sum.py and test_patch_unsorted_segment_sum.py
# 1. The ops were expected to operate deterministically on the CPU and they do
#    indeed operate deterministically if forcibly pinned to the CPU with
#    tf.device('/device:CPU:0'), as in the sketch following these notes. What
#    is not fully understood is why, when they are placed on the CPU using
#    self.session(use_gpu=False), the ops still introduce nondeterminism. By
#    setting the log_device_placement parameter in the session config to True
#    under these conditions, we are able to confirm that the ops are running
#    on the CPU.
# 2. To capture nondeterminism, random input data is necessary.
# 3. The nondeterminism of dtypes_lib.float64, dtypes_lib.complex128 cannot be
# removed by this patch, so they are not tested.
# 4. The regular op tests below, represented by all the test classes except the
# final two, which have names ending in "Deterministic", were taken from
# tensorflow/python/kernel_tests/segment_reduction_ops_test.py
# (as of 2020-08-02); URL to file-at-commit:
# https://github.com/tensorflow/tensorflow/blob/6371d4a38cfb122a8d9b2a03d5f56444e95462b0/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
# 5. The names of most of the upstream test classes are confusing (even more so
# in the context of their limited use here), so the names have been changed
# here, as appropriate, along with comments to indicate the original test
# class names.
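# A minimal sketch of the forcible CPU pinning mentioned in note 1 (tensor
# names are illustrative):
#
#   with tf.device('/device:CPU:0'):
#     result = tf.math.segment_sum(data=data, segment_ids=segment_ids)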
class SegmentReductionHelper(test.TestCase):
def _random_input(self, input_shape, dtype=dtypes_lib.int32):
np.random.seed(hash(dtype) % 256)
np_values = np.random.random(input_shape).astype(dtype.as_numpy_dtype)
# Add a non-zero imaginary component to complex types.
if dtype.is_complex:
np_values -= 1j * np_values
return constant_op.constant(
np_values, shape=input_shape, dtype=dtype), np_values
def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
# Add a non-zero imaginary component to complex types.
if dtype.is_complex:
np_values -= 1j * np_values
return constant_op.constant(
np_values, shape=input_shape, dtype=dtype), np_values
def _randomDataOp(self, shape, data_type, seed):
if seed is not None:
np.random.seed(seed)
return constant_op.constant(np.random.random_sample(shape), dtype=data_type)
def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None,
initial_value=0):
if not x.size:
return np.array([])
indices = np.asarray(indices)
if num_segments is None:
num_segments = indices[-1] + 1
output = [None] * num_segments
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if (output[index] is not None) and op1 == np.max:
for j in range(0, output[index].shape[0]):
output[index][j] = op1([output[index][j], x_flat[i][j]])
elif output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
    # Zero-initialize values that are still uncalculated.
initial_value_slice = np.ones(slice_shape) * initial_value
output = [o if o is not None else initial_value_slice for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
def _sqrt_n_reduce_op(self, x):
return x[0] / np.sqrt(x[1]) if isinstance(x, tuple) else x
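  # For example, a segment mean can be computed with the helpers above
  # (`indices` and `x` as in _segmentReduce):
  #
  #   means = self._segmentReduce(indices, x, self._mean_cum_op,
  #                               self._mean_reduce_op)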
| framework-reproducibility-master | test/d9m/segment_reduction_helper.py |
# Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
sys.path.insert(0, '../..')
import numpy as np
import tensorflow as tf
from fwr13y.d9m import utils as package_utils
from fwr13y.d9m import tensorflow as tf_determinism
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
import utils as test_utils
# The tests in the following class were originally copied from
# https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/kernel_tests/bias_op_test.py
# and were then enhanced.
#
# The enhanced test code was then merged back into the
# stock TensorFlow repo (via PR 31465:
# https://github.com/tensorflow/tensorflow/pull/31465) and is represented, with
# additional deterministic op functionality and the determinism tests at the
# end of this file, in stock TensorFlow version 2.1.0.
#
# The three test functions testInputDims, testBiasVec, and testBiasInputsMatch,
# which are present in the stock TensorFlow test class, and which run on the
# nondeterministic version of the bias_add op, are not included in the following
# test class because the error-check functionality that they test is missing
# from the deterministic op patch.
@test_util.run_all_in_graph_and_eager_modes
class BiasAddTest(test.TestCase):
def _npBias(self, inputs, bias):
assert len(bias.shape) == 1
assert inputs.shape[-1] == bias.shape[0]
return inputs + bias.reshape(([1] * (len(inputs.shape) - 1)) +
[bias.shape[0]])
def testNpBias(self):
self.assertAllClose(
np.array([[11, 22, 33], [41, 52, 63]]),
self._npBias(
np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])))
def _testBias(self, np_inputs, np_bias, use_gpu=False):
np_val = self._npBias(np_inputs, np_bias)
with self.cached_session(use_gpu=use_gpu):
tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias))
self.assertAllCloseAccordingToType(np_val, tf_val)
def _AtLeast3d(self, np_value):
    # Pad the input value to at least 3 dimensions.
if np_value.ndim < 3:
return np.reshape(np_value, (1,) * (3 - np_value.ndim) + np_value.shape)
return np_value
def _NHWCToNCHW(self, np_value):
    # Pad the input value to at least 3 dimensions.
np_value = self._AtLeast3d(np_value)
# move the last dimension to second
np_dim = list(range(np_value.ndim))
np_dim_new = list(np_dim[0:1]) + list(np_dim[-1:]) + list(np_dim[1:-1])
return np.transpose(np_value, np_dim_new)
def _NCHWToNHWC(self, np_value):
assert len(np_value.shape) >= 3
np_dim = list(range(np_value.ndim))
# move the second dimension to the last
np_dim_new = list(np_dim[0:1]) + list(np_dim[2:]) + list(np_dim[1:2])
return np.transpose(np_value, np_dim_new)
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
np_val = self._npBias(np_inputs, np_bias)
np_inputs = self._NHWCToNCHW(np_inputs)
with self.cached_session(use_gpu=use_gpu):
tf_val = self.evaluate(nn_ops.bias_add(np_inputs, np_bias,
data_format="NCHW"))
tf_val = self._NCHWToNHWC(tf_val)
self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
def _testAll(self, np_inputs, np_bias):
self._testBias(np_inputs, np_bias, use_gpu=False)
self._testBiasNCHW(np_inputs, np_bias, use_gpu=False)
if np_inputs.dtype in [np.float16, np.float32, np.float64]:
self._testBias(np_inputs, np_bias, use_gpu=True)
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)
def _conditionally_skip_test(self):
# These tests have been disabled because they were failing on TF 1.13.
# We're not patching the bias add op in TF 1.13, and therefore do not need
# to test the normal operation of the op, which will have been tested in/by
# the framework's regression testing. Note that we do still test that the op
# appears to function deterministically. See the determinism test at the end
# of this file.
if test_utils.tf_version_in_list(['1.13']):
self.skipTest("No need to run this test on TF version 1.13")
def testIntTypes(self):
self._conditionally_skip_test()
for t in [np.int8, np.int16, np.int32, np.int64]:
self._testAll(
np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
np.array([1, 2, 3]).astype(t))
def testFloatTypes(self):
self._conditionally_skip_test()
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 3).astype(t), np.random.rand(3).astype(t))
def test4DFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 2, 3).astype(t),
np.random.rand(3).astype(t))
self._testAll(
np.random.rand(2048, 4, 4, 4).astype(t),
np.random.rand(4).astype(t))
self._testAll(
np.random.rand(4, 4, 4, 2048).astype(t),
np.random.rand(2048).astype(t))
def test5DFloatTypes(self):
self._conditionally_skip_test()
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 2, 3, 4).astype(t),
np.random.rand(4).astype(t))
def _random_tensor(self, shape, dtype):
return constant_op.constant(2 * np.random.rand(*shape) - 1, dtype=dtype)
def _computeGradient(self, np_input, bias, dtype, data_format):
input_shape = output_shape = np_input.shape
bias_shape = bias.shape
input_tensor = constant_op.constant(
np_input, shape=input_shape, dtype=dtype)
bias_tensor = constant_op.constant(bias, shape=bias_shape, dtype=dtype)
if context.executing_eagerly():
def bias_add(input_tensor, bias_tensor):
return nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
# The following is a work-around for TF issue 33660. Instead of
# calculating the analytical and numerical gradients for both
# inputs in a single call to compute_gradient, compute_gradient
# is called for each input separately.
def bias_add_1(input_tensor):
return bias_add(input_tensor, bias_tensor)
def bias_add_2(bias_tensor):
return bias_add(input_tensor, bias_tensor)
input_jacob_a, input_jacob_n = gradient_checker_v2.compute_gradient(
bias_add_1, [input_tensor])
bias_jacob_a, bias_jacob_n = gradient_checker_v2.compute_gradient(
bias_add_2, [bias_tensor])
# Test gradient of BiasAddGrad
def bias_add_grad_function(upstream_gradients):
with backprop.GradientTape() as tape:
tape.watch(bias_tensor)
bias_add_output = bias_add(input_tensor, bias_tensor)
gradient_injector_output = bias_add_output * upstream_gradients
return tape.gradient(gradient_injector_output, bias_tensor)
upstream_tensor = self._random_tensor(output_shape, dtype)
grad_jacob_a, grad_jacob_n = gradient_checker_v2.compute_gradient(
bias_add_grad_function, [upstream_tensor])
else:
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
jacobians = gradient_checker.compute_gradient(
[input_tensor, bias_tensor], [input_shape, bias_shape],
output_tensor, output_shape)
(input_jacob_a, input_jacob_n), (bias_jacob_a, bias_jacob_n) = jacobians
# Test gradient of BiasAddGrad
bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor)[0]
grad_jacob_a, grad_jacob_n = gradient_checker.compute_gradient(
output_tensor, output_shape, bias_add_grad, bias_shape)
return ((input_jacob_a, bias_jacob_a, grad_jacob_a),
(input_jacob_n, bias_jacob_n, grad_jacob_n))
def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
with self.cached_session(use_gpu=use_gpu):
if data_format == "NCHW":
np_input = self._NHWCToNCHW(np_input)
jacob_a, jacob_n = self._computeGradient(np_input, bias, dtype,
data_format)
input_jacob_a, bias_jacob_a, grad_jacob_a = jacob_a
input_jacob_n, bias_jacob_n, grad_jacob_n = jacob_n
if dtype == np.float16:
# Compare fp16 analytical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker, but pragmatic, check (in particular, it does not test
# the op itself, only its gradient).
_, jacob_n = self._computeGradient(np_input, bias, np.float32,
data_format)
input_jacob_n, bias_jacob_n, grad_jacob_n = jacob_n
if dtype == dtypes.float64:
threshold = 1e-10
elif np_input.size >= 512:
# The 5e-3 threshold seems to have been marginal in these cases, and
# small changes in the test were pushing it over the limit.
threshold = 5e-2
else:
threshold = 5e-3
self.assertAllClose(input_jacob_a, input_jacob_n, threshold, threshold)
self.assertAllClose(bias_jacob_a, bias_jacob_n, threshold, threshold)
self.assertAllClose(grad_jacob_a, grad_jacob_n, threshold, threshold)
def testGradientTensor2D(self):
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
def testGradientTensor3D(self):
self._conditionally_skip_test()
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(1, 3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
def testGradientTensor4D(self):
for (data_format, use_gpu) in [("NHWC", False)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.arange(
1.0, 49.0,
dtype=dtype.as_numpy_dtype).reshape([2, 3, 4, 2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
np_input = np.arange(
1.0, 513.0,
dtype=dtype.as_numpy_dtype).reshape([64, 2, 2,
2]).astype(np.float32)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
np_input = np.arange(
1.0, 513.0,
dtype=dtype.as_numpy_dtype).reshape([2, 2, 2,
64]).astype(np.float32)
self._testGradient(np_input,
np.random.rand(64).astype(dtype.as_numpy_dtype),
dtype, data_format, use_gpu)
def testGradientTensor5D(self):
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.arange(
1.0, 49.0,
dtype=dtype.as_numpy_dtype).reshape([1, 2, 3, 4,
2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
def testEmpty(self):
self._conditionally_skip_test()
np.random.seed(7)
for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testAll(np.random.randn(*shape), np.random.randn(shape[-1]))
def testEmptyGradient(self):
self._conditionally_skip_test()
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
for shape in (0, 0), (2, 0), (0, 2):
self._testGradient(
np.random.randn(*shape), np.random.randn(shape[-1]), dtypes.float64,
data_format, use_gpu)
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for shape in (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testGradient(
np.random.randn(*shape), np.random.randn(shape[-1]), dtypes.float64,
data_format, use_gpu)
class BiasAddTestDeterministic(test.TestCase):
def _makeShapeTuple(self, batch_size, channel_count, data_rank, data_dim,
data_layout):
data_dims = data_rank * (data_dim,)
if data_layout == 'channels_first':
shape = (batch_size,) + (channel_count,) + data_dims
elif data_layout == 'channels_last':
shape = (batch_size,) + data_dims + (channel_count,)
else:
raise ValueError('Unknown data format')
return shape
def _dataFormatFromDataLayout(self, data_layout=None):
if data_layout == 'channels_first':
return 'NCHW'
elif data_layout == 'channels_last':
return 'NHWC'
else:
raise ValueError('Unknown data_layout')
def _randomNDArray(self, shape):
return 2 * np.random.random_sample(shape) - 1
def _randomDataOp(self, shape, data_type):
return constant_op.constant(self._randomNDArray(shape), dtype=data_type)
def _testDeterministicGradientsCase(self, op_binding, data_layout, data_rank,
data_type):
seed = (
hash(data_layout) % 256 + hash(data_rank) % 256 + hash(data_type) % 256)
np.random.seed(seed)
batch_size = 10
channel_count = 8
data_dim = 14
input_shape = self._makeShapeTuple(batch_size, channel_count, data_rank,
data_dim, data_layout)
bias_shape = (channel_count,)
output_shape = input_shape
input_val = self._randomDataOp(input_shape, data_type)
bias_val = self._randomDataOp(bias_shape, data_type)
data_format = self._dataFormatFromDataLayout(data_layout)
repeat_count = 5
if context.executing_eagerly():
def bias_gradients(local_seed):
np.random.seed(local_seed)
upstream_gradients = self._randomDataOp(output_shape, data_type)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(bias_val)
bias_add_output = op_binding(input_val, bias_val,
data_format=data_format)
gradient_injector_output = bias_add_output * upstream_gradients
return tape.gradient(gradient_injector_output, bias_val)
for i in range(repeat_count):
local_seed = seed + i # select different upstream gradients
result_a = bias_gradients(local_seed)
result_b = bias_gradients(local_seed)
self.assertAllEqual(result_a, result_b)
else:
upstream_gradients = array_ops.placeholder(data_type, shape=output_shape,
name='upstream_gradients')
bias_add_output = op_binding(input_val, bias_val, data_format=data_format)
gradient_injector_output = bias_add_output * upstream_gradients
      # The gradient function behaves as if grad_ys is multiplied by the op's
      # gradient result, rather than passing the upstream gradients through
      # the op's gradient-generation graph. This is the reason for using the
      # gradient injector.
bias_gradients = gradients_impl.gradients(
gradient_injector_output,
bias_val,
grad_ys=None,
colocate_gradients_with_ops=True)[0]
for i in range(repeat_count):
feed_dict = {upstream_gradients: self._randomNDArray(output_shape)}
result_a = bias_gradients.eval(feed_dict=feed_dict)
result_b = bias_gradients.eval(feed_dict=feed_dict)
self.assertAllEqual(result_a, result_b)
@test_util.run_in_graph_and_eager_modes
def testDeterministicGradients(self):
with test_utils.force_gpu_session(self):
# There are problems with using force_gpu=True and cached_session with
# both eager mode and graph mode in the same test. Using a non-cached
# session and putting everything inside the same session context is
# a compromise.
for op_binding in (tf.nn.bias_add, nn.bias_add, nn_ops.bias_add):
for data_layout in ('channels_first', 'channels_last'):
          # With the selected layer configuration, at least in TensorFlow
          # version 2.0, when data_layout='channels_last', bias_add operates
          # deterministically by default. I don't know if this is true for
          # all layer configurations. These cases are still being tested here,
          # for completeness.
          # TF 1.13 only supports a data rank of 2 here; a note about this
          # should be added for users.
if package_utils._Version(tf.version.VERSION).equals("1.13"):
data_ranks = (2,)
else:
data_ranks = (1, 2, 3)
for data_rank in data_ranks:
for data_type in (dtypes.float16, dtypes.float32, dtypes.float64):
self._testDeterministicGradientsCase(op_binding, data_layout,
data_rank, data_type)
class BiasAddTestMisc(test.TestCase):
def testDocstringCopy(self):
docstring = tf.nn.bias_add.__doc__
if not docstring: # falsy (None or "")
self.fail("The patched op has no docstring")
if docstring.startswith('ERROR'):
self.fail("The docstring for the patched op has not been assigned")
if __name__ == "__main__":
tf_determinism.enable_determinism()
test.main()
| framework-reproducibility-master | test/d9m/test_bias_add_d9m.py |
# Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# Reported at https://github.com/tensorflow/tensorflow/issues/33660
import numpy as np
import sys
import tensorflow as tf
def section(msg):
print("\n")
print("=======================================================")
print(msg)
print("-------------------------------------------------------")
def empty(rank):
shape = (0,) * rank
return np.array([]).reshape(shape)
def eager_repro_bias_add():
section("EAGER REPRO with tf.nn.biad_add")
tf.test.compute_gradient(tf.nn.bias_add, [empty(3), empty(1)])
def eager_repro_matmul():
section("EAGER RERPO with tf.linalg.matmul")
tf.test.compute_gradient(tf.linalg.matmul, [empty(2), empty(3)])
def eager_work_around():
section("EAGER WORK-AROUND")
input_val = empty(3)
bias_val = empty(1)
def bias_add_1(input_val):
return tf.nn.bias_add(input_val, bias_val)
def bias_add_2(bias_val):
return tf.nn.bias_add(input_val, bias_val)
input_jacobians = tf.test.compute_gradient(bias_add_1, [input_val])
bias_jacobians = tf.test.compute_gradient(bias_add_2, [bias_val])
def empty_tensor(shape):
return tf.constant([], shape=shape)
def graph_repro():
section("GRAPH MODE REPRO (does not throw exception)")
tf.compat.v1.disable_eager_execution()
input_shape = output_shape = (0, 0, 0)
bias_shape = (0,)
input_tensor = empty_tensor(input_shape)
bias_tensor = empty_tensor(bias_shape)
output_tensor = tf.nn.bias_add(input_tensor, bias_tensor)
with tf.compat.v1.Session() as sess:
jacobians = tf.compat.v1.test.compute_gradient(
[input_tensor, bias_tensor], [input_shape, bias_shape], output_tensor,
output_shape)
def random_tensor(shape):
return tf.constant(np.random.random_sample(shape))
# Taken from tensorflow/python/ops/gradient_checker_v2_test.py / testEmptyMatMul
def existing_test_repro():
section("EXISTING TEST REPRO")
def f(x, y):
return tf.linalg.matmul(x, y)
x = random_tensor((0, 3))
y = random_tensor((3, 4))
jacobians = tf.test.compute_gradient(f, [x, y])
if __name__ == "__main__":
if not tf.__version__.startswith("2."):
raise("This is designed to run with TensorFlow version 2")
exec("%s()" % sys.argv[1])
| framework-reproducibility-master | test/d9m/archive/tf_issue_33660/tf_issue_33660.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
__version__ = "0.6.0"
| framework-reproducibility-master | fwr13y/version.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .version import __version__
| framework-reproducibility-master | fwr13y/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import random
import time
import sys
_MAX_INT32 = 2147483647
class SeedGen:
def __init__(self, master_seed, ngpus, local_rank):
self.master_seed = master_seed
self.ngpus = ngpus
self.local_rank = local_rank
self.ntasks = 2
self._used_seeds = set()
self._rng = random.Random(0)
def __call__(self, task, epoch, shared_seed=False):
if shared_seed:
            # Use the same seed for every rank. The constant factor at the
            # beginning is intended to keep these values from colliding with
            # the non-shared seeds.
seed = 2 * (
self.master_seed + epoch
) * self.ntasks + task
else:
# Use different seed for every rank
seed = (
self.master_seed + (epoch * self.ngpus + self.local_rank)
) * self.ntasks + task
if seed in self._used_seeds:
print(
"Warning!!! seed has been generated more than once!!!", file=sys.stderr
)
self._used_seeds.add(seed)
self._rng.seed(seed)
return self._rng.randint(0, _MAX_INT32)
def generate_master_seed_randomly():
to_micro = 10 ** 6
    # For compatibility with older Python versions, we can't use time.time_ns().
seed = int(time.time() * to_micro) % _MAX_INT32
return seed
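# Example usage sketch (values are illustrative):
#
#   seed_gen = SeedGen(master_seed=42, ngpus=8, local_rank=0)
#   per_rank_seed = seed_gen(task=0, epoch=3)
#   shared_seed = seed_gen(task=0, epoch=3, shared_seed=True)  # same on every rank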
| framework-reproducibility-master | fwr13y/seeder/seed_gen.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import random
import tensorflow as tf
import numpy as np
from .seed_gen import SeedGen, generate_master_seed_randomly
class Seeder:
def __init__(self, master_seed, ngpus, local_rank):
self.master_seed_was_none = master_seed is None
if master_seed is None and local_rank == 0:
print('INFO: master_seed is None in seeder.init, random '
'master_seed will be generated (different one for each '
'worker).')
self.master_seed = (master_seed if master_seed is not None
else generate_master_seed_randomly())
self.seed_gen = SeedGen(self.master_seed, ngpus, local_rank)
self._ext_generators = []
self._ext_generators_shared = []
def register_generator(self, gen, shared=False):
if shared:
if self.master_seed_was_none:
raise Exception('master_seed was None during seeder.init, '
'seeds shared among workers cannot be used.')
self._ext_generators_shared.append(gen)
else:
self._ext_generators.append(gen)
def unregister_generator(self, gen):
try:
self._ext_generators.remove(gen)
except ValueError:
self._ext_generators_shared.remove(gen)
def reseed(self, task, epoch):
seed = self.seed_gen(task, epoch)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
for generator in self._ext_generators:
generator(seed)
if self._ext_generators_shared:
shared_seed = self.seed_gen(task, epoch, shared_seed=True)
for generator in self._ext_generators_shared:
generator(shared_seed)
_seeder_run = None
def init(master_seed, ngpus, local_rank):
global _seeder_run
_seeder_run = Seeder(master_seed, ngpus, local_rank)
def reseed(task, epoch=0):
global _seeder_run
_seeder_run.reseed(task, epoch)
def get_master_seed():
global _seeder_run
return _seeder_run.master_seed
def register_generator(gen, shared=False):
global _seeder_run
_seeder_run.register_generator(gen, shared)
def unregister_generator(gen):
global _seeder_run
_seeder_run.unregister_generator(gen)
class SeederCB(tf.keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global _seeder_run
_seeder_run.reseed(1, epoch)
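# Example usage sketch for single-GPU Keras training (`model` and `dataset`
# are assumed to be defined by the application):
#
#   from fwr13y.seeder import tf as seeder
#   seeder.init(master_seed=123, ngpus=1, local_rank=0)
#   seeder.reseed(0)  # e.g. task 0: model initialization
#   model.fit(dataset, epochs=10, callbacks=[seeder.SeederCB()])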
| framework-reproducibility-master | fwr13y/seeder/tf.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import random
import paddle
import numpy as np
from .seed_gen import SeedGen, generate_master_seed_randomly
class Seeder:
def __init__(self, master_seed, ngpus, local_rank):
self.master_seed_was_none = master_seed is None
if master_seed is None and local_rank == 0:
print('INFO: master_seed is None in seeder.init, random '
'master_seed will be generated (different one for '
'each worker).')
self.master_seed = (master_seed if master_seed is not None
else generate_master_seed_randomly())
self.seed_gen = SeedGen(self.master_seed, ngpus, local_rank)
self._ext_generators = []
self._ext_generators_shared = []
def register_generator(self, gen, shared=False):
if shared:
if self.master_seed_was_none:
raise Exception('master_seed was None during seeder.init, '
'seeds shared among workers cannot be used.')
self._ext_generators_shared.append(gen)
else:
self._ext_generators.append(gen)
def unregister_generator(self, gen):
try:
self._ext_generators.remove(gen)
except ValueError:
self._ext_generators_shared.remove(gen)
def reseed(self, task, epoch):
seed = self.seed_gen(task, epoch)
paddle.seed(seed)
np.random.seed(seed)
random.seed(seed)
for generator in self._ext_generators:
generator(seed)
if self._ext_generators_shared:
shared_seed = self.seed_gen(task, epoch, shared_seed=True)
for generator in self._ext_generators_shared:
generator(shared_seed)
_seeder_run = None
def init(master_seed, ngpus, local_rank):
global _seeder_run
_seeder_run = Seeder(master_seed, ngpus, local_rank)
def reseed(task, epoch=0):
global _seeder_run
_seeder_run.reseed(task, epoch)
def get_master_seed():
global _seeder_run
return _seeder_run.master_seed
def register_generator(gen, shared=False):
global _seeder_run
_seeder_run.register_generator(gen, shared)
def unregister_generator(gen):
global _seeder_run
_seeder_run.unregister_generator(gen)
| framework-reproducibility-master | fwr13y/seeder/paddle.py |
# Copyright 2022 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| framework-reproducibility-master | fwr13y/seeder/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import random
import torch
import numpy as np
from .seed_gen import SeedGen, generate_master_seed_randomly
class Seeder:
def __init__(self, master_seed, ngpus, local_rank):
self.master_seed_was_none = master_seed is None
if master_seed is None and local_rank == 0:
print('INFO: master_seed is None in seeder.init, random '
'master_seed will be generated (different one for '
'each worker).')
self.master_seed = (master_seed if master_seed is not None
else generate_master_seed_randomly())
self.seed_gen = SeedGen(self.master_seed, ngpus, local_rank)
self._ext_generators = []
self._ext_generators_shared = []
def register_generator(self, gen, shared=False):
if shared:
if self.master_seed_was_none:
raise Exception('master_seed was None during seeder.init, '
'seeds shared among workers cannot be used.')
self._ext_generators_shared.append(gen)
else:
self._ext_generators.append(gen)
def unregister_generator(self, gen):
try:
self._ext_generators.remove(gen)
except ValueError:
self._ext_generators_shared.remove(gen)
def reseed(self, task, epoch):
seed = self.seed_gen(task, epoch)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
for generator in self._ext_generators:
generator(seed)
if self._ext_generators_shared:
shared_seed = self.seed_gen(task, epoch, shared_seed=True)
for generator in self._ext_generators_shared:
generator(shared_seed)
_seeder_run = None
def init(master_seed, ngpus, local_rank):
global _seeder_run
_seeder_run = Seeder(master_seed, ngpus, local_rank)
def reseed(task, epoch=0):
global _seeder_run
_seeder_run.reseed(task, epoch)
def get_master_seed():
global _seeder_run
return _seeder_run.master_seed
def register_generator(gen, shared=False):
global _seeder_run
_seeder_run.register_generator(gen, shared)
def unregister_generator(gen):
global _seeder_run
_seeder_run.unregister_generator(gen)
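# Example usage sketch (`world_size`, `rank`, `set_sampler_seed`, and
# `train_one_epoch` are illustrative names, not part of this module; any
# callable accepting a single integer seed can be registered):
#
#   from fwr13y.seeder import pyt as seeder
#   seeder.init(master_seed=42, ngpus=world_size, local_rank=rank)
#   seeder.register_generator(set_sampler_seed)
#   for epoch in range(num_epochs):
#       seeder.reseed(1, epoch)  # task 1: per-epoch training
#       train_one_epoch()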
| framework-reproducibility-master | fwr13y/seeder/pyt.py |
# Copyright 2022 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| framework-reproducibility-master | fwr13y/d9m/__init__.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
def _confirm_string(input):
if not isinstance(input, str): raise TypeError("Only strings are allowed!")
class _Version:
"""Provides version comparison functionality"""
def __init__(self, version_string):
"""Provide a version string containing at least a major and minor version"""
_confirm_string(version_string)
    version_pieces = re.split(r'\.|-', version_string)
if len(version_pieces) < 2:
raise ValueError("The version string must contain at least a major and a minor version")
major = version_pieces[0]
minor = version_pieces[1]
self.original_version_string = version_string
self.major_minor_version_string = major + '.' + minor
self.major = int(major)
self.minor = int(minor)
def __str__(self):
string = (
"Version object: " +
"original_version_string: %s; " % self.original_version_string +
"major_minor_version_string: %s; " % self.major_minor_version_string +
"major: %i; " % self.major +
"minor: %i" % self.minor)
return string
def in_list(self, list_of_versions):
"""Is the version in the list of version provided?"""
for version in list_of_versions: _confirm_string(version)
return self.major_minor_version_string in list_of_versions
def _only_major_and_minor(self, version):
version_pieces = version.split('.')
if len(version_pieces) != 2:
raise ValueError("The version string must contain a major and a minor version (only)")
major = int(version_pieces[0])
minor = int(version_pieces[1])
return major, minor
def at_least(self, oldest_version):
"""Is the version at least the oldest_version provided?"""
_confirm_string(oldest_version)
oldest_major, oldest_minor = self._only_major_and_minor(oldest_version)
if (self.major > oldest_major or
self.major == oldest_major and self.minor >= oldest_minor):
return True
else:
return False
def at_most(self, newest_version):
"""Is the version at most the newest version provided?"""
_confirm_string(newest_version)
newest_major, newest_minor = self._only_major_and_minor(newest_version)
if (self.major < newest_major or
self.major == newest_major and self.minor <= newest_minor):
return True
else:
return False
def between(self, oldest_version, newest_version):
"""Is the version between the oldest and newest versions
provided (inclusive)?"""
_confirm_string(oldest_version)
_confirm_string(newest_version)
if self.at_least(oldest_version) and self.at_most(newest_version):
return True
else:
return False
def equals(self, target_version):
"""Is the version equal to the version provided?"""
_confirm_string(target_version)
target_major, target_minor = self._only_major_and_minor(target_version)
if (self.major == target_major and self.minor == target_minor):
return True
else:
return False
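# Example usage (illustrative):
#
#   v = _Version("2.4.1")
#   v.at_least("2.1")          # True
#   v.between("1.14", "2.6")   # True
#   v.in_list(["2.3", "2.4"])  # True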
| framework-reproducibility-master | fwr13y/d9m/utils.py |
# Copyright 2019-2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tensorflow as tf
from . import patch_bias_add
from .. import utils
from ... import version
# This function was used to patch tf.nn.bias_add in a limited range of stock
# TensorFlow versions. It is now deprecated and we are no longer developing it.
# enable_determinism should be used.
def _patch(_silent=False):
"""Patches TensorFlow to increase determinism when running on GPUs.
Calling this method either before or after explicitly importing TensorFlow,
  but always before constructing any graphs, will increase the determinism
when running on GPUs.
Returns: nothing
Raises:
TypeError (1) if a patch is not available for the installed version of
TensorFlow (either because it doesn't need one or because one has not
yet been implemented), or (2) if there is an attempt to apply the patch
inside an NGC TF container (where it should not be needed).
"""
if not _silent:
print("WARNING: %s has been deprecated. Please use enable_determinism "
"(which supports all versions of TensorFlow)." % __name__)
if os.environ.get('NVIDIA_TENSORFLOW_VERSION'):
raise TypeError("%s: TensorFlow inside NGC containers does not "
"require patching" % __name__)
tf_vers = utils._Version(tf.version.VERSION)
if tf_vers.between('1.14', '2.0'):
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
patch_bias_add._patch_bias_add()
if not _silent:
print("TensorFlow version %s has been patched using %s version %s" %
(tf_vers.original_version_string, __name__,
version.__version__))
else:
raise TypeError("%s: No patch available for version %s of TensorFlow" %
(__name__, tf_vers.original_version_string))
| framework-reproducibility-master | fwr13y/d9m/tensorflow/patch.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.framework import dtypes as dtypes_lib
# NOTE: This patch only provides GPU-determinism for data types float16,
# float32, complex64, and bfloat16.
def _patch_unsorted_segment_sum():
_new_unsorted_segment_sum.__doc__ = tf.math.unsorted_segment_sum.__doc__
math_ops.unsorted_segment_sum = _new_unsorted_segment_sum
tf.math.unsorted_segment_sum = _new_unsorted_segment_sum # via public API
# The original, pre-patched function is automatically-generated. Therefore, we
# cannot provide a URL to its location in the source repository.
# For the history of this patch, please refer to
# https://github.com/tensorflow/tensorflow/issues/39751
def _new_unsorted_segment_sum(data, segment_ids, num_segments, name=None):
"""ERROR: docstring should have been added programatically. """
with ops.name_scope(
name, "UnsortedSegmentSum", [data, segment_ids, num_segments]) as name:
# Note that data can be a vector-like list (or an n-dimensional
# tensor-like list of lists). We convert to tensor here to replicate the
# behavior of the pre-existing op.
data = ops.convert_to_tensor(data, name="input_data")
segment_ids = ops.convert_to_tensor(segment_ids, name="segment_ids")
num_segments = ops.convert_to_tensor(num_segments, name="num_segments")
orig_dtype = data.dtype
if orig_dtype is dtypes_lib.float32:
data = tf.cast(data, dtype=tf.float64)
elif orig_dtype is dtypes_lib.float16:
data = tf.cast(data, dtype=tf.float32)
elif orig_dtype is dtypes_lib.complex64:
data = tf.cast(data, dtype=tf.complex128)
elif orig_dtype is dtypes_lib.bfloat16:
data = tf.cast(data, dtype=tf.float32)
    elif orig_dtype in (dtypes_lib.float64, dtypes_lib.complex128):
warnings.warn(
"Data type %s is not supported for GPU-determinism" % data.dtype,
UserWarning)
result = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
return tf.cast(result, dtype=orig_dtype)
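# Behavior sketch for an unsupported type: after the patch is applied, a
# float64 input passes through uncast but triggers the warning above
# (tensor values are illustrative):
#
#   data = tf.constant([1.0, 2.0, 3.0], dtype=tf.float64)
#   out = tf.math.unsorted_segment_sum(data, [0, 0, 1], num_segments=2)
#   # => UserWarning: Data type <dtype: 'float64'> is not supported for
#   #    GPU-determinism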
| framework-reproducibility-master | fwr13y/d9m/tensorflow/patch_unsorted_segment_sum.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import re
import tensorflow as tf
# By calling the deprecated patch API here, we continue to test its effect
# without having to test it explicitly. Note that this form of import
# necessarily breaks the Google Python Style Guide rule to import packages
# and modules only (and not individual functions).
from . import patch as patch_api_module
from . import patch_segment_sum
from . import patch_unsorted_segment_sum
from .. import utils
from ... import version
def _enable_determinism():
"""Provides a best-effort recipe to increase framework determinism when
running on GPUs.
Call this method after importing TensorFlow and before constructing any
graphs.
This function cannot address all possible sources of nondeterminism. Please
see further instructions at
https://github.com/NVIDIA/framework-reproducibility to understand how to use
it in a larger deterministic context.
Arguments:
None
Returns: None
"""
tf_vers = utils._Version(tf.version.VERSION)
ngc_tf_container_version_string = os.environ.get('NVIDIA_TENSORFLOW_VERSION')
if ngc_tf_container_version_string:
in_ngc_cont = True
ngc_vers = utils._Version(ngc_tf_container_version_string)
else:
in_ngc_cont = False
if tf_vers.at_least('2.8'):
tf.config.experimental.enable_op_determinism()
  elif (in_ngc_cont and ngc_vers.at_least('19.06')) or tf_vers.at_least('2.1'):
os.environ['TF_DETERMINISTIC_OPS'] = '1'
elif not in_ngc_cont and tf_vers.between('1.14', '2.0'):
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
patch_api_module._patch(_silent=True)
  if ((in_ngc_cont and ngc_vers.between('19.06', '21.12')) or
      tf_vers.between('1.14', '2.6')):
patch_segment_sum._patch_segment_sum()
patch_unsorted_segment_sum._patch_unsorted_segment_sum()
if tf_vers.at_least('2.5'):
os.environ['TF_DISABLE_SEGMENT_REDUCTION_OP_DETERMINISM_EXCEPTIONS'] = '1'
print("%s (version %s) has been applied to TensorFlow "
"version %s" % (__name__, version.__version__,
tf_vers.original_version_string))
| framework-reproducibility-master | fwr13y/d9m/tensorflow/enable_determinism.py |
# Copyright 2019 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# What follows is the public API for fwr13y.tensorflow
from .enable_determinism import _enable_determinism as enable_determinism
from .patch import _patch as patch # deprecated
| framework-reproducibility-master | fwr13y/d9m/tensorflow/__init__.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
def _patch_bias_add():
_new_bias_add.__doc__ = tf.nn.bias_add.__doc__
tf.nn.bias_add = _new_bias_add # access via public API
nn.bias_add = _new_bias_add # called from tf.keras.layers.convolutional.Conv
nn_ops.bias_add = _new_bias_add # called from tests
# The original, pre-patched method can be viewed at
# https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/ops/nn_ops.py#L2628
#
# This patched version of bias_add does not implement some of the error checks
# provided by the original op. For more information, see the list of test cases
# excluded from the testing of the patched op functionality.
def _new_bias_add(value, bias, data_format=None, name=None):
"""ERROR: docstring should have been added programatically. """
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if data_format is not None:
if data_format.startswith("NC"):
data_format = "NCHW"
elif data_format.startswith("N") and data_format.endswith("C"):
data_format = "NHWC"
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
if data_format == 'NCHW':
broadcast_shape_head = [1, array_ops.size(bias)]
broadcast_shape_tail = array_ops.ones(array_ops.rank(value) - 2,
dtype=dtypes.int32)
broadcast_shape = array_ops.concat(
[broadcast_shape_head, broadcast_shape_tail], 0)
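      # Worked example (illustrative): for a rank-4 NCHW input and a bias of
      # length C, broadcast_shape is [1, C, 1, 1], so the bias broadcasts
      # across the batch and spatial dimensions in the add below.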
return math_ops.add(
value, array_ops.reshape(bias, broadcast_shape), name=name)
else: # data_format == 'NHWC' or data_format == None
return math_ops.add(value, bias, name=name)
| framework-reproducibility-master | fwr13y/d9m/tensorflow/patch_bias_add.py |
# Copyright 2020 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import dtypes
# NOTE: This patch only provides GPU-determinism for data types float16,
# float32, and bfloat16.
def _patch_segment_sum():
_new_segment_sum.__doc__ = tf.math.segment_sum.__doc__
math_ops.segment_sum = _new_segment_sum
tf.math.segment_sum = _new_segment_sum # access via public API
# The original, pre-patched function is automatically generated. Therefore, we
# cannot provide a URL to its location in the source repository.
# For the history of this patch, please refer to
# https://github.com/tensorflow/tensorflow/issues/39751
def _new_segment_sum(data, segment_ids, name=None):
"""ERROR: docstring should have been added programatically. """
with ops.name_scope(name, "SegmentSum", [data, segment_ids]) as name:
if not context.executing_eagerly():
# Note that data can be a vector-like list (or an n-dimensional
# tensor-like list of lists). We convert to tensor here to replicate the
# behavior of the pre-existing op.
data = ops.convert_to_tensor(data, name="input_data")
segment_ids = ops.convert_to_tensor(segment_ids, name="segment_ids")
orig_dtype = data.dtype
if orig_dtype is dtypes.float32:
data = tf.cast(data, dtype=tf.float64)
elif orig_dtype is dtypes.float16:
data = tf.cast(data, dtype=tf.float32)
elif orig_dtype is dtypes.bfloat16:
data = tf.cast(data, dtype=tf.float32)
elif orig_dtype is dtypes.float64:
warnings.warn(
"Data type %s is not supported for GPU-determinism" %
data.dtype, UserWarning)
result = gen_math_ops.segment_sum(data, segment_ids)
return tf.cast(result, dtype=orig_dtype)
| framework-reproducibility-master | fwr13y/d9m/tensorflow/patch_segment_sum.py |
# Copyright 2020-2023 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import importlib
import setuptools
# package_name = 'fwd9m'
package_name = 'tfdeterminism'
print("PACKAGE IMPORT WARNING (expected):")
package = importlib.import_module(package_name)
description = ("Providing reproducibility in deep learning frameworks")
url = "https://github.com/NVIDIA/%s" % package.distribution_name
print("Now running setuptools.setup()")
# Note that using python 3.6 (i.e. via the `python3.6` executable) results in
# the long_description_content_type being ignored.
# For more info, see https://github.com/di/markdown-description-example/issues/4
setuptools.setup(
name = package.distribution_name,
version = package.version,
packages = [package_name],
url = url,
license = 'Apache 2.0',
author = 'NVIDIA',
author_email = '[email protected]',
description = description,
long_description = package.long_description,
long_description_content_type = 'text/markdown',
install_requires = [],
classifiers = [],
keywords = [],
platforms = []
)
| framework-reproducibility-master | deprecated/setup.py |
# Copyright 2023 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
distribution_name = "tensorflow-determinism"
version = "0.4.0"
long_description = """
The `tensorflow-determinism` PyPI distribution has been deprecated and the
`tfdeterminism` package in this distribution contains no code.
Please install the latest version of the
[framework-reproducibility](https://pypi.org/project/framework-reproducibility/)
PyPI distribution.
"""
print(long_description)
| framework-reproducibility-master | deprecated/tfdeterminism/__init__.py |
# Copyright 2023 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
distribution_name = "framework-determinism"
version = "0.1.0"
long_description = """
The `framework-determinism` PyPI distribution has been deprecated and the
`fwd9m` package in this distribution contains no code.
Please install the latest version of the
[framework-reproducibility](https://pypi.org/project/framework-reproducibility/)
PyPI distribution.
"""
print(long_description)
| framework-reproducibility-master | deprecated/fwd9m/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script to build the TAO Toolkit package."""
import os
import setuptools
from release.python.utils import utils
PACKAGE_LIST = [
"nvidia_tao_tf2"
]
version_locals = utils.get_version_details()
setuptools_packages = []
for package_name in PACKAGE_LIST:
setuptools_packages.extend(utils.find_packages(package_name))
setuptools.setup(
name=version_locals['__package_name__'],
version=version_locals['__version__'],
description=version_locals['__description__'],
author='NVIDIA Corporation',
classifiers=[
'Environment :: Console',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: Linux',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
license=version_locals['__license__'],
keywords=version_locals['__keywords__'],
packages=setuptools_packages,
package_data={
'': ['*.py', "*.pyc", "*.yaml", "*.so", "*.pdf"]
},
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'classification_tf2=nvidia_tao_tf2.cv.classification.entrypoint.classification:main',
'efficientdet_tf2=nvidia_tao_tf2.cv.efficientdet.entrypoint.efficientdet:main',
]
}
)
| tao_tensorflow2_backend-main | setup.py |
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup, find_packages
from pathlib import Path
import os
abspath = os.path.dirname(os.path.realpath(__file__))
license_header = """#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
# Generate version file
with open(os.path.join(abspath, "VERSION")) as f:
version = f.read().strip()
with open(os.path.join(abspath, "tensorflow_quantization/version.py"), "w") as f:
f.write(license_header)
f.write(F"__version__ = \"{version}\"")
project_dir = Path(__file__).parent
# Setting up
setup(
name="tensorflow_quantization",
version=version,
description="NVIDIA TensorFlow 2.x quantization toolkit",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
packages=["tensorflow_quantization"],
python_requires=">=3.6",
include_package_data=True,
author="NVIDIA",
author_email="[email protected]",
license="Apache 2.0",
) | tao_tensorflow2_backend-main | docker/tensorflow_quantization/setup.py |
# Copyright NVIDIA Corporation
"""Instantiate the TAO-TF2 docker container for developers."""
import argparse
from distutils.version import LooseVersion
import json
import os
import subprocess
import sys
ROOT_DIR = os.getenv("NV_TAO_TF2_TOP", os.path.dirname(os.path.dirname(os.getcwd())))
print(f"Current root directory {ROOT_DIR}")
with open(os.path.join(ROOT_DIR, "docker/manifest.json"), "r") as m_file:
docker_config = json.load(m_file)
DOCKER_REGISTRY = docker_config["registry"]
DOCKER_REPOSITORY = docker_config["repository"]
DOCKER_TAG = docker_config["tag"]
DOCKER_COMMAND = "docker"
HOME_PATH = os.path.expanduser("~")
MOUNTS_PATH = os.path.join(HOME_PATH, ".tao_mounts.json")
def get_docker_mounts_from_file(mounts_file=MOUNTS_PATH):
"""Check for docker mounts in ~/.tao_mounts.json."""
if not os.path.exists(mounts_file):
return []
with open(mounts_file, 'r') as mfile:
data = json.load(mfile)
assert "Mounts" in list(data.keys()), "Invalid json file. Requires Mounts key."
return data["Mounts"]
def format_mounts(mount_points):
"""Format mount points to docker recognizable commands."""
formatted_mounts = []
# Traverse through mount points and add format them for the docker command.
for mount_point in mount_points:
assert "source" in list(mount_point.keys()), "destination" in list(mount_point.keys())
mount = "{}:{}".format(mount_point["source"], mount_point["destination"])
formatted_mounts.append(mount)
return formatted_mounts
def check_image_exists(docker_image):
"""Check if the image exists locally."""
check_command = '{} images | grep "\\<{}\\>" | grep "{}" >/dev/null 2>&1'.format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(check_command, stdout=sys.stderr, shell=True)
return rc == 0
def pull_base_container(docker_image):
"""Pull the default base container."""
pull_command = "{} pull {}:{}".format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(pull_command, stdout=sys.stderr, shell=True)
return rc == 0
def get_formatted_mounts(mount_file):
"""Simple function to get default mount points."""
default_mounts = get_docker_mounts_from_file(mount_file)
return format_mounts(default_mounts)
def check_mounts(formatted_mounts):
"""Check the formatted mount commands."""
    assert isinstance(formatted_mounts, list)
for mounts in formatted_mounts:
source_path = mounts.split(":")[0]
if not os.path.exists(source_path):
raise ValueError("Path doesn't exist: {}".format(source_path))
return True
def get_docker_gpus_prefix(gpus):
"""Get the docker command gpu's prefix."""
docker_version = (
subprocess.check_output(
["docker", "version", "--format={{ .Server.APIVersion }}"]
)
.strip()
.decode()
)
if LooseVersion(docker_version) >= LooseVersion("1.40"):
# You are using the latest version of docker using
# --gpus instead of the nvidia runtime.
gpu_string = "--gpus "
if gpus == "all":
gpu_string += "all"
else:
gpu_string += "\'\"device={}\"\'".format(gpus)
else:
# Stick to the older version of getting the gpu's using runtime=nvidia
gpu_string = "--runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=all "
if gpus != "none":
gpu_string += "-e NVIDIA_VISIBLE_DEVICES={}".format(gpus)
return gpu_string
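# For example (illustrative): gpus="0,1" yields `--gpus '"device=0,1"'` on
# Docker API >= 1.40, or `--runtime=nvidia ... -e NVIDIA_VISIBLE_DEVICES=0,1`
# on older daemons.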
def create_base_docker():
"""Function to create the base docker."""
create_command = "bash {}/docker/build.sh --build".format(ROOT_DIR)
try:
subprocess.run(create_command, stdout=sys.stderr, shell=True, check=True)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Container build failed with error {e}")
def instantiate_dev_docker(gpus, mount_file,
mount_cli_list,
env_var_list,
command, ulimit=None,
shm_size="16G", run_as_user=False,
port_mapping=None):
"""Instiate the docker container."""
docker_image = "{}/{}:{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY, DOCKER_TAG)
# Invoking the nvidia docker.
gpu_string = get_docker_gpus_prefix(gpus)
# Prefix for the run command.
run_command = "{} run -it --rm".format(DOCKER_COMMAND)
# get default mount points.
formatted_mounts = get_formatted_mounts(MOUNTS_PATH)
# get mounts from cli mount file.
formatted_mounts += get_formatted_mounts(mount_file)
if mount_cli_list is not None:
formatted_mounts.extend(mount_cli_list)
assert check_mounts(formatted_mounts), "Mounts don't exists, Please make sure the paths all exist."
mount_string = "-v {}:/workspace/tao-tf2 ".format(os.getenv("NV_TAO_TF2_TOP", os.getcwd()))
# Defining env variables.
env_variables = "-e PYTHONPATH={}:$PYTHONPATH ".format("/workspace/tao-tf2")
for env in env_var_list:
if "=" not in env:
print("invalid env variable definition. skipping this {}".format(env))
continue
env_variables += "-e {} ".format(env)
for path in formatted_mounts:
mount_string += "-v {} ".format(path)
# Setting shared memory.
shm_option = "--shm-size {}".format(shm_size)
# Setting ulimits for host
ulimit_options = ""
if ulimit is not None:
for param in ulimit:
ulimit_options += "--ulimit {} ".format(param)
user_option = ""
if run_as_user:
user_option = "--user {}:{}".format(os.getuid(), os.getgid())
working_directory = "/workspace/tao-tf2"
working_dir_option = f"-w {working_directory}"
port_option = "--net=host"
if port_mapping:
port_option += f" -p {port_mapping}"
final_command = "{} {} {} {} {} {} {} {} {} {} {}".format(
run_command, gpu_string,
mount_string, env_variables,
shm_option, ulimit_options, user_option, working_dir_option,
port_option,
docker_image, " ".join(command)
)
print(final_command)
return subprocess.check_call(final_command, stdout=sys.stderr, shell=True)
def parse_cli_args(args=None):
"""Parse run container command line."""
parser = argparse.ArgumentParser(
prog="tao_tf2",
description="Tool to run the TAO Toolkit TensorFlow2 container.",
add_help=True)
parser.add_argument(
"--gpus",
default="all",
type=str,
help="Comma separated GPU indices to be exposed to the docker."
)
parser.add_argument(
"--volume",
action="append",
type=str,
default=[],
help="Volumes to bind."
)
parser.add_argument(
"--env",
action="append",
type=str,
default=[],
help="Environment variables to bind."
)
parser.add_argument(
"--mounts_file",
help="Path to the mounts file.",
default="",
type=str
)
parser.add_argument(
"--shm_size",
help="Shared memory size for docker",
default="16G",
type=str
)
parser.add_argument(
"--run_as_user",
help="Flag to run as user",
action="store_true",
default=False
)
parser.add_argument(
"--ulimit",
action='append',
help="Docker ulimits for the host machine."
)
parser.add_argument(
"--port",
type=str,
default=None,
help="Port mapping (e.g. 8889:8889)."
)
args = vars(parser.parse_args(args))
return args
def main(cl_args=None):
"""Start docker container."""
if "--" in cl_args:
index = cl_args.index("--")
# Split args to the tao docker wrapper and the command to be run inside the docker.
tao_tf_args = cl_args[:index]
command_args = cl_args[index + 1:]
else:
tao_tf_args = cl_args
command_args = ""
# parse command line args.
args = parse_cli_args(tao_tf_args)
docker_image = "{}/{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY)
if not check_image_exists(docker_image):
if not pull_base_container(docker_image):
print("The base container doesn't exist locally and the pull failed. Hence creating the base container")
create_base_docker()
try:
instantiate_dev_docker(
args["gpus"], args["mounts_file"],
args["volume"], args["env"],
command_args, args["ulimit"],
args["shm_size"], args["run_as_user"],
args['port']
)
except subprocess.CalledProcessError:
# Do nothing - the errors are printed in entrypoint launch.
pass
if __name__ == "__main__":
main(sys.argv[1:])
| tao_tensorflow2_backend-main | runner/tao_tf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit TF2 runner""" | tao_tensorflow2_backend-main | runner/__init__.py |
import argparse
import os
import zipfile
from eff.core import Archive, File
from eff.callbacks import BinaryContentCallback
from nvidia_tao_tf2.cv.efficientdet.utils.helper import encode_eff
def parse_command_line(args):
'''Parse command line arguments.'''
    parser = argparse.ArgumentParser(description='EFF Encode Tool')
parser.add_argument('-m',
'--model',
type=str,
required=True,
help='Path to the saved_model.')
parser.add_argument('-o',
'--output',
required=True,
type=str,
                        help='Path to the output .tlt model.')
parser.add_argument('-k',
'--key',
required=False,
default=None,
type=str,
help='encryption key.')
return parser.parse_args(args)
def main(args=None):
args = parse_command_line(args)
encode_eff(args.model, args.output, args.key)
print("Decode successfully.")
if __name__ == "__main__":
main()
| tao_tensorflow2_backend-main | internal/encode.eff.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================
"""Imports a protobuf model as a graph in Tensorboard."""
import argparse
import sys
from absl import app
from tensorflow.python.client import session
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.summary import summary
from tensorflow.python.tools import saved_model_utils
# Try importing TensorRT ops if available
# TODO(aaroey): ideally we should import everything from contrib, but currently
# tensorrt module would cause build errors when being imported in
# tensorflow/contrib/__init__.py. Fix it.
# pylint: disable=unused-import,g-import-not-at-top,wildcard-import
try:
from tensorflow.contrib.tensorrt.ops.gen_trt_engine_op import *
except ImportError:
pass
# pylint: enable=unused-import,g-import-not-at-top,wildcard-import
def import_to_tensorboard(model_dir, log_dir, tag_set):
"""View an SavedModel as a graph in Tensorboard.
Args:
model_dir: The directory containing the SavedModel to import.
log_dir: The location for the Tensorboard log to begin visualization from.
    tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
      separated by ','. If the tag-set contains multiple tags, all tags must
      be passed in.
Usage: Call this function with your SavedModel location and desired log
directory. Launch Tensorboard by pointing it to the log directory. View your
imported SavedModel as a graph.
"""
with session.Session(graph=ops.Graph()) as sess:
input_graph_def = saved_model_utils.get_meta_graph_def(model_dir,
tag_set).graph_def
importer.import_graph_def(input_graph_def)
pb_visual_writer = summary.FileWriter(log_dir)
pb_visual_writer.add_graph(sess.graph)
print("Model Imported. Visualize by running: "
"tensorboard --logdir={}".format(log_dir))
def main(_):
import_to_tensorboard(FLAGS.model_dir, FLAGS.log_dir, FLAGS.tag_set)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
required=True,
help="The directory containing the SavedModel to import.")
parser.add_argument(
"--log_dir",
type=str,
default="",
required=True,
help="The location for the Tensorboard log to begin visualization from.")
parser.add_argument(
"--tag_set",
type=str,
default="serve",
required=False,
help='tag-set of graph in SavedModel to load, separated by \',\'')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| tao_tensorflow2_backend-main | internal/pb_to_tb.py |
import argparse
import os
import zipfile
from eff.core import Archive, File
from eff.callbacks import BinaryContentCallback
def decode_eff(eff_model_path, output_path=None, passphrase=None):
"""Decode EFF to saved_model directory.
Args:
        eff_model_path (str): Path to the eff model.
        output_path (str, optional): Directory to extract the model into.
            Defaults to None.
        passphrase (str, optional): Encryption key. Defaults to None.
Returns:
str: Path to the saved_model
"""
# Decrypt EFF
eff_filename = os.path.basename(eff_model_path)
eff_art = Archive.restore_artifact(
restore_path=eff_model_path,
artifact_name=eff_filename,
passphrase=passphrase)
zip_path = eff_art.get_handle()
# Unzip
ckpt_path = output_path or os.path.dirname(zip_path)
if not os.path.exists(ckpt_path):
os.makedirs(ckpt_path, exist_ok=True)
# TODO(@yuw): try catch?
with zipfile.ZipFile(zip_path, "r") as zip_file:
zip_file.extractall(ckpt_path)
return ckpt_path
def parse_command_line(args):
'''Parse command line arguments.'''
parser = argparse.ArgumentParser(description='EFF Decode Tool')
parser.add_argument('-m',
'--model',
type=str,
required=True,
help='Path to the EFF file.')
parser.add_argument('-o',
'--output',
required=True,
type=str,
help='The path to output directory.')
parser.add_argument('-k',
'--key',
required=False,
default=None,
type=str,
help='encryption key.')
return parser.parse_args(args)
def main(args=None):
args = parse_command_line(args)
decode_eff(args.model, args.output, args.key)
print("Decode successfully.")
if __name__ == "__main__":
main() | tao_tensorflow2_backend-main | internal/decode_eff.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing implementation of release packaging."""
| tao_tensorflow2_backend-main | release/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version string for the TAO Toolkit TF2 AI models/tasks."""
MAJOR = "4"
MINOR = "0.0"
PATCH = "01"
PRE_RELEASE = ''
# Getting the build number.
def get_build_info():
"""Get the build version number."""
    # Local import required: setup.py evaluates the version string via exec,
    # so module-level imports aren't executed.
import os # noqa pylint: disable=import-outside-toplevel
build_file = "build.info"
if not os.path.exists(build_file):
raise FileNotFoundError("Build file doesn't exist.")
patch = 0
with open(build_file, 'r') as bfile:
patch = bfile.read().strip()
assert bfile.closed, "Build file wasn't closed properly."
return patch
try:
PATCH = get_build_info()
except FileNotFoundError:
pass
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Version of the library.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
# Version of the file format.
__format_version__ = 2
# Other package info.
__package_name__ = "nvidia-tao-tf2"
__description__ = "NVIDIA's package for DNN implementation on TensorFlow 2.0 for use with TAO Toolkit."
__keywords__ = "nvidia, tao, tf2"
__contact_names__ = "Yu Wang"
__contact_emails__ = "[email protected]"
__license__ = "Apache 2.0"
| tao_tensorflow2_backend-main | release/python/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Packaging modules for TAO Toolkit.""" | tao_tensorflow2_backend-main | release/python/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing utility functions required for packaging TAO Toolkit modules."""
| tao_tensorflow2_backend-main | release/python/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for packaging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import setuptools
# Rename all .py files to .py_tmp temporarily.
ignore_list = ['__init__.py', '__version__.py']
LOCAL_DIR = os.path.dirname(os.path.abspath(__file__))
def up_directory(dir_path, n=1):
"""Go up n directories from dir_path."""
dir_up = dir_path
for _ in range(n):
dir_up = os.path.split(dir_up)[0]
return dir_up
TOP_LEVEL_DIR = up_directory(LOCAL_DIR, 3)
def remove_prefix(dir_path):
"""Remove a certain prefix from path."""
max_path = 8
prefix = dir_path
while max_path > 0:
prefix = os.path.split(prefix)[0]
if prefix.endswith('ai_infra'):
return dir_path[len(prefix) + 1:]
max_path -= 1
return dir_path
def get_subdirs(path):
"""Get all subdirs of given path."""
dirs = os.walk(path)
return [remove_prefix(x[0]) for x in dirs]
def rename_py_files(path, ext, new_ext, ignore_files):
"""Rename all .ext files in a path to .new_ext except __init__ files."""
files = glob.glob(path + '/*' + ext)
for ignore_file in ignore_files:
files = [f for f in files if ignore_file not in f]
for filename in files:
os.rename(filename, filename.replace(ext, new_ext))
def get_version_details():
"""Simple function to get packages for setup.py."""
# Define env paths.
LAUNCHER_SDK_PATH = os.path.join(TOP_LEVEL_DIR, "release/python")
# Get current __version__.
version_locals = {}
with open(os.path.join(LAUNCHER_SDK_PATH, 'version.py')) as version_file:
exec(version_file.read(), {}, version_locals)
return version_locals
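# For example (illustrative), the returned dict exposes keys such as
# version_locals['__version__'] and version_locals['__package_name__'],
# which setup.py consumes.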
def cleanup():
"""Cleanup directories after the build process."""
req_subdirs = get_subdirs(TOP_LEVEL_DIR)
# Cleanup. Rename all .py_tmp files back to .py and delete pyc files
for dir_path in req_subdirs:
dir_path = os.path.join(TOP_LEVEL_DIR, dir_path)
# TODO: @vpraveen Think about removing python files before the final
# release.
rename_py_files(dir_path, '.py_tmp', '.py', ignore_list)
pyc_list = glob.glob(dir_path + '/*.pyc')
for pyc_file in pyc_list:
os.remove(pyc_file)
def find_packages(package_name):
"""List of packages.
Args:
package_name (str): Name of the package.
Returns:
packages (list): List of packages.
"""
packages = setuptools.find_packages(package_name)
packages = [f"{package_name}.{f}" for f in packages]
packages.append(package_name)
return packages
| tao_tensorflow2_backend-main | release/python/utils/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version string for the NVIDIA_TAO_TF2 models/tasks."""
__version__ = "0.0.1-dev"
| tao_tensorflow2_backend-main | nvidia_tao_tf2/version.py |
| tao_tensorflow2_backend-main | nvidia_tao_tf2/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Blocks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO classification model pruner."""
import logging
from abc import ABC, abstractmethod
from nvidia_tao_tf2.model_optimization.pruning.pruning import prune
import nvidia_tao_tf2.common.no_warning # noqa pylint: disable=W0611
logger = logging.getLogger(__name__)
class Pruner(ABC):
"""Base Pruner."""
def __init__(self, cfg) -> None:
"""Initialize."""
self.cfg = cfg
self.model_path = cfg.prune.checkpoint
self.key = cfg.encryption_key
self.normalizer = cfg.prune.normalizer
self.criterion = 'L2'
self.granularity = cfg.prune.granularity
self.min_num_filters = cfg.prune.min_num_filters
self.equalization_criterion = cfg.prune.equalization_criterion
self.excluded_layers = []
@abstractmethod
def _load_model(self):
pass
def set_model_path(self, model_path):
"""Method to set model path."""
self.model_path = model_path
def prune(self, threshold, excluded_layers):
"""Prune a model."""
self._load_model()
# Pruning trained model
pruned_model = prune(
model=self.model,
method='min_weight',
normalizer=self.normalizer,
criterion=self.criterion,
granularity=self.granularity,
min_num_filters=self.min_num_filters,
threshold=threshold,
equalization_criterion=self.equalization_criterion,
excluded_layers=self.excluded_layers + excluded_layers)
return pruned_model
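# A minimal subclass sketch (illustrative; the Keras loader shown is a
# placeholder for whatever _load_model must populate in self.model):
#
#   class MyPruner(Pruner):
#       def _load_model(self):
#           self.model = tf.keras.models.load_model(self.model_path)
#
#   pruned_model = MyPruner(cfg).prune(threshold=0.5, excluded_layers=[])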
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/pruner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Trainer base class."""
from abc import ABC, abstractmethod
class Trainer(ABC):
"""Abstract class for TAO model trainer."""
def __init__(self, model, config, callbacks=None):
"""Initialize."""
self.model = model
self.callbacks = callbacks
self.config = config
def set_callbacks(self, callbacks):
"""Set callbacks."""
self.callbacks = callbacks
@abstractmethod
def fit(self, train_dataset, eval_dataset,
num_epochs,
steps_per_epoch,
initial_epoch,
validation_steps,
verbose):
"""Run model fitting."""
pass
def train_step(self, data):
"""Train step."""
pass
def test_step(self, data):
"""Test step."""
pass
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Module base class."""
from abc import ABC, abstractmethod
class TAOModule(ABC):
"""Abstract class for TAO Module."""
@abstractmethod
def __init__(self, hparams) -> None:
"""Init."""
pass
@abstractmethod
def configure_losses(self):
"""Configure losses."""
pass
@abstractmethod
def configure_optimizers(self):
"""Configure optimizers."""
pass
@abstractmethod
def compile(self):
"""Compile model."""
pass
def train_step(self, data):
"""Train step."""
pass
def test_step(self, data):
"""Test step."""
pass
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/module.py |
"""Data preprocessing."""
from abc import ABC, abstractmethod
class Postprocessor(ABC):
"""Base class of Input processor."""
@abstractmethod
def __init__(self, config=None):
"""Initializes a new `Postprocessor`.
Args:
config: postprocessing config
"""
pass
@abstractmethod
def generate_detections(self, model_outputs):
"""Process model outputs with postprocessing ops."""
pass
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/processor/postprocessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Processor Block."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/processor/__init__.py |
"""Data preprocessing."""
from abc import ABC, abstractmethod
class Preprocessor(ABC):
"""Base class of Input processor."""
@abstractmethod
def __init__(self, images, output_size):
"""Initializes a new `InputProcessor`.
Args:
images: The input images before processing.
output_size: The output image size after calling resize_and_crop_image
function.
"""
pass
@abstractmethod
def transform(self):
"""Process input images with a series of preprocessing ops."""
pass
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/processor/preprocessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Dataloader module root."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Dataset base class."""
from abc import ABC, abstractmethod
class Dataset(ABC):
"""Abstract class for TAO TF Dataset."""
@abstractmethod
def __init__(self, file_pattern, is_training,
use_fake_data=False, max_instances_per_image=None):
"""init function."""
pass
@abstractmethod
def dataset_parser(self, value, example_decoder, configs=None):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: a single serialized tf.Example string.
example_decoder: TF example decoder.
"""
pass
@abstractmethod
def process_example(self, values, config=None):
"""Processes one batch of data."""
pass
| tao_tensorflow2_backend-main | nvidia_tao_tf2/blocks/dataloader/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all the experimental routines in TAO Toolkit."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/experimental/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import tensorflow as tf
from nvidia_tao_tf2.cv.classification.utils.helper import decode_eff
from nvidia_tao_tf2.experimental.decorators.experimental import experimental
MB = 1 << 20
logger = logging.getLogger(__name__)
def parse_command_line(cl_args="None"):
"""Parse command line args."""
parser = argparse.ArgumentParser(
prog="export_tflite",
description="Export keras models to tflite."
)
parser.add_argument(
"--model_file",
type=str,
default="",
help="Path to a model file."
)
parser.add_argument(
"--key",
type=str,
default="",
help="Key to load the model."
)
parser.add_argument(
"--output_file",
type=str,
default="",
help="Path to the output model file."
)
args = vars(parser.parse_args(cl_args))
return args
@experimental
def main(cl_args=None):
"""Main wrapper to run the tflite converter."""
# Convert the model
args = parse_command_line(cl_args=cl_args)
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO"
)
input_model_file = args["model_file"]
output_model_file = args["output_file"]
key = args["key"]
if not output_model_file:
output_model_file = f"{os.path.splitext(input_model_file)[0]}.tflite"
if os.path.isdir(input_model_file):
logger.info(
"Model provided is a saved model directory at {}".format(
input_model_file
)
)
saved_model = input_model_file
else:
saved_model = decode_eff(
input_model_file,
enc_key=key
)
logger.info("Converting the saved model to tflite model.")
converter = tf.lite.TFLiteConverter.from_saved_model(
saved_model,
signature_keys=["serving_default"],
)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
]
tflite_model = converter.convert()
model_root = os.path.dirname(output_model_file)
if not os.path.exists(model_root):
os.makedirs(model_root)
logger.info("Writing out the tflite model.")
with open(output_model_file, "wb") as tflite_file:
model_size = tflite_file.write(tflite_model)
print(f"TFLite model of size {model_size//MB} MB was written to {output_model_file}")
if __name__ == "__main__":
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/experimental/tflite/export_tflite.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing an experimental converter to a tf model to tflite."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/experimental/tflite/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator for experimental logging."""
import logging
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO"
)
logger = logging.getLogger(__name__)
def experimental(fn):
"""Simple function to define the keras decorator.
This decorator clears any previously existing sessions
and sets up a new session.
"""
def _fn_wrapper(*args, **kwargs):
"""Clear the keras session."""
logger.warning("This is an experimental module. Please use this at your own risk.")
return fn(*args, **kwargs)
return _fn_wrapper
| tao_tensorflow2_backend-main | nvidia_tao_tf2/experimental/decorators/experimental.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator to warn usage of experimental modules."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/experimental/decorators/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit Common Helper module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO common utils used across all apps."""
import argparse
import logging
import math
from math import exp, log
import numpy as np
import os
import random
import struct
import sys
from eff_tao_encryption.tao_codec import encrypt_stream
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.regularizers import l1, l2
import tensorflow as tf
from nvidia_tao_tf2.backbones.utils_tf import swish
logger = logging.getLogger(__name__)
ENCRYPTION_OFF = False
reg_dict = {0: None, 1: l1, 2: l2}
ap_mode_dict = {0: "sample", 1: "integrate"}
# Define 1MB for filesize calculation.
MB = 1 << 20
CUSTOM_OBJS = {'swish': swish}
def update_results_dir(cfg, task):
"""Update global results_dir based on task.results_dir.
This function should be called at the beginning of a pipeline script.
Args:
cfg (Hydra config): Config object loaded by Hydra
task (str): TAO pipeline name
Return:
Updated cfg
"""
if cfg[task]['results_dir']:
cfg.results_dir = cfg[task]['results_dir']
else:
cfg.results_dir = os.path.join(cfg.results_dir, task)
cfg[task]['results_dir'] = cfg.results_dir
logger.info(f"{task.capitalize()} results will be saved at: %s", cfg.results_dir)
return cfg
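# Illustrative example (paths are hypothetical): given a Hydra config with
# cfg.results_dir = "/workspace/results" and cfg.train.results_dir unset,
# update_results_dir(cfg, "train") sets both cfg.results_dir and
# cfg.train.results_dir to "/workspace/results/train".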
def set_random_seed(seed):
"""set random seed."""
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def encode_etlt(tmp_file_name, output_file_name, input_tensor_name, key):
"""Encrypt ETLT model."""
# Encode temporary uff to output file
with open(tmp_file_name, "rb") as open_temp_file, \
open(output_file_name, "wb") as open_encoded_file:
# TODO: @vpraveen: Remove this hack to support multiple input nodes.
# This will require an update to tlt_converter and DS. Postponing this for now.
if isinstance(input_tensor_name, list):
input_tensor_name = input_tensor_name[0]
open_encoded_file.write(struct.pack("<i", len(input_tensor_name)))
open_encoded_file.write(input_tensor_name.encode())
encrypt_stream(open_temp_file,
open_encoded_file,
key, encryption=True, rewind=False)
def raise_deprecation_warning(task, subtask, args):
"""Raise a deprecation warning based on the module.
Args:
task (str): The TLT task to be deprecated.
subtask (str): The subtask supported by that task.
args (list): List of arguments to be appended.
Raises:
DeprecationWarning: With the actual command to be run.
"""
if not isinstance(args, list):
raise TypeError("There should a list of arguments.")
args_string = " ".join(args)
new_command = f"{task} {subtask} {args_string}"
raise DeprecationWarning(
f"This command has been deprecated in this version of TLT. Please run \n{new_command}"
)
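# Hedged sketch (command pieces are illustrative):
#   raise_deprecation_warning("tao", "train", ["-e", "spec.yaml"])
# raises DeprecationWarning suggesting the replacement command
# "tao train -e spec.yaml".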
def parse_arguments(cl_args, supported_tasks=None):
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('module',
default='classification',
choices=supported_tasks)
args, unknown_args = parser.parse_known_args(cl_args)
args = vars(args)
return args, unknown_args
def get_num_params(model):
"""Get the number of parameters in a model.
Args:
model(keras.model.Model): Model object to run count params.
Returns:
        num_params (float): Number of parameters in the model, in millions.
"""
return model.count_params() / 1e6
def get_model_file_size(model_path):
"""Get the size of the model.
Args:
model_path (str): UNIX path to the model.
Returns:
file_size (float): File size in MB.
"""
if not os.path.exists(model_path):
raise FileNotFoundError(f"Model file wasn't found at {model_path}")
file_size = os.path.getsize(model_path) / MB
return file_size
def setup_keras_backend(training_precision, is_training):
"""Setup Keras-specific backend settings for training or inference.
Args:
training_precision: (TrainingPrecision or None) Proto object with FP16/FP32 parameters or
None. None leaves K.floatx() in its previous setting.
is_training: (bool) If enabled, Keras is set in training mode.
"""
# Learning phase of '1' indicates training mode -- important for operations
# that behave differently at training/test times (e.g. batch normalization)
if is_training:
K.set_learning_phase(1)
else:
K.set_learning_phase(0)
# Set training precision, if given. Otherwise leave K.floatx() in its previous setting.
# K.floatx() determines how Keras creates weights and casts them (Keras default: 'float32').
if training_precision is not None:
if training_precision == 'float32':
K.set_floatx('float32')
elif training_precision == 'float16':
K.set_floatx('float16')
else:
raise RuntimeError('Invalid training precision selected')
def summary_from_value(tag, value, scope=None):
"""Generate a manual simple summary object with a tag and a value."""
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
if scope:
summary_value.tag = f'{scope}/{tag}'
else:
summary_value.tag = tag
return summary
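# Illustrative sketch: summary_from_value('loss', 0.42, scope='val') returns a
# tf.Summary protobuf whose single value carries tag 'val/loss' and
# simple_value 0.42, suitable for FileWriter.add_summary further below.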
def parse_model_load_from_config(train_config):
"""Parse model loading config from protobuf.
Input:
the protobuf config at training_config level.
    Output:
model_path (string): the path of model to be loaded. None if not given
load_graph (bool): Whether to load whole graph. If False, will need to recompile the model
reset_optim (bool): Whether to reset optim. This field must be true if load_graph is false.
initial_epoch (int): the starting epoch number. 0 - based
"""
load_type = train_config.WhichOneof('load_model')
if load_type is None:
return None, False, True, 0
if load_type == 'resume_model_path':
try:
epoch = int(train_config.resume_model_path.split('.')[-2].split('_')[-1])
except Exception as e:
raise ValueError("Cannot parse the checkpoint path. Did you rename it?") from e
return train_config.resume_model_path, True, False, epoch
if load_type == 'pretrain_model_path':
return train_config.pretrain_model_path, False, True, 0
if load_type == 'pruned_model_path':
return train_config.pruned_model_path, True, True, 0
raise ValueError("training configuration contains invalid load_model type.")
def check_tf_oom(func):
"""A decorator function to check OOM and raise informative errors."""
def return_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
            if isinstance(e, tf.errors.ResourceExhaustedError):
logger = logging.getLogger(__name__)
logger.error(
"Ran out of GPU memory, please lower the batch size, use a smaller input "
"resolution, use a smaller backbone, or enable model parallelism for "
"supported TLT architectures (see TLT documentation)."
)
sys.exit(1)
else:
# throw out the error as-is if they are not OOM error
raise e
return return_func
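# Minimal usage sketch, assuming `run_inference` is a user-defined function:
#   @check_tf_oom
#   def run_inference(model, batch):
#       return model.predict(batch)
# A ResourceExhaustedError raised inside run_inference is logged with the OOM
# guidance above and the process exits instead of printing a raw traceback.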
class StepLRScheduler(keras.callbacks.Callback):
"""Step learning rate annealing schedule.
    This callback implements the step learning rate annealing schedule according to
the progress of the current experiment. The training progress is defined as the
ratio of the current iteration to the maximum iterations. The scheduler adjusts the
learning rate of the experiment in steps at regular intervals.
    Args:
        base_lr: Learning rate at the start of the experiment.
        gamma: Ratio by which the learning rate is reduced at each step.
        step_size: Step size as a percentage of the maximum iterations.
        max_iterations: Total number of iterations in the current experiment
            phase.
"""
def __init__(self, base_lr=1e-2, gamma=0.1, step_size=33, max_iterations=12345):
"""__init__ method."""
super().__init__()
if not 0.0 <= step_size <= 100.0:
raise ValueError('StepLRScheduler does not support a step size < 0.0 or > 100.0')
if not 0.0 <= gamma <= 1.0:
raise ValueError('StepLRScheduler does not support gamma < 0.0 or > 1.0')
self.base_lr = base_lr
self.gamma = gamma
self.step_size = step_size
self.max_iterations = max_iterations
self.global_step = 0
def reset(self, initial_step):
"""Reset global_step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""Start of training method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max iterations."""
if not 0. <= progress <= 1.:
raise ValueError(
                f'StepLRScheduler does not support a progress value < 0.0 or > 1.0 (received {progress})')
numsteps = self.max_iterations * self.step_size // 100
exp_factor = self.global_step / numsteps
lr = self.base_lr * pow(self.gamma, exp_factor)
return lr
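# Usage sketch mirroring the examples given for the other schedulers in this
# module (X_train/Y_train and the hyperparameters are placeholders):
#   steplr = StepLRScheduler(base_lr=1e-2, gamma=0.1, step_size=33,
#                            max_iterations=10000)
#   model.fit(X_train, Y_train, callbacks=[steplr])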
class MultiGPULearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler implementation.
Implements https://arxiv.org/pdf/1706.02677.pdf (Accurate, Large Minibatch SGD:
Training ImageNet in 1 Hour) style learning rate schedule.
Learning rate scheduler modulates learning rate according to the progress in the
training experiment. Specifically the training progress is defined as the ratio of
the current iteration to the maximum iterations. Learning rate scheduler adjusts
learning rate in the following phases:
Phase 1: 0.0 <= progress < soft_start:
Starting from start_lr linearly increase the learning rate to base_lr.
Phase 2: at every annealing point, divide learning rate by annealing divider.
Example:
```python
lrscheduler = MultiGPULearningRateScheduler(
max_iterations=max_iterations)
model.fit(X_train, Y_train, callbacks=[lrscheduler])
```
Args:
max_iterations: Total number of iterations in the experiment.
start_lr: Learning rate at the beginning. In the paper this is the learning rate used
with single GPU training.
base_lr: Maximum learning rate. In the paper base_lr is set as start_lr * number of
GPUs.
soft_start: The progress at which learning rate achieves base_lr when starting from
start_lr. Default value set as in the paper.
annealing_points: A list of progress values at which learning rate is divided by
annealing_divider. Default values set as in the paper.
annealing_divider: A divider for learning rate applied at each annealing point.
Default value set as in the paper.
"""
def __init__( # pylint: disable=W0102
self,
max_iterations,
start_lr=3e-4,
base_lr=5e-4,
soft_start=0.056,
annealing_points=[0.33, 0.66, 0.88],
annealing_divider=10.0):
"""__init__ method."""
super().__init__()
if not 0.0 <= soft_start <= 1.0:
            raise ValueError('The soft_start variable should be between 0.0 and 1.0.')
prev = 0.
for p in annealing_points:
if not 0.0 <= p <= 1.0:
                raise ValueError('annealing_point should be between 0.0 and 1.0.')
if p < prev:
raise ValueError('annealing_points should be in increasing order.')
if not soft_start < p:
raise ValueError('soft_start should be less than the first annealing point.')
prev = p
self.start_lr = start_lr
self.base_lr = base_lr
self.soft_start = soft_start # Increase to lr from start_lr until this point.
self.annealing_points = annealing_points # Divide lr by annealing_divider at these points.
self.annealing_divider = annealing_divider
self.max_iterations = max_iterations
self.global_step = 0
def reset(self, initial_step):
"""Reset global_step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""on_train_begin method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max_iterations."""
if not 0. <= progress <= 1.:
raise ValueError(
                f'MultiGPULearningRateScheduler does not support a progress value < 0.0 or > 1.0 (received {progress})')
if not self.base_lr:
return self.base_lr
lr = self.base_lr
if progress < self.soft_start:
soft_start = progress / self.soft_start
lr = soft_start * self.base_lr + (1. - soft_start) * self.start_lr
else:
for p in self.annealing_points:
if progress > p:
lr /= self.annealing_divider
return lr
class SoftStartAnnealingLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler implementation.
Learning rate scheduler modulates learning rate according to the progress in the
training experiment. Specifically the training progress is defined as the ratio of
the current iteration to the maximum iterations. Learning rate scheduler adjusts
learning rate in the following 3 phases:
Phase 1: 0.0 <= progress < soft_start:
Starting from min_lr exponentially increase the learning rate to base_lr
Phase 2: soft_start <= progress < annealing_start:
Maintain the learning rate at base_lr
Phase 3: annealing_start <= progress <= 1.0:
Starting from base_lr exponentially decay the learning rate to min_lr
Example:
```python
lrscheduler = modulus.callbacks.SoftStartAnnealingLearningRateScheduler(
max_iterations=max_iterations)
model.fit(X_train, Y_train, callbacks=[lrscheduler])
```
Args:
base_lr: Maximum learning rate
min_lr_ratio: The ratio between minimum learning rate (min_lr) and base_lr
soft_start: The progress at which learning rate achieves base_lr when starting from min_lr
annealing_start: The progress at which learning rate starts to drop from base_lr to min_lr
max_iterations: Total number of iterations in the experiment
"""
def __init__(self, max_iterations, base_lr=5e-4, min_lr_ratio=0.01, soft_start=0.1,
annealing_start=0.7):
"""__init__ method."""
super().__init__()
if not 0.0 <= soft_start <= 1.0:
            raise ValueError('The soft_start variable should be between 0.0 and 1.0.')
if not 0.0 <= annealing_start <= 1.0:
raise ValueError('The annealing_start variable should be >= 0.0 or <= 1.0.')
if not soft_start < annealing_start:
            raise ValueError('Variable soft_start should be less than annealing_start.')
self.base_lr = base_lr
self.min_lr_ratio = min_lr_ratio
self.soft_start = soft_start # Increase to lr from min_lr until this point.
self.annealing_start = annealing_start # Start annealing to min_lr at this point.
self.max_iterations = max_iterations
self.min_lr = min_lr_ratio * base_lr
self.global_step = 0
def reset(self, initial_step):
"""Reset global_step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""on_train_begin method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max_iterations."""
if not 0. <= progress <= 1.:
            raise ValueError(f'SoftStartAnnealingLearningRateScheduler does not support a progress value < 0.0 or > 1.0 (received {progress})')
if not self.base_lr:
return self.base_lr
if self.soft_start > 0.0:
soft_start = progress / self.soft_start
else: # learning rate starts from base_lr
soft_start = 1.0
if self.annealing_start < 1.0:
annealing = (1.0 - progress) / (1.0 - self.annealing_start)
else: # learning rate is never annealed
annealing = 1.0
t = soft_start if progress < self.soft_start else 1.0
t = annealing if progress > self.annealing_start else t
lr = exp(log(self.min_lr) + t * (log(self.base_lr) - log(self.min_lr)))
return lr
class OneIndexedCSVLogger(keras.callbacks.CSVLogger):
"""CSV Logger with epoch number started from 1."""
def on_epoch_end(self, epoch, logs=None):
"""On epoch end."""
super().on_epoch_end(epoch + 1, logs)
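# Usage sketch ("training.csv" is an arbitrary example path): behaves exactly
# like keras.callbacks.CSVLogger except the epoch column is logged 1-based.
#   csv_logger = OneIndexedCSVLogger("training.csv", append=True)
#   model.fit(X_train, Y_train, callbacks=[csv_logger])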
class SoftStartCosineAnnealingScheduler(keras.callbacks.Callback):
"""Soft Start Cosine annealing scheduler.
learning rate in the following 2 phases:
Phase 1: 0.0 <= progress < soft_start:
Starting from min_lr linearly increase the learning rate to base_lr
Phase 2: soft_start <= progress <= 1.0:
Starting from base_lr cosine decay the learning rate to min_lr
Args:
base_lr: Maximum learning rate
min_lr_ratio: The ratio between minimum learning rate (min_lr) and base_lr
soft_start: The progress at which learning rate achieves base_lr when starting from min_lr
max_iterations: Total number of iterations in the experiment
(https://arxiv.org/pdf/1608.03983.pdf)
"""
def __init__(self, base_lr, min_lr_ratio, soft_start, max_iterations):
"""Initalize global parameters."""
super().__init__()
if not 0.0 <= soft_start <= 1.0:
            raise ValueError('The soft_start variable should be between 0.0 and 1.0.')
self.max_iterations = max_iterations
self.soft_start = soft_start
self.base_lr = base_lr
self.min_lr = self.base_lr * min_lr_ratio
self.global_step = 0
def reset(self, initial_step):
"""Reset global step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""on_train_begin method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
K.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
K.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max_iterations."""
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
if not 0. <= progress <= 1.:
            raise ValueError(f'SoftStartCosineAnnealingScheduler does not support a progress value < 0.0 or > 1.0 (received {progress})')
if not self.base_lr:
return self.base_lr
if self.soft_start > 0.0:
soft_start = progress / self.soft_start
else: # learning rate starts from base_lr
soft_start = 1.0
if soft_start < 1:
lr = (self.base_lr - self.min_lr) * soft_start + self.min_lr
else:
lr = self.min_lr + (self.base_lr - self.min_lr) * \
(1 + math.cos(math.pi * (progress - self.soft_start))) / 2
return lr
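# Usage sketch (hyperparameter values are illustrative only):
#   cos_lr = SoftStartCosineAnnealingScheduler(
#       base_lr=5e-4, min_lr_ratio=0.01, soft_start=0.1, max_iterations=10000)
#   model.fit(X_train, Y_train, callbacks=[cos_lr])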
class TensorBoard(keras.callbacks.Callback):
"""Callback to log some things to TensorBoard. Quite minimal, and just here as an example."""
def __init__(self, log_dir='./logs', write_graph=True, weight_hist=False):
"""__init__ method.
Args:
log_dir: the path of the directory where to save the log
files to be parsed by TensorBoard.
write_graph: whether to visualize the graph in TensorBoard.
The log file can become quite large when
write_graph is set to True.
            weight_hist: whether to plot histograms of weights.
"""
super().__init__()
self.log_dir = log_dir
self.write_graph = write_graph
self._merged = None
self._step = 0
self._weight_hist = weight_hist
def on_epoch_begin(self, epoch, logs=None):
"""on_epoch_begin method."""
# Run user defined summaries
if self._merged is not None:
summary_str = self.sess.run(self._merged)
self.writer.add_summary(summary_str, epoch)
self.writer.flush()
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = summary_from_value(name, value.item())
self.writer.add_summary(summary, self._step)
summary = summary_from_value('lr', K.get_value(self.model.optimizer.lr))
self.writer.add_summary(summary, self._step)
self._step += 1
self.writer.flush()
def set_model(self, model):
"""set_model method."""
self.model = model
self.sess = K.get_session()
if self._weight_hist:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf.summary.histogram(mapped_weight_name, weight)
self._merged = tf.summary.merge_all()
if self.write_graph:
self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
else:
self.writer = tf.summary.FileWriter(self.log_dir)
def on_train_end(self, *args, **kwargs):
"""on_train_end method."""
self.writer.close()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO common path utils used across all apps."""
import os
def expand_path(path):
"""This function takes in a path and returns the absolute path of that path after expanding the tilde (~) character to the user's home directory to prevent path traversal vulnerability.
Args:
path (str): The path to expand and make absolute.
Returns:
str: The absolute path with expanded tilde.
"""
return os.path.abspath(os.path.expanduser(path))
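# Illustrative example (the home directory shown is hypothetical):
#   expand_path("~/experiments/../models")  # -> "/home/user/models"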
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/path_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Include this in wrapper to suppress all warnings."""
# Code below to suppress as many warnings as possible
import os
if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore")
import logging
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/no_warning.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common decorators used in TAO Toolkit."""
from functools import wraps
import inspect
import os
import horovod.tensorflow.keras as hvd
import nvidia_tao_tf2.common.logging.logging as status_logging
from nvidia_tao_tf2.common.mlops.wandb import alert
def monitor_status(name='efficientdet', mode='training'):
"""Status monitoring decorator."""
def inner(runner):
@wraps(runner)
def _func(cfg, **kwargs):
            is_master = True
            try:
                if hvd.size() > 0:
                    is_master = hvd.rank() == 0
            except ValueError:
                pass
# set up status logger
if not os.path.exists(cfg.results_dir) and is_master:
os.makedirs(cfg.results_dir)
status_file = os.path.join(cfg.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
try:
s_logger.write(
status_level=status_logging.Status.STARTED,
message=f"Starting {name} {mode}."
)
alert(
title=f'{mode.capitalize()} started',
text=f'{mode.capitalize()} {name} has started',
level=0,
is_master=is_master
)
runner(cfg, **kwargs)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message=f"{mode.capitalize()} finished successfully."
)
except (KeyboardInterrupt, SystemError):
status_logging.get_status_logger().write(
message=f"{mode.capitalize()} was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
alert(
title=f'{mode.capitalize()} stopped',
text=f'{mode.capitalize()} was interrupted',
level=1,
is_master=is_master
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
alert(
title=f'{mode.capitalize()} failed',
text=str(e),
level=2,
is_master=is_master
)
raise e
return _func
return inner
def override(method):
"""Override decorator.
Decorator implementing method overriding in python
Must also use the @subclass class decorator
"""
method.override = True
return method
def subclass(class_object):
"""Subclass decorator.
Verify all @override methods
Use a class decorator to find the method's class
"""
for name, method in class_object.__dict__.items():
if hasattr(method, "override"):
found = False
for base_class in inspect.getmro(class_object)[1:]:
if name in base_class.__dict__:
if not method.__doc__:
# copy docstring
method.__doc__ = base_class.__dict__[name].__doc__
found = True
break
assert found, f'"{class_object.__name__}.{name}" not found in any base class'
return class_object
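# Minimal sketch of the two decorators working together (Base/Child are
# hypothetical classes, not part of TAO):
#   class Base:
#       def run(self):
#           """Run the task."""
#   @subclass
#   class Child(Base):
#       @override
#       def run(self):
#           pass  # docstring is copied from Base.run by @subclass
# If an @override method had no match in any base class, @subclass would
# raise an AssertionError at class-definition time.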
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/decorators.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import List, Optional
from dataclasses import dataclass, field
@dataclass
class WandBConfig:
"""Configuration element wandb client."""
project: str = "TAO Toolkit"
entity: Optional[str] = None
tags: List[str] = field(default_factory=lambda: [])
reinit: bool = False
sync_tensorboard: bool = True
save_code: bool = False
    name: Optional[str] = None
@dataclass
class ClearMLConfig:
"""Configration element for clearml client."""
project: str = "TAO Toolkit"
task: str = "train"
deferred_init: bool = False
reuse_last_task_id: bool = False
continue_last_task: bool = False
tags: List[str] = field(default_factory=lambda: [])
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/config/mlops.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing default dataclass configs."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit Common Dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/dataset/__init__.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
from six.moves import range
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item]) # noqa pylint: disable=R1728
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
'keypoints': (optional) a dictionary of keypoint string 'label' to integer
'id'.
    We only allow a class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': f'category_{class_id + label_id_offset}'
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
'Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
category = {'id': item.id, 'name': name}
if item.keypoints:
keypoints = {}
list_of_keypoint_ids = []
for kv in item.keypoints:
if kv.id in list_of_keypoint_ids:
raise ValueError(
f'Duplicate keypoint ids are not allowed. Found {kv.id} more than once')
keypoints[kv.label] = kv.id
list_of_keypoint_ids.append(kv.id)
category['keypoints'] = keypoints
categories.append(category)
return categories
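# Hedged example: with label_map=None and max_num_classes=2 this returns the
# default list
#   [{'id': 1, 'name': 'category_1'}, {'id': 2, 'name': 'category_2'}]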
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/dataset/label_map_util.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for creating TFRecord data sets."""
import tensorflow as tf
def int64_feature(value):
"""int64_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
"""int64_list_feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
"""bytes_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
"""bytes_list_feature."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
"""float_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def float_list_feature(value):
"""float_list_feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
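# Illustrative sketch combining the helpers above into a serialized
# tf.train.Example (feature keys and values are arbitrary):
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'image/height': int64_feature(480),
#       'image/filename': bytes_feature(b'img_0001.jpg'),
#       'image/object/bbox/xmin': float_list_feature([0.1, 0.5]),
#   }))
#   serialized = example.SerializeToString()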
def recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if not xml:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
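# Hedged example, assuming lxml is available and using a toy Pascal-VOC-style
# snippet:
#   from lxml import etree
#   xml = etree.fromstring(
#       "<annotation><filename>a.jpg</filename>"
#       "<object><name>cat</name></object></annotation>")
#   recursive_parse_xml_to_dict(xml)
#   # -> {'annotation': {'filename': 'a.jpg', 'object': [{'name': 'cat'}]}}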
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/dataset/dataset_util.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to instantiate and return a clearml task."""
from datetime import datetime
import logging
import os
from clearml import Task
logger = logging.getLogger(__name__)
def get_clearml_task(clearml_config, network_name: str, action: str = "train"):
"""Get clearml task.
Args:
clearml_config (protobuf): Configuration element for clearml task.
network_name (str): Name of the network running the training.
Returns
task (clearml.Task): Task object.
"""
    time_string = datetime.now().strftime("%d/%y/%m_%H:%M:%S")
    task = None
    try:
        task_name = f"{clearml_config.task}_{time_string}" if clearml_config.task \
            else f"{network_name}_{action}_{time_string}"
task = Task.init(
project_name=clearml_config.project,
task_name=task_name,
deferred_init=clearml_config.deferred_init,
reuse_last_task_id=clearml_config.reuse_last_task_id,
continue_last_task=clearml_config.continue_last_task,
)
tao_base_container = os.getenv("TAO_DOCKER", None)
if tao_base_container is not None:
task.set_base_docker(tao_base_container)
return task
except Exception as e:
logger.error(
"ClearML task init failed with error %s", e
)
logger.warning(
"Training will still continue."
)
return task
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/mlops/clearml.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines for connecting with Weights and Biases client."""
from datetime import datetime
import logging
import os
import wandb
from wandb import AlertLevel
DEFAULT_WANDB_CONFIG = "~/.netrc"
logger = logging.getLogger(__name__)
_WANDB_INITIALIZED = False
def alert(title, text, duration=300, level=0, is_master=True):
"""Send alert."""
alert_levels = {
0: AlertLevel.INFO,
1: AlertLevel.WARN,
2: AlertLevel.ERROR
}
if is_wandb_initialized() and is_master:
wandb.alert(
title=title,
text=text,
level=alert_levels[level],
wait_duration=duration
)
def is_wandb_initialized():
"""Check if wandb has been initialized."""
global _WANDB_INITIALIZED # pylint: disable=W0602,W0603
return _WANDB_INITIALIZED
def check_wandb_logged_in():
"""Check if weights and biases have been logged in."""
wandb_logged_in = False
try:
wandb_api_key = os.getenv("WANDB_API_KEY", None)
if wandb_api_key is not None or os.path.exists(os.path.expanduser(DEFAULT_WANDB_CONFIG)):
wandb_logged_in = wandb.login(key=wandb_api_key)
return wandb_logged_in
except wandb.errors.UsageError:
logger.warning("WandB wasn't logged in.")
return False
def initialize_wandb(project: str = "TAO Toolkit",
entity: str = None,
sync_tensorboard: bool = True,
save_code: bool = False,
name: str = "train",
config=None,
wandb_logged_in: bool = False,
results_dir: str = os.getcwd()):
"""Function to initialize wandb client with the weights and biases server.
If wandb initialization fails, then the function just catches the exception
and prints an error log with the reason as to why wandb.init() failed.
Args:
project (str): Name of the project to sync data with.
        entity (str): Name of the wandb entity.
        sync_tensorboard (bool): Boolean flag to synchronize
            tensorboard and wandb visualizations.
        name (str): Name of the task running.
        config (OmegaConf.DictConf): Configuration element of the task that's being run.
Typically, this is the yaml container generated from the `experiment_spec`
file used to run the job.
wandb_logged_in (bool): Boolean flag to check if wandb was logged in.
results_dir (str): Output directory of the experiment.
Returns:
No explicit returns.
"""
logger.info("Initializing wandb.")
try:
assert wandb_logged_in, (
"WandB client wasn't logged in. Please make sure to set "
"the WANDB_API_KEY env variable or run `wandb login` in "
"over the CLI and copy the ~/.netrc file to the container."
)
start_time = datetime.now()
time_string = start_time.strftime("%d/%y/%m_%H:%M:%S")
wandb_dir = os.path.join(results_dir, "wandb")
if not os.path.exists(wandb_dir):
os.makedirs(wandb_dir)
wandb_name = f"{name}_{time_string}"
# wandb.tensorboard.patch(root_logdir=results_dir)
wandb.init(
project=project,
entity=entity,
sync_tensorboard=sync_tensorboard,
save_code=save_code,
name=wandb_name,
config=config,
dir=wandb_dir
)
global _WANDB_INITIALIZED # pylint: disable=W0602,W0603
_WANDB_INITIALIZED = True
except Exception as e:
logger.warning("Wandb logging failed with error %s", e)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/mlops/wandb.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing integration with third party mlops."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/mlops/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLOps utils."""
import logging
from nvidia_tao_tf2.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf2.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
logger = logging.getLogger(__name__)
def init_mlops(cfg, name):
"""Initialize mlops components."""
wandb_logged_in = check_wandb_logged_in()
if wandb_logged_in:
wandb_name = cfg.train.wandb.name if cfg.train.wandb.name else f"{name}_train"
initialize_wandb(
project=cfg.train.wandb.project,
entity=cfg.train.wandb.entity,
name=wandb_name,
wandb_logged_in=wandb_logged_in,
config=cfg,
results_dir=cfg.results_dir
)
if cfg.train.get("clearml", None):
logger.info("Setting up communication with ClearML server.")
get_clearml_task(
cfg.train.clearml,
network_name=name,
action="train"
)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/mlops/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common entrypoint to download default specs."""
import logging
import os
import shutil
from omegaconf import MISSING
from dataclasses import dataclass
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
logger = logging.getLogger()
@dataclass
class DefaultConfig:
"""This is a structured config for downloading default spec files."""
# Input folder where the default configs are.
source_data_dir: str = MISSING
# Output folder path
target_data_dir: str = MISSING
    # Name of the workflow.
workflow: str = MISSING
spec_path = os.path.dirname(os.path.abspath(__file__))
@hydra_runner(config_path=spec_path, config_name="download_specs", schema=DefaultConfig)
def main(cfg: DefaultConfig) -> None:
"""Script to download default specs."""
if os.path.exists(cfg.target_data_dir):
if os.listdir(cfg.target_data_dir):
raise FileExistsError(
f"The target directory, `{cfg.target_data_dir}` already has files in it."
"Please empty this directory in order to avoid overwriting the default specs."
)
else:
os.makedirs(cfg.target_data_dir)
names = [item for item in os.listdir(cfg.source_data_dir) if item.endswith(".yaml")]
for spec in names:
srcname = os.path.join(cfg.source_data_dir, spec)
dstname = os.path.join(cfg.target_data_dir, spec)
shutil.copy2(srcname, dstname)
logger.info(
"Default specification files for {} downloaded to '{}'".format(cfg.workflow, cfg.target_data_dir) # noqa: pylint: disable=C0209
)
if __name__ == "__main__":
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/entrypoint/download_specs.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit Entrypoint Helper Modules."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit Entrypoint Helper Modules."""
import importlib
import os
import pkgutil
import subprocess
import shlex
import sys
from time import time
from nvidia_tao_tf2.common.entrypoint import download_specs
from nvidia_tao_tf2.common.telemetry.nvml_utils import get_device_details
from nvidia_tao_tf2.common.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
This function lists out the python tasks in a folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
# Add new command for copying specs.
modules["download_specs"] = {
"source_data_dir": os.path.join(os.path.dirname(module_path[0]), "experiment_specs"),
"runner_path": os.path.abspath(importlib.import_module(download_specs.__name__).__file__),
"workflow": package.__name__.split(".")[0]
}
return modules
def check_valid_gpus(num_gpus, gpu_ids):
"""Check if the number of GPU's called and IDs are valid.
This function scans the machine using the nvidia-smi routine to find the
number of GPU's and matches the id's and num_gpu's accordingly.
Once validated, it finally also sets the CUDA_VISIBLE_DEVICES env variable.
Args:
num_gpus (int): Number of GPUs alloted by the user for the job.
gpu_ids (list(int)): List of GPU indices used by the user.
Returns:
No explicit returns
"""
# Ensure the gpu_ids are all different, and sorted
gpu_ids = sorted(list(set(gpu_ids)))
assert num_gpus > 0, "At least 1 GPU required to run any task."
num_gpus_available = str(subprocess.check_output(["nvidia-smi", "-L"])).count("UUID")
max_id = max(gpu_ids)
assert min(gpu_ids) >= 0, (
"GPU ids cannot be negative."
)
assert len(gpu_ids) == num_gpus, (
f"The number of GPUs ({gpu_ids}) must be the same as the number of GPU indices"
f" ({num_gpus}) provided."
)
    assert max_id < num_gpus_available and num_gpus <= num_gpus_available, (
        "Invalid GPU ids: requested GPU indices must exist on this machine and "
        "num_gpus cannot exceed the number of available GPUs."
    )
cuda_visible_devices = ",".join([str(idx) for idx in gpu_ids])
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
def set_gpu_info_single_node(num_gpus, gpu_ids):
"""Set gpu environment variable for single node."""
check_valid_gpus(num_gpus, gpu_ids)
env_variable = ""
visible_devices = os.getenv("CUDA_VISIBLE_DEVICES", None)
if visible_devices is not None:
env_variable = f" CUDA_VISIBLE_DEVICES={visible_devices}"
return env_variable
def command_line_parser(parser, subtasks):
"""Build command line parser."""
parser.add_argument(
'subtask',
default='train',
choices=subtasks.keys(),
help="Subtask for a given task/model.",
)
parser.add_argument(
"-e",
"--experiment_spec",
help="Path to the experiment spec file.",
default=None
)
parser.add_argument(
"-g",
"--gpus",
type=int,
help="Number of GPUs to use. The default value is 1.",
default=1
)
parser.add_argument(
"-m",
"--model_path",
default=None,
help="Path to a pre-trained model or model to continue training."
)
parser.add_argument(
"-o",
"--output_dir",
default=None,
help="Path to where the output collaterals from this task is dropped."
)
parser.add_argument(
'--gpu_index',
type=int,
nargs="+",
help="The indices of the GPU's to be used.",
default=None
)
parser.add_argument(
"--num_processes",
"-np",
type=int,
default=-1,
help=("The number of horovod child processes to be spawned. "
"Default is -1(equal to --gpus)."),
required=False
)
parser.add_argument(
"--mpirun-arg",
type=str,
default="-x NCCL_IB_HCA=mlx5_4,mlx5_6,mlx5_8,mlx5_10 -x NCCL_SOCKET_IFNAME=^lo,docker",
help="Arguments for the mpirun command to run multi-node."
)
parser.add_argument(
'--multi-node',
action='store_true',
default=False,
help="Flag to enable to run multi-node training."
)
parser.add_argument(
"--launch_cuda_blocking",
action="store_true",
default=False,
help="Debug flag to add CUDA_LAUNCH_BLOCKING=1 to the command calls."
)
# Parse the arguments.
return parser
def launch(parser, subtasks, multigpu_support=['train'], task="tao_tf2"):
"""Parse the command line and kick off the entrypoint.
Args:
parser (argparse.ArgumentParser): Parser object to define the command line args.
subtasks (list): List of subtasks.
multigpu_support (list): List of tasks that support --gpus > 1.
task (str): Task entrypoint being called.
"""
# Subtasks for a given model.
parser = command_line_parser(parser, subtasks)
cli_args = sys.argv[1:]
args, unknown_args = parser.parse_known_args(cli_args)
args = vars(args)
scripts_args = ""
if args["subtask"] not in ["download_specs"]:
assert args["experiment_spec"], (
f"Experiment spec file needs to be provided for this task: {args['subtask']}"
)
if not os.path.exists(args["experiment_spec"]):
raise FileNotFoundError(f"Experiment spec file doesn't exist at {args['experiment_spec']}")
path, name = os.path.split(args["experiment_spec"])
if path != "":
scripts_args += f" --config-path {path}"
scripts_args += f" --config-name {name}"
mpi_command = ""
gpu_ids = args["gpu_index"]
multi_node = args['multi_node']
mpirun_arg = args['mpirun_arg']
num_gpus = args["gpus"]
if gpu_ids is None:
gpu_ids = range(num_gpus)
launch_cuda_blocking = args['launch_cuda_blocking']
assert num_gpus > 0, "At least 1 GPU required to run any task."
np = args["num_processes"]
# np defaults to num_gpus if < 0
if np < 0:
np = num_gpus
if num_gpus > 1:
if not args["subtask"] in multigpu_support:
raise NotImplementedError(
f"This {args['subtask']} doesn't support multiGPU. Please set --gpus 1"
)
mpi_command = f'mpirun -np {np} --oversubscribe --bind-to none --allow-run-as-root -mca pml ob1 -mca btl ^openib'
if multi_node:
mpi_command += " " + mpirun_arg
if args['subtask'] == "download_specs":
if not args['output_dir']:
raise RuntimeError(
f"--output_dir is a mandatory arg for this subtask {args['subtask']}. "
"Please set the output dir to a valid unix path."
)
scripts_args += f"target_data_dir={args['output_dir']}"
scripts_args += f" source_data_dir={subtasks[args['subtask']]['source_data_dir']}"
scripts_args += f" workflow={subtasks[args['subtask']]['workflow']}"
script = subtasks[args['subtask']]["runner_path"]
unknown_args_string = " ".join(unknown_args)
task_command = f"python {script} {scripts_args} {unknown_args_string}"
env_variables = ""
if not multi_node:
env_variables += set_gpu_info_single_node(num_gpus, gpu_ids)
if launch_cuda_blocking:
task_command = f"CUDA_LAUNCH_BLOCKING=1 {task_command}"
run_command = f"{mpi_command} bash -c \'{env_variables} {task_command}\'"
process_passed = True
start = time()
try:
subprocess.run(
shlex.split(run_command),
env=os.environ,
shell=False,
check=True,
stdout=sys.stdout,
stderr=sys.stderr
)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
except subprocess.CalledProcessError as e:
process_passed = False
if e.output is not None:
print(f"TAO Toolkit task: {args['subtask']} failed with error:\n{e.output}")
end = time()
time_lapsed = end - start
# Computing and sending telemetry data.
try:
gpu_data = []
for device in get_device_details():
gpu_data.append(device.get_config())
print("Sending telemetry data.")
send_telemetry_data(
task,
args["subtask"],
gpu_data,
num_gpus=num_gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[Error]: {e}")
if not process_passed:
print("Execution status: FAIL")
sys.exit(-1) # returning non zero return code from the process.
print("Execution status: PASS")
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/entrypoint/entrypoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities using the NVML library for GPU devices."""
import json
import pynvml
BRAND_NAMES = {
pynvml.NVML_BRAND_UNKNOWN: "Unknown",
pynvml.NVML_BRAND_QUADRO: "Quadro",
pynvml.NVML_BRAND_TESLA: "Tesla",
pynvml.NVML_BRAND_NVS: "NVS",
pynvml.NVML_BRAND_GRID: "Grid",
pynvml.NVML_BRAND_TITAN: "Titan",
pynvml.NVML_BRAND_GEFORCE: "GeForce",
pynvml.NVML_BRAND_NVIDIA_VAPPS: "NVIDIA Virtual Applications",
pynvml.NVML_BRAND_NVIDIA_VPC: "NVIDIA Virtual PC",
pynvml.NVML_BRAND_NVIDIA_VCS: "NVIDIA Virtual Compute Server",
pynvml.NVML_BRAND_NVIDIA_VWS: "NVIDIA RTX Virtual Workstation",
pynvml.NVML_BRAND_NVIDIA_VGAMING: "NVIDIA Cloud Gaming",
pynvml.NVML_BRAND_QUADRO_RTX: "Quadro RTX",
pynvml.NVML_BRAND_NVIDIA_RTX: "NVIDIA RTX",
pynvml.NVML_BRAND_NVIDIA: "NVIDIA",
pynvml.NVML_BRAND_GEFORCE_RTX: "GeForce RTX",
pynvml.NVML_BRAND_TITAN_RTX: "TITAN RTX",
}
class GPUDevice:
"""Data structure to represent a GPU device."""
def __init__(self, pci_bus_id,
device_name,
device_brand,
memory,
cuda_compute_capability):
"""Data structure representing a GPU device.
Args:
pci_bus_id (hex): PCI bus ID of the GPU.
device_name (str): Name of the device GPU.
            device_brand (int): Brand of the GPU.
"""
self.name = device_name
self.pci_bus_id = pci_bus_id
if device_brand in BRAND_NAMES.keys():
self.brand = BRAND_NAMES[device_brand]
else:
self.brand = None
self.defined = True
self.memory = memory
self.cuda_compute_capability = cuda_compute_capability
def get_config(self):
"""Get json config of the device.
Returns
device_dict (dict): Dictionary containing data about the device.
"""
assert self.defined, "Device wasn't defined."
config_dict = {}
config_dict["name"] = self.name.decode().replace(" ", "-")
config_dict["pci_bus_id"] = self.pci_bus_id
config_dict["brand"] = self.brand
config_dict["memory"] = self.memory
config_dict["cuda_compute_capability"] = self.cuda_compute_capability
return config_dict
def __str__(self):
"""Generate a printable representation of the device."""
config = self.get_config()
data_string = json.dumps(config, indent=2)
return data_string
def pynvml_context(fn):
"""Simple decorator to setup python nvml context.
Args:
f: Function pointer.
Returns:
output of f.
"""
def _fn_wrapper(*args, **kwargs):
"""Wrapper setting up nvml context."""
try:
pynvml.nvmlInit()
return fn(*args, **kwargs)
finally:
pynvml.nvmlShutdown()
return _fn_wrapper
@pynvml_context
def get_number_gpus_available():
"""Get the number of GPU's attached to the machine.
Returns:
num_gpus (int): Number of GPUs in the machine.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
return num_gpus
@pynvml_context
def get_device_details():
"""Get details about each device.
Returns:
device_list (list): List of GPUDevice objects.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
device_list = []
    assert num_gpus > 0, "At least 1 GPU is required for TAO Toolkit to run."
for idx in range(num_gpus):
handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
pci_info = pynvml.nvmlDeviceGetPciInfo(handle)
device_name = pynvml.nvmlDeviceGetName(handle)
brand_name = pynvml.nvmlDeviceGetBrand(handle)
memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
cuda_compute_capability = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
device_list.append(
GPUDevice(
pci_info.busId,
device_name,
brand_name,
memory.total,
cuda_compute_capability
)
)
return device_list
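# Usage sketch for the helpers above: enumerate the attached GPUs and print
# their configs. Assumes an NVIDIA driver and at least one GPU are present;
# the pynvml_context decorator handles NVML init/shutdown around each call.
if __name__ == "__main__":
    print(f"Found {get_number_gpus_available()} GPU(s).")
    for gpu in get_device_details():
        print(gpu)  # pretty-printed JSON via GPUDevice.__str__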
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/telemetry/nvml_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO utils for uploading telemetry data."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/telemetry/__init__.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Utilties to send data to the TAO Toolkit Telemetry Remote Service."""
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib
import requests
import urllib3
from nvidia_tao_tf2.common.telemetry.nvml_utils import get_device_details
TELEMETRY_TIMEOUT = int(os.getenv("TELEMETRY_TIMEOUT", "30"))
def get_url_from_variable(variable, default=None):
"""Get the Telemetry Server URL."""
url = os.getenv(variable, default)
return url
def url_exists(url):
"""Check if a URL exists.
Args:
url (str): String to be verified as a URL.
Returns:
        valid (bool): True if the URL exists, False otherwise.
"""
url_request = urllib.request.Request(url)
url_request.get_method = lambda: 'HEAD'
try:
urllib.request.urlopen(url_request) # noqa pylint: disable=R1732
return True
except urllib.request.URLError:
return False
def get_certificates():
"""Download the cacert.pem file and return the path.
Returns:
path (str): UNIX path to the certificates.
"""
certificates_url = get_url_from_variable("TAO_CERTIFICATES_URL")
if not url_exists(certificates_url):
raise urllib.request.URLError("Url for the certificates not found.")
tmp_dir = tempfile.mkdtemp()
download_command = f"wget {certificates_url} -P {tmp_dir} --quiet"
try:
subprocess.check_call(
download_command, shell=True, stdout=sys.stdout
)
except Exception as exc:
raise urllib.request.URLError("Download certificates.tar.gz failed.") from exc
tarfile_path = os.path.join(tmp_dir, "certificates.tar.gz")
assert tarfile.is_tarfile(tarfile_path), (
"The downloaded file isn't a tar file."
)
with tarfile.open(name=tarfile_path, mode="r:gz") as tar_file:
filenames = tar_file.getnames()
for memfile in filenames:
member = tar_file.getmember(memfile)
tar_file.extract(member, tmp_dir)
file_list = [item for item in os.listdir(tmp_dir) if item.endswith(".pem")]
    assert file_list, (
        f"Didn't get pem files. Directory contents: {os.listdir(tmp_dir)}"
    )
return tmp_dir
def send_telemetry_data(network, action, gpu_data, num_gpus=1, time_lapsed=None, pass_status=False):
"""Wrapper to send TAO telemetry data.
Args:
network (str): Name of the network being run.
action (str): Subtask of the network called.
gpu_data (dict): Dictionary containing data about the GPU's in the machine.
num_gpus (int): Number of GPUs used in the job.
time_lapsed (int): Time lapsed.
pass_status (bool): Job passed or failed.
Returns:
No explicit returns.
"""
urllib3.disable_warnings(urllib3.exceptions.SubjectAltNameWarning)
if os.getenv('TELEMETRY_OPT_OUT', "no").lower() in ["no", "false", "0"]:
url = get_url_from_variable("TAO_TELEMETRY_SERVER")
data = {
"version": os.getenv("TAO_TOOLKIT_VERSION", "5.0.0"),
"action": action,
"network": network,
"gpu": [device["name"] for device in gpu_data[:num_gpus]],
"success": pass_status
}
if time_lapsed is not None:
data["time_lapsed"] = time_lapsed
certificate_dir = get_certificates()
cert = ('client-cert.pem', 'client-key.pem')
requests.post(
url,
json=data,
cert=tuple([os.path.join(certificate_dir, item) for item in cert]), # noqa pylint: disable=R1728
timeout=TELEMETRY_TIMEOUT
)
shutil.rmtree(certificate_dir)
if __name__ == "__main__":
print("Send dummy data.")
gpu_data = []
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
"classification_tf2",
"train",
gpu_data,
num_gpus=1,
time_lapsed=1,
pass_status=True
)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/telemetry/telemetry.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger class for TAO TF2 models."""
from abc import abstractmethod
import atexit
from datetime import datetime
import json
import logging
import os
logger = logging.getLogger(__name__)
class Verbosity():
"""Verbosity levels."""
DISABLE = 0
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
# Defining a log level to name dictionary.
log_level_to_name = {
Verbosity.DISABLE: "DISABLE",
Verbosity.DEBUG: 'DEBUG',
Verbosity.INFO: 'INFO',
Verbosity.WARNING: 'WARNING',
Verbosity.ERROR: 'ERROR',
Verbosity.CRITICAL: 'CRITICAL'
}
class Status():
"""Status levels."""
SUCCESS = 0
FAILURE = 1
STARTED = 2
RUNNING = 3
SKIPPED = 4
status_level_to_name = {
Status.SUCCESS: 'SUCCESS',
Status.FAILURE: 'FAILURE',
Status.STARTED: 'STARTED',
Status.RUNNING: 'RUNNING',
Status.SKIPPED: 'SKIPPED'
}
class BaseLogger(object):
"""File logger class."""
def __init__(self, is_master=False, verbosity=Verbosity.DISABLE):
"""Base logger class."""
self.is_master = is_master
self.verbosity = verbosity
self.categorical = {}
self.graphical = {}
self.kpi = {}
@property
def date(self):
"""Get date from the status."""
date_time = datetime.now()
date_object = date_time.date()
return "{}/{}/{}".format( # noqa pylint: disable=C0209
date_object.month,
date_object.day,
date_object.year
)
@property
def time(self):
"""Get date from the status."""
date_time = datetime.now()
time_object = date_time.time()
return "{}:{}:{}".format( # noqa pylint: disable=C0209
time_object.hour,
time_object.minute,
time_object.second
)
@property
def categorical(self):
"""Categorical data to be logged."""
return self._categorical
@categorical.setter
def categorical(self, value: dict):
"""Set categorical data to be logged."""
self._categorical = value
@property
def graphical(self):
"""Graphical data to be logged."""
return self._graphical
@graphical.setter
def graphical(self, value: dict):
"""Set graphical data to be logged."""
self._graphical = value
@property
def kpi(self):
"""Set KPI data."""
return self._kpi
@kpi.setter
def kpi(self, value: dict):
"""Set KPI data."""
self._kpi = value
def flush(self):
"""Flush the logger."""
pass
    def format_data(self, data: dict):
        """Format a (possibly nested) dictionary as a flat string."""
        if not isinstance(data, dict):
            return str(data)
        data_string = []
        for key, value in data.items():
            # Recurse into nested dicts; stringify leaf values so that the
            # join below never receives a non-string entry.
            data_string.append(f"{key}: {self.format_data(value)}")
        return ", ".join(data_string)
def log(self, level, string):
"""Log the data string."""
if level >= self.verbosity:
logging.log(level, string)
@abstractmethod
def write(self, data=None,
status_level=Status.RUNNING,
verbosity_level=Verbosity.INFO,
message=None):
"""Write data out to the log file."""
if self.verbosity > Verbosity.DISABLE:
if not data:
data = {}
# Define generic data.
data["date"] = self.date
data["time"] = self.time
data["status"] = status_level_to_name.get(status_level, "RUNNING")
data["verbosity"] = log_level_to_name.get(verbosity_level, "INFO")
if message:
data["message"] = message
logging.log(verbosity_level, message)
if self.categorical:
data["categorical"] = self.categorical
if self.graphical:
data["graphical"] = self.graphical
if self.kpi:
data["kpi"] = self.kpi
data_string = self.format_data(data)
if self.is_master:
self.log(verbosity_level, data_string)
self.flush()
class StatusLogger(BaseLogger):
"""Simple logger to save the status file."""
def __init__(self, filename=None,
is_master=False,
verbosity=Verbosity.INFO,
append=True):
"""Logger to write out the status."""
super().__init__(is_master=is_master, verbosity=verbosity)
self.log_path = os.path.realpath(filename)
if is_master:
if os.path.exists(self.log_path):
logger.info("Log file already exists at %s", self.log_path)
self.l_file = open(self.log_path, "a" if append else "w", encoding='utf-8') # noqa pylint: disable=R1732
atexit.register(self.l_file.close)
def log(self, level, string):
"""Log the data string."""
if level >= self.verbosity:
self.l_file.write(string + "\n")
def flush(self):
"""Flush contents of the log file."""
if self.is_master:
self.l_file.flush()
@staticmethod
def format_data(data):
"""Format the dictionary data."""
if not isinstance(data, dict):
raise TypeError(f"Data must be a dictionary and not type {type(data)}.")
data_string = json.dumps(data)
return data_string
# Define the logger here so it's static.
_STATUS_LOGGER = BaseLogger()
def set_status_logger(status_logger):
"""Set the status logger.
Args:
status_logger: An instance of the logger class.
"""
global _STATUS_LOGGER # pylint: disable=W0603
_STATUS_LOGGER = status_logger
def get_status_logger():
"""Get the status logger."""
global _STATUS_LOGGER # pylint: disable=W0602,W0603
return _STATUS_LOGGER
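# Usage sketch: install a StatusLogger as the process-wide status logger and
# emit one status line. The file path is only an example; any writable
# location works.
if __name__ == "__main__":
    set_status_logger(StatusLogger(filename="/tmp/status.json", is_master=True))
    s_logger = get_status_logger()
    s_logger.kpi = {"accuracy": 0.99}
    s_logger.write(
        status_level=Status.RUNNING,
        message="Evaluation finished."
    )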
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/logging/logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger for TLT IVA models."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/logging/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit Entrypoint Helper from Hydra config parsing."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/hydra/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorator to read in hydra config and validate against a dataclass structured config."""
import functools
import os
import sys
from typing import Any, Callable, Optional
from hydra._internal.utils import _run_hydra, get_args_parser
from hydra.core.config_store import ConfigStore
from hydra.types import TaskFunction
from omegaconf import DictConfig
def hydra_runner(
config_path: Optional[str] = None, config_name: Optional[str] = None, schema: Optional[Any] = None
) -> Callable[[TaskFunction], Any]:
"""Decorator used for passing the Config paths to main function.
Optionally registers a schema used for validation/providing default values.
Args:
config_path: Optional path that will be added to config search directory.
config_name: Pathname of the config file.
schema: Structured config type representing the schema used for validation/providing default values.
"""
def decorator(task_function: TaskFunction) -> Callable[[], None]:
@functools.wraps(task_function)
def wrapper(cfg_passthrough: Optional[DictConfig] = None) -> Any:
            # Check if a config was passed in directly.
if cfg_passthrough is not None:
return task_function(cfg_passthrough)
args = get_args_parser()
# Parse arguments in order to retrieve overrides
# Returns argparse.Namespace
parsed_args = args.parse_args()
# Get overriding args in dot string format
overrides = parsed_args.overrides # type: list
# Disable the creation of .hydra subdir
# https://hydra.cc/docs/tutorials/basic/running_your_app/working_directory
overrides.append("hydra.output_subdir=null")
# Hydra logging outputs only to stdout (no log file).
# https://hydra.cc/docs/configure_hydra/logging
overrides.append("hydra/job_logging=stdout")
# Set run.dir ONLY for ExpManager "compatibility" - to be removed.
overrides.append("hydra.run.dir=.")
# Check if user set the schema.
if schema is not None:
# Create config store.
cs = ConfigStore.instance()
# Get the correct ConfigStore "path name" to "inject" the schema.
if parsed_args.config_name is not None:
path, name = os.path.split(parsed_args.config_name)
                    # Make sure the path is not set, as this would disable the validation schema.
if path != '':
sys.stderr.write(
"ERROR Cannot set config file path using `--config-name` when "
"using schema. Please set path using `--config-path` and file name using "
"`--config-name` separately.\n"
)
sys.exit(1)
else:
name = config_name
# Register the configuration as a node under the name in the group.
cs.store(name=name, node=schema) # group=group,
# Wrap a callable object with name `parse_args`
# This is to mimic the ArgParser.parse_args() API.
class _argparse_wrapper:
def __init__(self, arg_parser):
self.arg_parser = arg_parser
self._actions = arg_parser._actions
def parse_args(self, args=None, namespace=None):
return parsed_args
            # no return value from run_hydra() as it may sometimes actually run the task_function
# multiple times (--multirun)
_run_hydra(
args_parser=_argparse_wrapper(args),
task_function=task_function,
config_path=config_path,
config_name=config_name,
)
return wrapper
return decorator
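# Usage sketch for hydra_runner: decorate a task entrypoint so Hydra resolves
# the config file and validates it against a structured schema. The schema,
# function, and config names below are placeholders, and the sketch assumes a
# conf/experiment.yaml exists relative to this file.
from dataclasses import dataclass


@dataclass
class _ExampleExperimentConfig:
    """Placeholder schema used only by the usage sketch below."""

    epochs: int = 10


@hydra_runner(config_path="conf", config_name="experiment", schema=_ExampleExperimentConfig)
def _example_main(cfg: DictConfig) -> None:
    """Print a value from the validated config."""
    print(cfg.epochs)


if __name__ == "__main__":
    _example_main()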
| tao_tensorflow2_backend-main | nvidia_tao_tf2/common/hydra/hydra_runner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNets backbone."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
from nvidia_tao_tf2.backbones.utils_tf import add_activation
from nvidia_tao_tf2.backbones.utils_tf import add_dense_head
from nvidia_tao_tf2.backbones.utils_tf import arg_scope
from nvidia_tao_tf2.backbones.utils_tf import CNNBlock
from nvidia_tao_tf2.backbones.utils_tf import get_batchnorm_axis
def ResNet(nlayers,
input_tensor=None,
use_batch_norm=False,
data_format='channels_first',
add_head=False,
head_activation='softmax',
nclasses=None,
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
all_projections=True,
freeze_blocks=None,
freeze_bn=False,
use_pooling=False,
use_bias=False,
**kwargs):
"""Construct a fixed-depth vanilla ResNet, based on the architectures from the original paper [1].
Args:
nlayers (int): the number of layers in the desired ResNet (e.g. 18, 34, ..., 152).
input_tensor (tensor): the input tensor.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
add_head (bool): whether to add the original [1] classification head. Note that if you
don't include the head, the actual number of layers in the model produced by this
            function is `nlayers-1`.
head_activation (string): Activation function for classification head.
nclasses (int): the number of classes to be added to the classification head. Can be `None`
if unused.
kernel_regularizer: regularizer to apply to kernels.
bias_regularizer: regularizer to apply to biases.
activation_type (str): Type of activation.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
all_projections (bool): whether to implement cnn subblocks with all shortcuts connections
forced as 1x1 convolutional layers as mentioned in [1] to enable full pruning of
ResNets. If set as False, the template instantiated will be the classic ResNet template
as in [1] with shortcut connections as skip connections when there is no stride change
and 1x1 convolutional layers (projection layers) when there is a stride change.
Note: The classic template cannot be fully pruned. Only the first N-1 number of layers
in the ResNet subblock can be pruned. All other layers must be added to exclude layers
list while pruning, including conv1 layer.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
use_pooling (bool): whether to use MaxPooling2D layer after first conv layer or use a
stride of 2 for first convolutional layer in subblock
use_bias(bool): Whether or not to use bias for the conv layers.
Returns:
Model: the output model after applying the ResNet on top of input `x`.
[1] Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
"""
if freeze_blocks is None:
freeze_blocks = []
# Determine proper input shape
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if input_tensor is None:
inputs = keras.layers.Input(shape=input_shape, name='Input')
else:
inputs = input_tensor
freeze0 = 0 in freeze_blocks
freeze1 = 1 in freeze_blocks
freeze2 = 2 in freeze_blocks
freeze3 = 3 in freeze_blocks
freeze4 = 4 in freeze_blocks
activation_kwargs = activation_kwargs or {'name': 'stem_activation'}
x = keras.layers.Conv2D(64, (7, 7),
strides=(2, 2),
padding='same',
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='conv1',
trainable=not freeze0,
use_bias=use_bias)(inputs)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(axis=get_batchnorm_axis(data_format),
trainable=False,
name='bn_conv1')(x)
else:
x = keras.layers.BatchNormalization(axis=get_batchnorm_axis(data_format),
name='bn_conv1')(x)
x = add_activation(activation_type, **activation_kwargs)(x)
    first_stride = 2  # Stride of the first convolutional subblock.
    last_stride = 1  # Stride of the last convolutional subblock.
if use_pooling:
x = keras.layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2), padding='same',
data_format=data_format)(x)
first_stride = 1
last_stride = 2
# Define a block functor which can create blocks.
with arg_scope(
[CNNBlock],
use_batch_norm=use_batch_norm,
all_projections=all_projections,
use_shortcuts=True,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_bn=freeze_bn,
activation_kwargs={},
use_bias=use_bias):
if nlayers == 10:
x = CNNBlock(repeat=1, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=1, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 18:
x = CNNBlock(repeat=2, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=2, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 34:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 50:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 101:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=23, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 152:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=8, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=36, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
else:
raise NotImplementedError(f'A resnet with nlayers={nlayers} is not implemented.')
# Add AveragePooling2D layer if use_pooling is enabled after resnet block.
if use_pooling:
x = keras.layers.AveragePooling2D(pool_size=(7, 7),
data_format=data_format,
padding='same')(x)
# Naming model.
model_name = f'resnet{nlayers}'
if not use_pooling:
model_name += '_nopool'
if use_batch_norm:
model_name += '_bn'
# Set up keras model object.
model = keras.models.Model(inputs=inputs, outputs=x, name=model_name)
# Add a dense head of nclasses if enabled.
if add_head:
model = add_dense_head(model, inputs, nclasses, head_activation)
return model
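# Usage sketch: build a ResNet-18 classifier in channels-first layout. The
# 224x224 input resolution and 10-class head are arbitrary example values.
if __name__ == "__main__":
    example_input = keras.layers.Input(shape=(3, 224, 224), name='Input')
    example_model = ResNet(
        nlayers=18,
        input_tensor=example_input,
        use_batch_norm=True,
        data_format='channels_first',
        add_head=True,
        nclasses=10)
    example_model.summary()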
| tao_tensorflow2_backend-main | nvidia_tao_tf2/backbones/resnet_tf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
Activation,
AveragePooling2D,
BatchNormalization,
Conv2D,
Dense,
Dropout,
Flatten,
Input
)
from tensorflow.keras.models import Model
from nvidia_tao_tf2.backbones.utils_tf import (
block,
CONV_KERNEL_INITIALIZER,
DENSE_KERNEL_INITIALIZER,
force_stride16,
round_filters,
round_repeats,
swish
)
DEFAULT_BLOCKS_ARGS = (
{'kernel_size': 3, 'repeats': 1, 'filters_in': 32, 'filters_out': 16,
'expand_ratio': 1, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 2, 'filters_in': 16, 'filters_out': 24,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 2, 'filters_in': 24, 'filters_out': 40,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 3, 'filters_in': 40, 'filters_out': 80,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 3, 'filters_in': 80, 'filters_out': 112,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 4, 'filters_in': 112, 'filters_out': 192,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 1, 'filters_in': 192, 'filters_out': 320,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}
)
def EfficientNet(width_coefficient,
depth_coefficient,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation_fn=swish,
blocks_args=DEFAULT_BLOCKS_ARGS,
model_name='efficientnet',
add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
**kwargs):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation_fn: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
add_head: whether to include the fully-connected
layer at the top of the network.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `add_head` is False.
            It should have exactly 3 input channels.
classes: optional number of classes to classify images
into, only to be specified if `add_head` is True.
data_format(str): Keras data format.
freeze_bn(bool): Freeze all the BN layers or not.
freeze_blocks(list): Block IDs to be frozen in this model.
use_bias(bool): Use bias or not for Conv layers that are followed by a BN layer.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
stride16(bool): Limit the total stride of the model to 16 or not, default is stride 32.
This is used for DetectNet_v2. All other use cases will use stride 32.
# Returns
A Keras model instance.
"""
# activation_fn defaults to swish if it is None or empty string
bn_opt = {
'momentum': 0.99,
'epsilon': 1e-3
}
if activation_fn in [None, ""]:
activation_fn = swish
# old_data_format = K.image_data_format()
K.set_image_data_format(data_format)
if freeze_blocks is None:
freeze_blocks = []
if input_tensor is None:
img_input = Input(shape=input_shape, name='Input')
else:
img_input = Input(tensor=input_tensor, shape=input_shape, name='Input')
bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
# Build stem
x = img_input
x = Conv2D(
round_filters(32, depth_divisor, width_coefficient),
3,
strides=2,
padding='same',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not bool(0 in freeze_blocks),
data_format=data_format,
name='stem_conv'
)(x)
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='stem_bn', trainable=False)(x)
else:
x = BatchNormalization(axis=bn_axis, name='stem_bn', **bn_opt)(x)
x = Activation(activation_fn, name='stem_activation')(x)
# Build blocks
blocks_args = deepcopy(list(blocks_args))
# in stride 16 mode, force the last stride 2 to be 1.
if stride16:
force_stride16(blocks_args)
b = 0
blocks = float(sum(args['repeats'] for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'], depth_divisor, width_coefficient)
args['filters_out'] = round_filters(args['filters_out'], depth_divisor, width_coefficient)
for j in range(round_repeats(args.pop('repeats'), depth_coefficient)):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
x = block(
x, activation_fn, drop_connect_rate * b / blocks,
freeze=bool((i + 1) in freeze_blocks),
freeze_bn=freeze_bn,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
name=f'block{i+1}{chr(j + 97)}_',
**args)
b += 1
# Build top
x = Conv2D(
round_filters(1280, depth_divisor, width_coefficient),
1,
padding='same',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
trainable=not bool((len(blocks_args) + 1) in freeze_blocks),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
name='top_conv'
)(x)
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='top_bn', trainable=False)(x)
else:
x = BatchNormalization(axis=bn_axis, name='top_bn', **bn_opt)(x)
x = Activation(activation_fn, name='top_activation')(x)
if add_head:
# global pool as: avg pool + flatten for pruning support
output_shape = x.get_shape().as_list()
if data_format == 'channels_first':
pool_size = (output_shape[-2], output_shape[-1])
else:
pool_size = (output_shape[-3], output_shape[-2])
x = AveragePooling2D(
pool_size=pool_size, name='avg_pool',
data_format=data_format, padding='valid'
)(x)
x = Flatten(name='flatten')(x)
if dropout_rate > 0:
x = Dropout(dropout_rate, name='top_dropout')(x)
# head will always not be frozen
# set the name to 'predictions' to align with that in add_dense_head()
x = Dense(
classes,
activation='softmax',
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='predictions'
)(x)
# Create model.
model = Model(img_input, x, name=model_name)
# restore previous data format
# K.set_image_data_format(old_data_format)
return model
def EfficientNetB0(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B0."""
return EfficientNet(1.0, 1.0, 0.2,
drop_connect_rate=0,
model_name='efficientnet-b0',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
                        activation_fn=activation_type,
**kwargs)
def EfficientNetB1(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B1."""
return EfficientNet(1.0, 1.1, 0.2,
model_name='efficientnet-b1',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB2(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B2."""
return EfficientNet(1.1, 1.2, 0.3,
model_name='efficientnet-b2',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB3(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B3."""
return EfficientNet(1.2, 1.4, 0.3,
model_name='efficientnet-b3',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB4(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B4."""
return EfficientNet(1.4, 1.8, 0.4,
model_name='efficientnet-b4',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB5(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B5."""
return EfficientNet(1.6, 2.2, 0.4,
model_name='efficientnet-b5',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB6(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B6."""
return EfficientNet(1.8, 2.6, 0.5,
model_name='efficientnet-b6',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB7(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B7."""
return EfficientNet(2.0, 3.1, 0.5,
model_name='efficientnet-b7',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
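# Usage sketch: instantiate an EfficientNet-B0 classifier. The channels-first
# input shape and 100-class head are arbitrary example values.
if __name__ == "__main__":
    example_model = EfficientNetB0(
        add_head=True,
        input_shape=(3, 224, 224),
        classes=100,
        data_format="channels_first")
    example_model.summary()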
| tao_tensorflow2_backend-main | nvidia_tao_tf2/backbones/efficientnet_tf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Backbones."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/backbones/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNet V1 and V2 models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras import backend
from tensorflow.keras import layers
from tensorflow.keras import models
from nvidia_tao_tf2.backbones.utils_tf import _conv_block, _depthwise_conv_block, \
_inverted_res_block, _make_divisible
from nvidia_tao_tf2.backbones.utils_tf import arg_scope
def MobileNet(inputs,
input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
stride=32,
add_head=True,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
nclasses=1000,
use_batch_norm=True,
activation_type='relu',
freeze_bn=False,
freeze_blocks=None,
use_bias=False):
"""The MobileNet model architecture.
Args:
inputs(tensor): Input tensor.
input_shape(tuple, None): Shape of the input tensor, can be None.
alpha(float): The alpha parameter, defaults to 1.0.
depth_multiplier(int): Depth multiplier for Depthwise Conv, defaults to 1.
dropout(float): Dropout ratio.
stride(int): The total stride of this model.
add_head(bool): Whether or not to add the ImageNet head. If not, will add dense head.
data_format(str): Data format, can be channels_first or channels_last.
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
nclasses(int): Number of classes the output will be classified into.
use_batch_norm(bool): Whether or not to use the BN layer.
activation_type(str): Activation type, can be relu or relu6.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor.
"""
# Determine proper input shape and default size.
assert stride in [16, 32], (
f"Only stride 16 and 32 are supported, got {stride}"
)
old_data_format = backend.image_data_format()
backend.set_image_data_format(data_format)
if freeze_blocks is None:
freeze_blocks = []
if input_shape is None:
if backend.image_data_format() == 'channels_first':
input_shape = (3, 224, 224)
else:
input_shape = (224, 224, 3)
if inputs is None:
img_input = layers.Input(shape=input_shape, name='Input')
else:
if not backend.is_keras_tensor(inputs):
img_input = layers.Input(tensor=inputs, shape=input_shape, name='Input')
else:
img_input = inputs
with arg_scope([_conv_block, _depthwise_conv_block],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
freeze_bn=freeze_bn,
use_bias=use_bias):
x = _conv_block(img_input, 32, alpha, strides=(2, 2),
trainable=not (0 in freeze_blocks))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1,
trainable=not (1 in freeze_blocks))
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier,
strides=(2, 2), block_id=2,
trainable=not (2 in freeze_blocks))
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3,
trainable=not (3 in freeze_blocks))
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier,
strides=(2, 2), block_id=4,
trainable=not (4 in freeze_blocks))
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5,
trainable=not (5 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,
strides=(2, 2), block_id=6,
trainable=not (6 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7,
trainable=not (7 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8,
trainable=not (8 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9,
trainable=not (9 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10,
trainable=not (10 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11,
trainable=not (11 in freeze_blocks))
# make it a network with a stride of 32, otherwise, the stride is 16.
if stride == 32:
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier,
strides=(2, 2), block_id=12,
trainable=not (12 in freeze_blocks))
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13,
trainable=not (13 in freeze_blocks))
if add_head:
x = layers.AveragePooling2D(pool_size=(7, 7),
data_format=data_format, padding='valid')(x)
x = layers.Flatten(name='flatten_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Dense(nclasses, activation='softmax', name='predictions',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
# Create model.
model_name = 'mobilenet'
if use_batch_norm:
model_name += '_bn'
if add_head:
model_name += '_add_head'
model = models.Model(img_input, x, name=model_name)
backend.set_image_data_format(old_data_format)
return model
def MobileNetV2(inputs,
input_shape=None,
alpha=1.0,
depth_multiplier=1,
stride=32,
add_head=True,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
activation_type='relu',
all_projections=False,
nclasses=1000,
freeze_bn=False,
freeze_blocks=None,
use_bias=False):
"""The MobileNet V2 model architecture.
Args:
inputs(tensor): Input tensor.
input_shape(tuple, None): Shape of the input tensor, can be None.
alpha(float): The alpha parameter, defaults to 1.0.
depth_multiplier(int): Depth multiplier for Depthwise Conv, defaults to 1.
stride(int): The total stride of this model.
add_head(bool): Whether or not to add the ImageNet head. If not, will add dense head.
data_format(str): Data format, can be channels_first or channels_last.
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
nclasses(int): Number of classes the output will be classified into.
use_batch_norm(bool): Whether or not to use the BN layer.
activation_type(str): Activation type, can be relu or relu6.
freeze_bn(bool): Whether or not to freeze the BN layers.
        freeze_blocks(list): the list of blocks in the model to be frozen.
        use_bias(bool): Whether or not to use bias for the conv layers.
Returns:
The output tensor.
"""
assert stride in [16, 32], (
f"Only stride 16 and 32 are supported, got {stride}"
)
old_data_format = backend.image_data_format()
backend.set_image_data_format(data_format)
if freeze_blocks is None:
freeze_blocks = []
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
if input_shape is None:
if backend.image_data_format() == 'channels_first':
input_shape = (3, 224, 224)
else:
input_shape = (224, 224, 3)
if inputs is None:
img_input = layers.Input(shape=input_shape, name='Input')
else:
if not backend.is_keras_tensor(inputs):
img_input = layers.Input(tensor=inputs, shape=input_shape, name='Input')
else:
img_input = inputs
first_block_filters = _make_divisible(32 * alpha, 8)
# Use explicit padding.
x = layers.ZeroPadding2D((1, 1), name='conv1_pad')(img_input)
x = layers.Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=use_bias,
name='conv1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not (0 in freeze_blocks))(x)
if use_batch_norm:
if freeze_bn:
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
trainable=False,
name='bn_conv1')(x)
else:
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='bn_conv1')(x)
if activation_type == 'relu6':
x = layers.ReLU(6., name='re_lu_0')(x)
else:
x = layers.ReLU(name='re_lu_0')(x)
with arg_scope([_inverted_res_block],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
all_projections=all_projections,
use_bias=use_bias,
freeze_bn=freeze_bn):
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0,
trainable=not (1 in freeze_blocks))
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1,
trainable=not (2 in freeze_blocks))
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2,
trainable=not (3 in freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3,
trainable=not (4 in freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4,
trainable=not (5 in freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5,
trainable=not (6 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,
expansion=6, block_id=6,
trainable=not (7 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=7,
trainable=not (8 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=8,
trainable=not (9 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=9,
trainable=not (10 in freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=10,
trainable=not (11 in freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=11,
trainable=not (12 in freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=12,
trainable=not (13 in freeze_blocks))
if stride == 32:
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,
expansion=6, block_id=13,
trainable=not (14 in freeze_blocks))
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=14,
trainable=not (15 in freeze_blocks))
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=15,
trainable=not (16 in freeze_blocks))
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,
expansion=6, block_id=16,
trainable=not (17 in freeze_blocks))
# no alpha applied to last conv as stated in the paper:
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = layers.Conv2D(last_block_filters,
kernel_size=1,
use_bias=use_bias,
name='conv_1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not (18 in freeze_blocks))(x)
if use_batch_norm:
if freeze_bn:
x = layers.BatchNormalization(epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
trainable=False,
name='conv_1_bn')(x)
else:
x = layers.BatchNormalization(epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name='conv_1_bn')(x)
if activation_type == 'relu6':
x = layers.ReLU(6., name='re_lu_head')(x)
else:
x = layers.ReLU(name='re_lu_head')(x)
if add_head:
x = layers.AveragePooling2D(pool_size=(7, 7),
data_format=data_format,
padding='valid')(x)
x = layers.Flatten(name='flatten_1')(x)
x = layers.Dense(nclasses,
activation='softmax',
name='predictions',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
# Create model.
model_name = 'mobilenet_v2'
if use_batch_norm:
model_name += '_bn'
if add_head:
model_name += '_add_head'
model = models.Model(img_input, x, name=model_name)
backend.set_image_data_format(old_data_format)
return model
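# Usage sketch: build a MobileNetV2 feature extractor without the
# classification head. The input resolution is an arbitrary example value.
if __name__ == "__main__":
    example_model = MobileNetV2(
        inputs=None,
        input_shape=(3, 224, 224),
        alpha=1.0,
        stride=32,
        add_head=False,
        data_format='channels_first')
    example_model.summary()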
| tao_tensorflow2_backend-main | nvidia_tao_tf2/backbones/mobilenet_tf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO utilities for TAO backbones."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import inspect
import math
import re
import threading
import tensorflow as tf
from tensorflow import keras
bn_axis_map = {'channels_last': 3, 'channels_first': 1}
SUBBLOCK_IDS = ['1x1', '3x3_reduce', '3x3', '5x5_reduce', '5x5', 'pool', 'pool_proj']
_ARGSTACK = [{}]
_DECORATED_OPS = {}
def _get_arg_stack():
if _ARGSTACK:
return _ARGSTACK
_ARGSTACK.append({})
return _ARGSTACK
def _current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def _key_op(op):
return getattr(op, "_key_op", str(op))
def _name_op(op):
return (op.__module__, op.__name__)
def _kwarg_names(func):
kwargs_length = len(func.__defaults__) if func.__defaults__ else 0
return func.__code__.co_varnames[-kwargs_length: func.__code__.co_argcount]
def _add_op(op):
key_op = _key_op(op)
if key_op not in _DECORATED_OPS:
_DECORATED_OPS[key_op] = _kwarg_names(op)
@contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Store the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a
dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
then every op in it need to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
ValueError: if any op in list_ops has not be decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
raise ValueError(
"When attempting to re-use a scope by suppling a"
"dictionary, kwargs must be empty."
)
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError(
"list_ops_or_scope must either be a list/tuple or reused"
"scope (i.e. dict)"
)
try:
current_scope = _current_arg_scope().copy()
for op in list_ops_or_scope:
if inspect.isclass(op):
# If we decorated a class, use the scope on the initializer
op = op.__init__
key_op = _key_op(op)
if not has_arg_scope(op):
name_module, name_op = _name_op(op)
raise ValueError(
f"{name_module}::{name_op} is not decorated with @add_arg_scope"
)
if key_op in current_scope:
current_kwargs = current_scope[key_op].copy()
current_kwargs.update(kwargs)
current_scope[key_op] = current_kwargs
else:
current_scope[key_op] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorate a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
        The decorated function func_with_args().
"""
@functools.wraps(func)
def func_with_args(*args, **kwargs):
current_scope = _current_arg_scope()
current_args = kwargs
key_func = _key_op(func)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
setattr(func_with_args, "_key_op", _key_op(func))
setattr(func_with_args, "__doc__", func.__doc__)
return func_with_args
def has_arg_scope(func):
"""Check whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
return _key_op(func) in _DECORATED_OPS
def arg_scoped_arguments(func):
"""Return the list kwargs that arg_scope can set for a func.
Args:
func: function which has been decorated with @add_arg_scope.
Returns:
a list of kwargs names.
"""
assert has_arg_scope(func)
return _DECORATED_OPS[_key_op(func)]
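# Usage sketch for arg_scope/add_arg_scope: decorate an op so its keyword
# defaults can be set once for an entire scope. The conv helper below is a
# placeholder, not one of this repo's layer builders.
@add_arg_scope
def _example_conv(x, filters, kernel_size=3, padding='same'):
    """Toy op whose keyword defaults can be overridden through arg_scope."""
    return keras.layers.Conv2D(filters, kernel_size, padding=padding)(x)


def _example_arg_scope_usage(x):
    """Apply two convolutions that share scoped kernel_size/padding defaults."""
    with arg_scope([_example_conv], kernel_size=1, padding='valid'):
        x = _example_conv(x, 32)  # uses kernel_size=1, padding='valid'
        x = _example_conv(x, 64)  # same scoped defaults
    return x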
def add_dense_head(model, inputs, nclasses, activation):
"""Create a model that stacks a dense head on top of a another model. It is also flattened.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
nclasses (int): the amount of outputs of the dense map
activation (string): activation function to use e.g. 'softmax' or 'linear'.
Returns:
Model: A model with the head stacked on top of the `model` input.
"""
x = model.outputs[0]
head_name = f"head_fc{nclasses}"
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(nclasses, activation=activation, name=head_name)(x)
model = keras.models.Model(
inputs=inputs, outputs=x, name=f"{model.name}_fc{nclasses}"
)
return model
def get_batchnorm_axis(data_format):
"""Convert a data_format string to the correct index in a 4 dimensional tensor.
Args:
data_format (str): either 'channels_last' or 'channels_first'.
Returns:
int: the axis corresponding to the `data_format`.
"""
return bn_axis_map[data_format]
class subblock_ids(object):
"""A operator to get index of subblock, overload [] operation."""
def __getitem__(self, key):
"""Generate a subblock ID and return.
Args:
key (int): an index used to generate the subblock ID.
"""
cur = key
subblock_id = ''
while cur >= 0:
ch = chr(ord('a') + cur % 26)
subblock_id = ch + subblock_id
cur = cur // 26 - 1
return subblock_id
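# Worked example: subblock_ids maps an integer index to a spreadsheet-style
# letter suffix used in layer names, e.g.
#   subblock_ids()[0]  -> 'a'
#   subblock_ids()[25] -> 'z'
#   subblock_ids()[26] -> 'aa'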
class InceptionV1Block(object):
"""A functor for creating a Inception v1 block of layers."""
@add_arg_scope
def __init__(self,
use_batch_norm,
data_format,
kernel_regularizer,
bias_regularizer,
subblocks,
index,
freeze_bn=False,
activation_type='relu',
use_bias=True,
trainable=True,
use_td=False):
"""Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
subblocks (tuple): A tuple of size 6, defining the number of feature maps for
subblocks in an inception block.
For GoogLeNet from "Going Deeper with Convolutions" by Szegedy, Christian, et al.,
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015:
Inception_3a: (64, 96, 128, 16, 32, 32)
Defines Inception block with following parallel branches
1) 64 outputs from 1x1 convolutions
2.1) 96 outputs from 1x1 convolutions --> 2.2) 128 outputs from 3x3 convolutions
3.1) 16 outputs from 1x1 convolutions --> 3.2) 32 outputs from 5x5 convolutions
4.1) Max pooling with 3x3 pooling size --> 4.2) 32 outputs from 1x1 convolutions
the outputs of 1, 2.2, 3.2, and 4.2 are concatenated to produce final output.
index (int): the index of the block to be created.
activation_type (str): activation function type.
freeze_bn(bool): Whether or not to freeze the BN layer.
use_bias(bool): Whether or not to use bias for Conv/Dense, etc.
trainable(bool): Whether or not to set the weights to be trainable.
use_td(bool): Whether or not to wrap the layers into a TimeDistributed layer.
This is useful in FasterRCNN.
"""
self.use_batch_norm = use_batch_norm
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.subblocks = subblocks
self.index = index
self.name = f'inception_{index}'
self.freeze_bn = freeze_bn
self.use_bias = use_bias
self.trainable = trainable
self.use_td = use_td
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
x = self._subblocks(x, name_prefix=self.name)
return x
def _subblocks(self, x, name_prefix=None):
"""Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
name_prefix (str): name prefix for all the layers created in this function.
Returns:
tensor: the output tensor after applying the Inception block on top of input `x`.
"""
nblocks = len(self.subblocks)
if nblocks != 6:
raise ValueError(f"Inception V1 block expects 6 subblocks, got {nblocks}.")
if self.use_batch_norm:
bn_axis = get_batchnorm_axis(self.data_format)
# First branch is 1x1 conv with padding = 0, and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[0],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}_{SUBBLOCK_IDS[0]}',
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x1 = layer(x)
if self.use_batch_norm:
_name = f'{name_prefix}_{SUBBLOCK_IDS[0]}_bn'
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
layer.trainable = False
x1 = layer(x1)
x1 = keras.layers.Activation(self.activation_type)(x1)
# Second branch is 1x1 conv with padding = 0, and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[1],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}_{SUBBLOCK_IDS[1]}',
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x2 = layer(x)
if self.use_batch_norm:
_name = f'{name_prefix}_{SUBBLOCK_IDS[1]}_bn'
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
layer.trainable = False
x2 = layer(x2)
x2 = keras.layers.Activation(self.activation_type)(x2)
# Second branch continues: 3x3 conv with 'same' padding and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[2],
(3, 3),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}_{SUBBLOCK_IDS[2]}',
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x2 = layer(x2)
if self.use_batch_norm:
_name = f'{name_prefix}_{SUBBLOCK_IDS[2]}_bn'
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
layer.trainable = False
x2 = layer(x2)
x2 = keras.layers.Activation(self.activation_type)(x2)
# Third branch is 1x1 conv with stride = 1
layer = keras.layers.Conv2D(
self.subblocks[3],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}_{SUBBLOCK_IDS[3]}',
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x3 = layer(x)
if self.use_batch_norm:
_name = f'{name_prefix}_{SUBBLOCK_IDS[3]}_bn'
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
layer.trainable = False
x3 = layer(x3)
x3 = keras.layers.Activation(self.activation_type)(x3)
# Third branch continues: 5x5 conv with 'same' padding and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[4],
(5, 5),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}_{SUBBLOCK_IDS[4]}',
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x3 = layer(x3)
if self.use_batch_norm:
_name = f'{name_prefix}_{SUBBLOCK_IDS[4]}_bn'
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
layer.trainable = False
x3 = layer(x3)
x3 = keras.layers.Activation(self.activation_type)(x3)
# Fourth branch is max pool stride = 1, and a 1x1 conv
layer = keras.layers.MaxPooling2D(
pool_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=self.data_format,
name=f'{name_prefix}_{SUBBLOCK_IDS[5]}')
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x4 = layer(x)
layer = keras.layers.Conv2D(
self.subblocks[5],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}_{SUBBLOCK_IDS[6]}',
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x4 = layer(x4)
if self.use_batch_norm:
_name = f'{name_prefix}_{SUBBLOCK_IDS[6]}_bn'
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
layer.trainable = False
x4 = layer(x4)
x4 = keras.layers.Activation(self.activation_type)(x4)
if self.data_format == 'channels_first':
concat_axis = 1
if self.use_td:
concat_axis += 1
else:
concat_axis = -1
layer = keras.layers.Concatenate(axis=concat_axis, name=f'{name_prefix}_output')
x = layer([x1, x2, x3, x4])
return x
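# Illustrative usage sketch (assumes a 4D input tensor `x`; the subblocks
# values follow the Inception_3a example in the docstring):
#
#     inception = InceptionV1Block(use_batch_norm=True,
#                                  data_format='channels_first',
#                                  kernel_regularizer=None,
#                                  bias_regularizer=None,
#                                  subblocks=(64, 96, 128, 16, 32, 32),
#                                  index=1)
#     x = inception(x)  # concatenation of the four parallel branches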
def update_config(model, inputs, config, name_pattern=None):
"""Update the configuration of an existing model.
Note that the input tensors to apply the new model to must be different
from those of the original model. This is because when Keras
clones a model it retains the original input layer and adds an extra one
on top.
In order to update the configuration of only certain layers,
a name pattern (regular expression) may be provided.
Args:
model (Model): the model to update.
inputs (tensors): the tensor to apply the new model to.
config (dict): dictionary of layer attributes to update.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
Returns:
Model: the updated model.
"""
# Loop through all layers and update those that have a regularizer.
for layer in model.layers:
if name_pattern is None or re.match(name_pattern, layer.name):
for name, value in config.items():
if hasattr(layer, name):
setattr(layer, name, value)
new_model = model # clone_model(model, [inputs])
new_model.set_weights(model.get_weights())
return new_model
def update_regularizers(model, inputs, kernel_regularizer, bias_regularizer, name_pattern=None):
"""Update the weight decay regularizers of an existing model.
Note that the input tensors to apply the new model to must be different
from those of the original model. This is because when Keras
clones a model it retains the original input layer and adds an extra one
on top.
In order to update the regularizers of only certain layers,
a name pattern (regular expression) may be provided.
Args:
model (Model): the model to update the regularizers of.
inputs (tensors): the tensor to apply the new model to.
kernel_regularizer (object): regularizer to apply to kernels.
bias_regularizer (object): regularizer to apply to biases.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
"""
config = {'bias_regularizer': bias_regularizer,
'kernel_regularizer': kernel_regularizer}
return update_config(model, inputs, config, name_pattern)
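# Illustrative usage sketch (assumes an existing `model` and a fresh input
# tensor `new_input`, both hypothetical names):
#
#     reg = keras.regularizers.l2(1e-4)
#     model = update_regularizers(model, new_input, kernel_regularizer=reg,
#                                 bias_regularizer=reg, name_pattern='conv.*')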
@add_arg_scope
def _conv_block(inputs, filters, alpha, kernel=(3, 3),
strides=(1, 1), kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
activation_type='relu', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False):
"""Construct a conv block to be used in MobileNet.
Args:
inputs(tensor): The input tensor.
filters(int): The number of filters.
alpha(float): The alpha parameter for MobileNet to control the final number of filters.
kernel(int, tuple): The kernel size, can be an int or a tuple.
strides(int, tuple): The strides.
kernel_regularizer: Kernel regularizer to be applied to the block.
bias_regularizer: Bias regularizer to be applied to the block.
use_batch_norm(bool): Whether or not to use batch normalization layer.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor of this block.
"""
channel_axis = get_batchnorm_axis(data_format)
filters = int(filters * alpha)
# Use explicit padding here to avoid TF asymmetric padding.
# This will be fused into Conv layer, and TRT inference is faster than TF asymmetric padding.
# For accuracy, we found they are almost the same for the two padding styles.
x = keras.layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
x = keras.layers.Conv2D(
filters,
kernel,
padding='valid',
use_bias=use_bias,
strides=strides,
name='conv1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
trainable=False,
name='conv1_bn')(x)
else:
x = keras.layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='conv_block_relu6')(x)
else:
x = keras.layers.ReLU(name='conv_block_relu')(x)
return x
@add_arg_scope
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1),
block_id=1, kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
activation_type='relu', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False):
"""Depthwise conv block as building blocks for MobileNet.
Args:
inputs(tensor): The input tensor.
pointwise_conv_filters(int): The number of pointwise conv filters.
alpha(float): The alpha parameter for MobileNet.
depth_multiplier(int): The depth multiplier (default: 1).
strides(int, tuple): The strides, can be an int or a tuple.
block_id(int): The block_id, used to name the blocks.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
use_batch_norm(bool): Whether or not to use batch normalization layer.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor.
"""
channel_axis = get_batchnorm_axis(data_format)
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
# Also use explicit padding here to avoid TF style padding.
x = keras.layers.ZeroPadding2D((1, 1), name=f'conv_pad_{block_id}')(inputs)
x = keras.layers.DepthwiseConv2D(
(3, 3),
padding='valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=use_bias,
name=f'conv_dw_{block_id}',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
trainable=False,
name=f'conv_dw_{block_id}_bn')(x)
else:
x = keras.layers.BatchNormalization(axis=channel_axis,
name=f'conv_dw_{block_id}_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name=f'conv_dw_{block_id}_relu6')(x)
else:
x = keras.layers.ReLU(name=f'conv_dw_{block_id}_relu')(x)
x = keras.layers.Conv2D(
pointwise_conv_filters,
(1, 1),
padding='valid',
use_bias=use_bias,
strides=(1, 1),
name=f'conv_pw_{block_id}',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
trainable=False,
name=f'conv_pw_{block_id}_bn')(x)
else:
x = keras.layers.BatchNormalization(
axis=channel_axis,
name=f'conv_pw_{block_id}_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name=f'conv_pw_relu6_{block_id}')(x)
else:
x = keras.layers.ReLU(name=f'conv_pw_relu_{block_id}')(x)
return x
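# Illustrative usage sketch of the two MobileNet building blocks above
# (hypothetical `img_input` tensor; mirrors the standard MobileNet stem):
#
#     x = _conv_block(img_input, 32, alpha=1.0, strides=(2, 2))
#     x = _depthwise_conv_block(x, 64, alpha=1.0, block_id=1)
#     x = _depthwise_conv_block(x, 128, alpha=1.0, strides=(2, 2), block_id=2)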
@add_arg_scope
def _leaky_conv(inputs, filters, alpha=0.1, kernel=(3, 3),
strides=(1, 1), kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
padding='same', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False, name='conv1', use_td=False):
"""Construct a leaky relu conv block to be used in DarkNet.
Args:
inputs(tensor): The input tensor.
filters(int): The number of filters.
alpha(float): leaky rate for LeakyReLU
kernel(int, tuple): The kernel size, can be an int or a tuple.
strides(int, tuple): The strides.
padding(str): same or valid.
kernel_regularizer: Kernel regularizer to be applied to the block.
bias_regularizer: Bias regularizer to be applied to the block.
use_batch_norm(bool): Whether or not to use batch normalization layer.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
name(str): name of the layer.
use_td(bool): use TimeDistributed wrapper or not, default is False.
Returns:
The output tensor of this block.
"""
channel_axis = get_batchnorm_axis(data_format)
_layer = keras.layers.Conv2D(
filters,
kernel,
strides=strides,
padding=padding,
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=use_bias,
trainable=trainable,
name=name)
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
x = _layer(inputs)
if use_batch_norm:
_layer = keras.layers.BatchNormalization(axis=channel_axis, name=name + '_bn')
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
if freeze_bn:
_layer.trainable = False
x = _layer(x)
else:
x = _layer(x)
x = keras.layers.LeakyReLU(alpha=alpha, name=name + '_lrelu')(x)
return x
def _make_divisible(v, divisor, min_value=None):
"""Round `v` to the nearest multiple of `divisor`, never below `min_value`."""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
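# Worked examples (illustrative, not in the original source):
#
#     _make_divisible(33, 8)   # -> 32 (nearest multiple of 8)
#     _make_divisible(7, 8)    # -> 8  (clamped up to min_value == divisor)
#     _make_divisible(23, 16)  # -> 32 (16 would be a >10% drop, so round up)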
@add_arg_scope
def _inverted_res_block(inputs, expansion, stride, alpha, filters,
block_id, kernel_regularizer=None, bias_regularizer=None,
use_batch_norm=True, activation_type='relu',
data_format='channels_first', all_projections=True,
trainable=True, freeze_bn=False,
use_bias=False):
"""Inverted residual block as building blocks for MobileNet V2.
Args:
inputs(tensor): Input tensor.
expansion(float): Expansion factor of the filter numbers.
stride(int, tuple): Stride of this block.
alpha(float): alpha parameter.
filters(int): Number of filters.
block_id(int): block id for this block, as a name.
kernel_regularizer: Kernel regularizer to be applied.
bias_regularizer: Bias regularizer to be applied.
use_batch_norm(bool): Whether or not to use BN layers.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format, can be channels_first or channels_last.
all_projections(bool): Whether to use all projection layers to replace the shortcuts.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor.
"""
channel_axis = get_batchnorm_axis(data_format)
in_channels = inputs.shape[channel_axis]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = f'block_{block_id}_'
if block_id:
# Expand
x = keras.layers.Conv2D(
expansion * in_channels,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'expand',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
trainable=False,
name=prefix + 'expand_bn')(x)
else:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'expand_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name=f're_lu_{block_id + 1}')(x)
else:
x = keras.layers.ReLU(name=f're_lu_{block_id + 1}')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
# Use explicit padding
x = keras.layers.ZeroPadding2D((1, 1), name=prefix + 'depthwise_pad')(x)
x = keras.layers.DepthwiseConv2D(
kernel_size=3,
strides=stride,
activation=None,
use_bias=use_bias,
padding='valid',
name=prefix + 'depthwise',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
trainable=False,
name=prefix + 'depthwise_bn')(x)
else:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'depthwise_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name=prefix + 'relu6')(x)
else:
x = keras.layers.ReLU(name=prefix + 'relu')(x)
# Project
x = keras.layers.Conv2D(
pointwise_filters,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'project',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
trainable=False,
name=prefix + 'project_bn')(x)
else:
x = keras.layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_bn')(x)
if in_channels == pointwise_filters and stride == 1:
if all_projections:
inputs_projected = keras.layers.Conv2D(
in_channels,
kernel_size=1,
padding='valid',
use_bias=False,
activation=None,
name=prefix + 'projected_inputs',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(inputs)
return keras.layers.Add(name=prefix + 'add')([inputs_projected, x])
return keras.layers.Add(name=prefix + 'add')([inputs, x])
return x
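# Illustrative usage sketch (hypothetical tensor `x`; values mirror an early
# MobileNet V2 stage):
#
#     x = _inverted_res_block(x, expansion=6, stride=2, alpha=1.0,
#                             filters=24, block_id=1)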
def get_uid(base_name):
"""Return a unique ID."""
with get_uid.lock:
if base_name not in get_uid.seqn:
get_uid.seqn[base_name] = 0
uid = get_uid.seqn[base_name]
get_uid.seqn[base_name] += 1
return uid
get_uid.seqn = {}
get_uid.lock = threading.Lock()
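# Illustrative usage sketch: IDs count up independently per base name.
#
#     get_uid('block')  # -> 0
#     get_uid('block')  # -> 1
#     get_uid('conv')   # -> 0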
def add_activation(activation_type, **kwargs):
"""Create an activation layer based on activation type and additional arguments.
Note that the needed kwargs depend on the activation type.
Args:
activation_type (str): String to indicate activation type.
kwargs (dict): Additional keyword arguments depending on the activation type.
Returns:
activation_layer (a subclass of keras.layers.Layer): The layer type
depends on activation_type.
"""
if activation_type == 'relu-n':
max_value = kwargs.get('max_value', None)
activation_layer = keras.layers.ReLU(max_value=max_value)
elif activation_type == 'lrelu':
alpha = kwargs['alpha']
activation_layer = keras.layers.LeakyReLU(alpha=alpha)
elif activation_type == 'elu':
alpha = kwargs['alpha']
activation_layer = keras.layers.ELU(alpha=alpha)
else:
activation_layer = keras.layers.Activation(activation_type, **kwargs)
return activation_layer
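# Illustrative usage sketch:
#
#     act = add_activation('lrelu', alpha=0.1)      # keras.layers.LeakyReLU(alpha=0.1)
#     act = add_activation('relu-n', max_value=6.)  # keras.layers.ReLU(max_value=6.)
#     act = add_activation('relu')                  # falls through to Activation('relu')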
class CNNBlock(object):
"""A functor for creating a block of layers."""
@add_arg_scope
def __init__(self,
use_batch_norm,
use_shortcuts,
data_format,
kernel_regularizer,
bias_regularizer,
repeat,
stride,
subblocks,
index=None,
activation_type='relu',
freeze_bn=False,
freeze_block=False,
activation_kwargs=None,
dilation_rate=(1, 1),
all_projections=False,
use_bias=True):
"""Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
use_shortcuts (bool): whether shortcuts should be used. A typical ResNet by definition
uses shortcuts, but these can be toggled off to use the same ResNet topology without
the shortcuts.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
repeat (int): repeat number.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
subblocks (list of tuples): A list of tuples defining settings for each consecutive
convolution. Example:
`[(3, 64), (3, 64)]`
The two items in each tuple represents the kernel size and the amount of filters in
a convolution, respectively. The convolutions are added in the order of the list.
index (int): the index of the block to be created.
activation_type (str): activation function type.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
dilation_rate (int or (int, int)): An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
all_projections (bool): A boolean flag to determine whether all shortcut connections
should be implemented as projection layers to facilitate full pruning or not.
use_bias (bool): whether the layer uses a bias vector.
"""
self.use_batch_norm = use_batch_norm
self.use_shortcuts = use_shortcuts
self.all_projections = all_projections
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.activation_kwargs = activation_kwargs or {}
self.dilation_rate = dilation_rate
self.repeat = repeat
self.stride = stride
self.use_bias = use_bias
self.subblocks = subblocks
self.subblock_ids = subblock_ids()
self.freeze_bn = freeze_bn
self.freeze_block = freeze_block
if index is not None:
self.name = f'block_{index}'
else:
block_uid = get_uid('block') + 1
self.name = f'block_{block_uid}'
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
for i in range(self.repeat):
name = f'{self.name}{self.subblock_ids[i]}_'
if i == 0:
# Set the stride only on the first layer.
stride = self.stride
dimension_changed = True
else:
stride = 1
dimension_changed = False
x = self._subblocks(x,
stride,
dimension_changed,
name_prefix=name,
freeze=self.freeze_block)
return x
def _subblocks(self, x, stride, dimension_changed, name_prefix=None, freeze=False):
"""Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
dimension_changed (bool): This indicates whether the dimension has been changed for this
block. If this is true, then we need to account for the change, or else we will be
unable to re-add the shortcut tensor due to incompatible dimensions. This can be
solved by applying a (1x1) convolution [1]. (The paper also notes the possibility of
zero-padding the shortcut tensor to match any larger output dimension, but this is
not implemented.)
name_prefix (str): name prefix for all the layers created in this function.
Returns:
tensor: the output tensor after applying the ResNet block on top of input `x`.
"""
bn_axis = get_batchnorm_axis(self.data_format)
shortcut = x
nblocks = len(self.subblocks)
for i in range(nblocks):
kernel_size, filters = self.subblocks[i]
if i == 0:
strides = (stride, stride)
else:
strides = (1, 1)
x = keras.layers.Conv2D(
filters, (kernel_size, kernel_size),
strides=strides,
padding='same',
dilation_rate=self.dilation_rate,
data_format=self.data_format,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}conv_{i+1}',
trainable=not freeze)(x)
if self.use_batch_norm:
if self.freeze_bn:
x = keras.layers.BatchNormalization(
axis=bn_axis,
trainable=False,
name=f'{name_prefix}bn_{i+1}')(x)
else:
x = keras.layers.BatchNormalization(
axis=bn_axis, name=f'{name_prefix}bn_{i+1}')(x)
if i != nblocks - 1: # All except last conv in block.
x = add_activation(self.activation_type,
name=f'{name_prefix}{self.activation_type}_{i+1}')(x)
if self.use_shortcuts:
if self.all_projections:
# Implementing shortcut connections as 1x1 projection layers irrespective of
# dimension change.
shortcut = keras.layers.Conv2D(
filters, (1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}conv_shortcut',
trainable=not freeze)(shortcut)
if self.use_batch_norm:
if self.freeze_bn:
_name = f'{name_prefix}bn_shortcut'
shortcut = keras.layers.BatchNormalization(
axis=bn_axis,
trainable=False,
name=_name)(shortcut)
else:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis, name=f'{name_prefix}bn_shortcut')(shortcut)
else:
# Add projection layers to the shortcut only if there is a change in dimension.
if dimension_changed: # Dimension changed.
shortcut = keras.layers.Conv2D(
filters, (1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name=f'{name_prefix}conv_shortcut',
trainable=not freeze)(shortcut)
if self.use_batch_norm:
if self.freeze_bn:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis,
trainable=False,
name=f'{name_prefix}bn_shortcut')(shortcut)
else:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis, name=f'{name_prefix}bn_shortcut')(shortcut)
x = keras.layers.add([x, shortcut])
x = add_activation(self.activation_type,
name=f'{name_prefix}{self.activation_type}')(x)
return x
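# Illustrative usage sketch (hypothetical tensor `x`; a ResNet-style basic
# block repeated twice with a downsampling first conv):
#
#     cnn_block = CNNBlock(use_batch_norm=True, use_shortcuts=True,
#                          data_format='channels_first',
#                          kernel_regularizer=None, bias_regularizer=None,
#                          repeat=2, stride=2, subblocks=[(3, 64), (3, 64)],
#                          index=1)
#     x = cnn_block(x)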
@add_arg_scope
def fire_module(inputs, block_id, squeeze, expand, kernel_regularizer=None,
bias_regularizer=None, data_format='channels_first',
trainable=True):
"""The squeeze net fire module architecture.
For details, see https://arxiv.org/pdf/1602.07360.pdf
Args:
inputs(tensor): Input tensor.
block_id(int): Block id for current module
squeeze(int): number of filters for squeeze conv layer
expand(int): number of filters for expand conv layers (1x1 and 3x3)
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
data_format(str): Data format, can be channels_first or channels_last.
trainable(bool): whether to make the conv layer trainable or not.
Returns:
The output tensor.
"""
concat_axis = 1 if data_format == 'channels_first' else 3
x = keras.layers.Conv2D(
squeeze,
kernel_size=(1, 1),
padding='same',
name='fire' + str(block_id) + '_squeeze_conv',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(inputs)
x = keras.layers.Activation('relu', name='fire' + str(block_id) + '_squeeze')(x)
b_1x1 = keras.layers.Conv2D(
expand,
kernel_size=(1, 1),
padding='same',
name='fire' + str(block_id) + '_expand_conv1x1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(x)
b_1x1 = keras.layers.Activation('relu', name='fire' + str(block_id) + '_expand_1x1')(b_1x1)
b_3x3 = keras.layers.Conv2D(
expand,
kernel_size=(3, 3),
padding='same',
name='fire' + str(block_id) + '_expand_conv3x3',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(x)
b_3x3 = keras.layers.Activation('relu', name='fire' + str(block_id) + '_expand_3x3')(b_3x3)
return keras.layers.Concatenate(axis=concat_axis, name='fire' + str(block_id))([b_1x1, b_3x3])
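# Illustrative usage sketch (hypothetical tensor `x`; values mirror the
# original SqueezeNet fire2 module):
#
#     x = fire_module(x, block_id=2, squeeze=16, expand=64)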
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
return x * keras.backend.sigmoid(x)
def mish(x):
"""Mish activation function.
See details: https://arxiv.org/pdf/1908.08681.pdf
Args:
x: input tensor
Returns:
mish(x) = x * tanh(ln(1 + e^x))
"""
return x * tf.math.tanh(tf.math.softplus(x))
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# EfficientNet actually uses an untruncated normal distribution for
# initializing conv layers, but keras.initializers.VarianceScaling use
# a truncated distribution.
# We decided against a custom initializer for better serializability.
'distribution': 'untruncated_normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def correct_pad(inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
# Arguments
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
# Returns
A tuple.
"""
img_dim = 2 if keras.backend.image_data_format() == 'channels_first' else 1
input_size = keras.backend.int_shape(inputs)[img_dim:(img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return ((correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]))
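# Worked example (illustrative; `inputs_224` and `inputs_225` stand for
# hypothetical channels_last tensors of spatial size 224 and 225): an even
# input size gets asymmetric padding, an odd one gets symmetric padding.
#
#     correct_pad(inputs_224, 3)  # -> ((0, 1), (0, 1))
#     correct_pad(inputs_225, 3)  # -> ((1, 1), (1, 1))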
def round_filters(filters, divisor, width_coefficient):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
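# Worked examples (illustrative): how EfficientNet scales widths and depths.
#
#     round_filters(32, divisor=8, width_coefficient=1.1)  # -> 32
#     round_filters(32, divisor=8, width_coefficient=1.5)  # -> 48
#     round_repeats(4, depth_coefficient=1.2)              # -> 5 (ceil(4.8))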
def block(inputs, activation_fn=swish, drop_rate=0., name='',
filters_in=32, filters_out=16, kernel_size=3, strides=1,
expand_ratio=1, se_ratio=0., id_skip=True, freeze=False,
freeze_bn=False, use_td=False, kernel_regularizer=None,
bias_regularizer=None, use_bias=False, data_format='channels_last'):
"""A mobile inverted residual block.
# Arguments
inputs: input tensor.
activation_fn: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.
freeze(bool): Freeze this block or not.
freeze_bn(bool): Freeze all the BN layers in this block or not.
use_td(bool): Use TimeDistributed wrapper layers for this block or not.
This is used to support 5D input tensors, e.g. in FasterRCNN use case.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
use_bias(bool): Use bias or not for Conv layers followed by a BN layer.
# Returns
output tensor for the block.
"""
bn_opt = {
'momentum': 0.99,
'epsilon': 1e-3
}
bn_axis = 3 if keras.backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
layer = keras.layers.Conv2D(
filters,
1,
padding='same',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'expand_conv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(inputs)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
layer.trainable = False
x = layer(x)
x = keras.layers.Activation(activation_fn, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
layer = keras.layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding='same',
use_bias=use_bias,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=not freeze,
name=name + 'dwconv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
layer.trainable = False
x = layer(x)
x = keras.layers.Activation(activation_fn, name=name + 'activation')(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
# Global pooling is needed if we are going to support dynamic
# input shape(e.g., in FasterRCNN) for this backbone
# AveragePooling2D requires static input shape, hence cannot work with
# dynamic shapes
if use_td:
# GlobalAveragePooling2D cannot work well with TimeDistributed layer
# because when converted to UFF, GlobalAveragePooling2D becomes Mean
# Op in UFF, and it cannot handle 5D input by itself like Conv2D does.
# So we rely on some manual shape transforms, so it sees 4D input
# (N, R*C, H, W), and reshape back to (N, R, C, 1, 1) after global pooling.
R, C, H, W = x.get_shape().as_list()[1:]
assert None not in (R, C, H, W), (
f"Expect R, C, H, W all not None. While got {(R, C, H, W)}"
)
# Another issue is for pruning. Reshape cannot follow a pruned layer
# in modulus pruning due to dimension change after pruning.
# while for current special case, we essentially reshape to (N, -1, H, W)
# whenever the filter number C changes or not during pruning.
# So in this case, the logic is still correct even if the number C is changed.
# But we cannot hard-code the target shape to (R*C, H, W) in case C changes.
# Instead, the target shape is actually (N, -1, H, W) whenever C changes or not.
se = keras.layers.Reshape((-1, H, W), name=name + 'pre_pool_reshape')(x)
se = keras.layers.GlobalAveragePooling2D(
data_format=data_format, name=name + 'se_squeeze')(se)
layer = keras.layers.Reshape((R, -1, 1, 1), name=name + 'post_pool_reshape')
se = layer(se)
else:
se = keras.layers.GlobalAveragePooling2D(
data_format=data_format, name=name + 'se_squeeze')(x)
# se_shape = (1, 1, filters) if data_format == 'channels_last' else (filters, 1, 1)
# TODO(@yuw): use -1 instead of filters for pruning
# otherwise, "Reshape/Permute is not supported after a pruned layer."
se_shape = (1, 1, -1) if data_format == 'channels_last' else (-1, 1, 1)
se = keras.layers.Reshape(se_shape, name=name + 'se_reshape')(se)
# in reduce and expand conv, set use_bias=True, following
# https://github.com/tensorflow/models/blob/77bf83b493617df6c5cd35b8d8cf495944161d99/
# official/legacy/image_classification/efficientnet/efficientnet_model.py#L287
layer = keras.layers.Conv2D(
filters_se,
1,
padding='same',
activation=activation_fn,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=True,
trainable=not freeze,
data_format=data_format,
name=name + 'se_reduce'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
se = layer(se)
layer = keras.layers.Conv2D(
filters,
1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
use_bias=True,
trainable=not freeze,
name=name + 'se_expand'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
se = layer(se)
x = keras.layers.Multiply(name=name + 'se_excite')([x, se])
# Output phase
layer = keras.layers.Conv2D(
filters_out,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'project_conv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
layer.trainable = False
x = layer(x)
if (id_skip is True and strides == 1 and filters_in == filters_out):
if drop_rate > 0:
layer = keras.layers.Dropout(
drop_rate,
noise_shape=(None, 1, 1, 1),
name=name + 'drop',
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
x = keras.layers.Add(name=name + 'add')([x, inputs])
return x
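# Illustrative usage sketch (hypothetical tensor `x`; values mirror the first
# EfficientNet-B0 stage):
#
#     x = block(x, activation_fn=swish, filters_in=32, filters_out=16,
#               kernel_size=3, strides=1, expand_ratio=1, se_ratio=0.25,
#               name='block1a_')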
def force_stride16(block_args):
"""Force the block args to make the model have stride 16."""
last_block = -1
# `blk` avoids shadowing the module-level `block` function defined above.
for idx, blk in enumerate(block_args):
if blk['strides'] == 2:
last_block = idx
assert last_block >= 0, (
"Cannot find stride 2 in the block args."
)
# Drop the last stride-2 block and everything after it, in place, so the
# total stride of the model stays at 16. (Rebinding the local name with
# `block_args = block_args[:last_block]` would leave the caller's list untouched.)
del block_args[last_block:]
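# Illustrative usage sketch (hypothetical minimal block args, assuming the
# in-place `del` fix above):
#
#     args = [{'strides': 1}, {'strides': 2}, {'strides': 1}, {'strides': 2}]
#     force_stride16(args)
#     len(args)  # -> 3 (the final stride-2 block was removed in place)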
def add_deconv_layer(
model,
inputs,
use_batch_norm,
filters,
upsampling,
activation_type="relu",
activation_kwargs=None,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
layer_name=None,
padding="same",
):
"""Add a deconv layer.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
use_batch_norm (bool): use batch norm.
filters (int): the number of filters.
upsampling (int): the amount of upsampling the transpose convolution should do.
activation_type (str): activation function name, e.g., 'relu'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
kernel_regularizer (`regularizer`): regularizer for the kernels.
bias_regularizer (`regularizer`): regularizer for the biases.
layer_name (str): layer_name prefix.
Returns:
Model: A model with a deconv layer stacked on top of the `model` input.
"""
if data_format is None:
data_format = keras.backend.image_data_format()
x = model.outputs[0]
if layer_name is not None:
layer_name = f"{layer_name}_m{filters}_d{upsampling}"
x = keras.layers.Conv2DTranspose(
filters=filters,
kernel_size=(upsampling, upsampling),
strides=(upsampling, upsampling),
padding=padding,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=layer_name,
)(x)
if use_batch_norm:
if layer_name is not None:
layer_name += "_bn"
x = keras.layers.BatchNormalization(
axis=get_batchnorm_axis(data_format), name=layer_name
)(x)
if activation_type:
activation_kwargs = activation_kwargs or {}
x = add_activation(activation_type, **activation_kwargs)(x)
model = keras.models.Model(
inputs=inputs, outputs=x, name=f"{model.name}_d{upsampling}"
)
return model
def add_deconv_head(
model,
inputs,
nmaps,
upsampling,
activation_type="sigmoid",
activation_kwargs=None,
data_format=None,
padding="same",
):
"""Create a model that stacks a deconvolutional (transpose conv) head on top of another model.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
nmaps (int): the number of output maps (filters) the transpose convolution should
have.
upsampling (int): the amount of upsampling the transpose convolution should do.
activation_type (str): activation function name, e.g., 'softmax'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
Returns:
Model: A model with the head stacked on top of the `model` input.
"""
return add_deconv_layer(
model,
inputs,
use_batch_norm=False,
filters=nmaps,
upsampling=upsampling,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
data_format=data_format,
kernel_regularizer=None,
bias_regularizer=None,
layer_name="head_deconv",
padding=padding,
)
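# Illustrative usage sketch (hypothetical `model` and `img_input` names):
#
#     model = add_deconv_head(model, img_input, nmaps=1, upsampling=4)
#     # -> appends a 4x transpose-conv upsampling layer with sigmoid activation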
| tao_tensorflow2_backend-main | nvidia_tao_tf2/backbones/utils_tf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BYOM root module."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit classification root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/__init__.py |
"""Module containing the config for classification."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import List, Optional
from dataclasses import dataclass, field
from omegaconf import MISSING
from nvidia_tao_tf2.common.config.mlops import ClearMLConfig, WandBConfig
@dataclass
class RegConfig:
"""Regularizer config."""
type: str = 'L2'
scope: List[str] = field(default_factory=lambda: ['conv2d', 'dense'])
weight_decay: float = 0.000015
@dataclass
class BNConfig:
"""Batchnorm config."""
momentum: float = 0.9
epsilon: float = 1e-5
@dataclass
class OptimConfig:
"""Optimizer config."""
optimizer: str = 'sgd'
lr: float = 0.05
decay: float = 0.0001
epsilon: float = 0.0001
rho: float = 0.5
beta_1: float = 0.99
beta_2: float = 0.99
momentum: float = 0.99
nesterov: bool = True
@dataclass
class LRConfig:
"""Learning rate config."""
scheduler: str = 'cosine' # soft_anneal, step
learning_rate: float = 0.05
soft_start: float = 0.05
annealing_points: List[float] = field(default_factory=lambda: [0.33, 0.66, 0.88])
annealing_divider: float = 10
min_lr_ratio: float = 0.00003
gamma: float = 0.000015
step_size: int = 10
@dataclass
class TrainConfig:
"""Train config."""
qat: bool = False
checkpoint: str = ''
checkpoint_interval: int = 1
batch_size_per_gpu: int = 64
num_epochs: int = 100
n_workers: int = 10
random_seed: int = 42
label_smoothing: float = 0.01
reg_config: RegConfig = RegConfig()
bn_config: BNConfig = BNConfig()
lr_config: LRConfig = LRConfig()
optim_config: OptimConfig = OptimConfig()
wandb: WandBConfig = WandBConfig(
name="classification",
tags=["classification", "training", "tao-toolkit"]
)
clearml: ClearMLConfig = ClearMLConfig(
task="classification_train",
tags=["classification", "training", "tao-toolkit"]
)
results_dir: Optional[str] = None
@dataclass
class AugmentConfig:
"""Augment config."""
enable_random_crop: bool = True
enable_center_crop: bool = True
enable_color_augmentation: bool = False
disable_horizontal_flip: bool = False
mixup_alpha: float = 0
@dataclass
class DataConfig:
"""Data config."""
train_dataset_path: str = MISSING
val_dataset_path: str = MISSING
preprocess_mode: str = 'caffe'
image_mean: List[float] = field(default_factory=lambda: [103.939, 116.779, 123.68])
augmentation: AugmentConfig = AugmentConfig()
num_classes: int = MISSING
@dataclass
class ModelConfig:
"""Model config."""
backbone: str = 'resnet_18'
input_width: int = 224
input_height: int = 224
input_channels: int = 3
input_image_depth: int = 8
use_batch_norm: bool = True
use_bias: bool = False
use_pooling: bool = True
all_projections: bool = False
freeze_bn: bool = False
freeze_blocks: List[int] = field(default_factory=lambda: [])
retain_head: bool = False
dropout: float = 0.0
resize_interpolation_method: str = 'bilinear' # 'bicubic'
activation_type: Optional[str] = None # only used in efficientnets
byom_model: str = ''
@dataclass
class EvalConfig:
"""Eval config."""
dataset_path: str = MISSING
checkpoint: str = MISSING
trt_engine: Optional[str] = None
batch_size: int = 64
n_workers: int = 64
top_k: int = 3
classmap: str = ""
results_dir: Optional[str] = None
@dataclass
class ExportConfig:
"""Export config."""
checkpoint: str = MISSING
onnx_file: str = MISSING
results_dir: Optional[str] = None
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: str = ""
cal_cache_file: str = ""
cal_batch_size: int = 1
cal_batches: int = 1
cal_data_file: str = ""
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "fp32"
max_workspace_size: int = 2 # in Gb
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class GenTrtEngineConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
onnx_file: str = MISSING
trt_engine: Optional[str] = None
tensorrt: TrtConfig = TrtConfig()
@dataclass
class InferConfig:
"""Inference config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
image_dir: str = MISSING
classmap: str = MISSING
results_dir: Optional[str] = None
@dataclass
class PruneConfig:
"""Pruning config."""
checkpoint: str = MISSING
byom_model_path: Optional[str] = None
normalizer: str = 'max'
results_dir: Optional[str] = None
equalization_criterion: str = 'union'
granularity: int = 8
threshold: float = MISSING
min_num_filters: int = 16
excluded_layers: List[str] = field(default_factory=lambda: [])
@dataclass
class ExperimentConfig:
"""Experiment config."""
train: TrainConfig = TrainConfig()
dataset: DataConfig = DataConfig()
model: ModelConfig = ModelConfig()
evaluate: EvalConfig = EvalConfig()
export: ExportConfig = ExportConfig()
inference: InferConfig = InferConfig()
prune: PruneConfig = PruneConfig()
gen_trt_engine: GenTrtEngineConfig = GenTrtEngineConfig()
results_dir: str = MISSING
encryption_key: Optional[str] = None
data_format: str = 'channels_first'
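# Illustrative usage sketch (not part of the original file): the dataclasses
# above are OmegaConf structured configs and can be materialized like so.
#
#     from omegaconf import OmegaConf
#     cfg = OmegaConf.structured(ExperimentConfig)
#     print(cfg.train.lr_config.scheduler)  # 'cosine'
#     print(cfg.model.backbone)             # 'resnet_18'
#     # Fields declared as MISSING (e.g. cfg.results_dir) must be set before
#     # access, or OmegaConf raises MissingMandatoryValue.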
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/classification/config/default_config.py |