python_code | repo_name | file_path
---|---|---
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import argparse
import numpy as np
import tensorflow as tf
from infer import TensorRTInfer
from image_batcher import ImageBatcher
from visualize import visualize_detections, concat_visualizations
class TensorFlowInfer:
"""
Implements TensorFlow inference of a saved model, following the same API as the TensorRTInfer class.
"""
def __init__(self, saved_model_path):
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
self.model = tf.saved_model.load(saved_model_path)
self.pred_fn = self.model.signatures['serving_default']
# Setup I/O bindings
self.inputs = []
fn_inputs = self.pred_fn.structured_input_signature[1]
for i, input in enumerate(list(fn_inputs.values())):
self.inputs.append({
'index': i,
'name': input.name,
'dtype': np.dtype(input.dtype.as_numpy_dtype()),
'shape': [1, 512, 512, 3], # This can be overridden later
})
self.outputs = []
fn_outputs = self.pred_fn.structured_outputs
for i, output in enumerate(list(fn_outputs.values())):
self.outputs.append({
'index': i,
'name': output.name,
'dtype': np.dtype(output.dtype.as_numpy_dtype()),
'shape': output.shape.as_list(),
})
def override_input_shape(self, input, shape):
self.inputs[input]['shape'] = shape
def input_spec(self):
return self.inputs[0]['shape'], self.inputs[0]['dtype']
def output_spec(self):
return self.outputs[0]['shape'], self.outputs[0]['dtype']
def infer(self, batch, scales=None, nms_threshold=None):
# Process I/O and execute the network
input = {self.inputs[0]['name']: tf.convert_to_tensor(batch)}
output = self.pred_fn(**input)
# Extract the results depending on what kind of saved model this is
boxes = None
scores = None
classes = None
if len(self.outputs) == 1:
# Detected as AutoML Saved Model
assert len(self.outputs[0]['shape']) == 3 and self.outputs[0]['shape'][2] == 7
results = output[self.outputs[0]['name']].numpy()
boxes = results[:, :, 1:5]
scores = results[:, :, 5]
classes = results[:, :, 6].astype(np.int32)
elif len(self.outputs) >= 4:
# Detected as TFOD Saved Model
            assert 'num_detections' in output
num = int(output['num_detections'].numpy().flatten()[0])
boxes = output['detection_boxes'].numpy()[:, 0:num, :]
scores = output['detection_scores'].numpy()[:, 0:num]
classes = output['detection_classes'].numpy()[:, 0:num]
# Process the results
detections = [[]]
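        # Heuristic: detection boxes may be normalized to [0,1] or given in input-pixel coordinates.
        # If the largest coordinate is below 2.0, treat them as normalized and rescale by the network input size.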
normalized = (np.max(boxes) < 2.0)
for n in range(scores.shape[1]):
if scores[0][n] == 0.0:
break
scale = self.inputs[0]['shape'][2] if normalized else 1.0
if scales:
scale /= scales[0]
if nms_threshold and scores[0][n] < nms_threshold:
continue
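            # Shift the model's 1-based class ids to 0-based so they index the labels file directly.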
detections[0].append({
'ymin': boxes[0][n][0] * scale,
'xmin': boxes[0][n][1] * scale,
'ymax': boxes[0][n][2] * scale,
'xmax': boxes[0][n][3] * scale,
'score': scores[0][n],
'class': int(classes[0][n]) - 1,
})
return detections
def run(batcher, inferer, framework, nms_threshold=None):
res_images = []
res_detections = []
for batch, images, scales in batcher.get_batch():
res_detections += inferer.infer(batch, scales, nms_threshold)
res_images += images
print("Processing {} / {} images ({})".format(batcher.image_index, batcher.num_images, framework), end="\r")
print()
return res_images, res_detections
def parse_annotations(annotations_path):
annotations = {}
if annotations_path and os.path.exists(annotations_path):
with open(annotations_path) as f:
ann_json = json.load(f)
for ann in ann_json['annotations']:
img_id = ann['image_id']
if img_id not in annotations.keys():
annotations[img_id] = []
annotations[img_id].append({
'ymin': ann['bbox'][1],
'xmin': ann['bbox'][0],
'ymax': ann['bbox'][1] + ann['bbox'][3],
'xmax': ann['bbox'][0] + ann['bbox'][2],
'score': -1,
'class': ann['category_id'] - 1,
})
return annotations
def compare_images(tf_images, tf_detections, trt_images, trt_detections, output_dir, annotations_path, labels_path):
labels = []
if labels_path and os.path.exists(labels_path):
with open(labels_path) as f:
for i, label in enumerate(f):
labels.append(label.strip())
annotations = parse_annotations(annotations_path)
count = 1
for tf_img, tf_det, trt_img, trt_det in zip(tf_images, tf_detections, trt_images, trt_detections):
vis = []
names = []
colors = []
vis.append(visualize_detections(tf_img, None, tf_det, labels))
names.append("TensorFlow")
colors.append("DarkOrange")
vis.append(visualize_detections(trt_img, None, trt_det, labels))
names.append("TensorRT")
colors.append("YellowGreen")
if annotations:
img_id = os.path.splitext(os.path.basename(trt_img))[0]
if img_id.isnumeric():
img_id = int(img_id)
if img_id in annotations.keys():
vis.append(visualize_detections(trt_img, None, annotations[img_id], labels))
names.append("Ground Truth")
colors.append("RoyalBlue")
else:
print("Image {} does not have a COCO annotation, skipping ground truth visualization".format(trt_img))
basename = os.path.splitext(os.path.basename(tf_img))[0]
output_path = os.path.join(output_dir, "{}.compare.png".format(basename))
os.makedirs(output_dir, exist_ok=True)
concat_visualizations(vis, names, colors, output_path)
print("Processing {} / {} images (Visualization)".format(count, len(tf_images)), end="\r")
count += 1
print()
def main(args):
tf_infer = TensorFlowInfer(args.saved_model)
trt_infer = TensorRTInfer(args.engine)
trt_batcher = ImageBatcher(args.input, *trt_infer.input_spec(), max_num_images=args.num_images)
tf_infer.override_input_shape(0, [1, trt_batcher.height, trt_batcher.width, 3]) # Same size input in TF as TRT
tf_batcher = ImageBatcher(args.input, *tf_infer.input_spec(), max_num_images=args.num_images)
tf_images, tf_detections = run(tf_batcher, tf_infer, "TensorFlow", args.nms_threshold)
trt_images, trt_detections = run(trt_batcher, trt_infer, "TensorRT", args.nms_threshold)
compare_images(tf_images, tf_detections, trt_images, trt_detections, args.output, args.annotations, args.labels)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--engine", help="The TensorRT engine to infer with")
parser.add_argument("-m", "--saved_model", help="The TensorFlow saved model path to validate against")
parser.add_argument("-i", "--input",
help="The input to infer, either a single image path, or a directory of images")
parser.add_argument("-o", "--output", default=None, help="Directory where to save the visualization results")
parser.add_argument("-l", "--labels", default="./labels_coco.txt",
help="File to use for reading the class labels from, default: ./labels_coco.txt")
parser.add_argument("-a", "--annotations", default=None,
help="Set the path to the 'instances_val2017.json' file to use for COCO annotations, in which "
"case --input should point to the COCO val2017 dataset, default: not used")
parser.add_argument("-n", "--num_images", default=100, type=int,
help="The maximum number of images to visualize, default: 100")
parser.add_argument("-t", "--nms_threshold", type=float, help="Override the score threshold for the NMS operation, "
"if higher than the threshold in the model/engine.")
args = parser.parse_args()
if not all([args.engine, args.saved_model, args.input, args.output]):
parser.print_help()
sys.exit(1)
main(args)
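# Example invocation (all paths below are placeholders):
#   python3 compare_tf.py --engine model.trt --saved_model ./saved_model --input ./val2017 \
#       --output ./visualizations --annotations ./instances_val2017.json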
| TensorRT-master | samples/python/efficientdet/compare_tf.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import logging
import argparse
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from image_batcher import ImageBatcher
logging.basicConfig(level=logging.INFO)
logging.getLogger("EngineBuilder").setLevel(logging.INFO)
log = logging.getLogger("EngineBuilder")
class EngineCalibrator(trt.IInt8EntropyCalibrator2):
"""
Implements the INT8 Entropy Calibrator 2.
"""
def __init__(self, cache_file):
"""
:param cache_file: The location of the cache file.
"""
super().__init__()
self.cache_file = cache_file
self.image_batcher = None
self.batch_allocation = None
self.batch_generator = None
def set_image_batcher(self, image_batcher: ImageBatcher):
"""
Define the image batcher to use, if any. If using only the cache file, an image batcher doesn't need
to be defined.
:param image_batcher: The ImageBatcher object
"""
self.image_batcher = image_batcher
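        # Allocate one device buffer large enough to hold a full preprocessed batch (element size times element count).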
size = int(np.dtype(self.image_batcher.dtype).itemsize * np.prod(self.image_batcher.shape))
self.batch_allocation = cuda.mem_alloc(size)
self.batch_generator = self.image_batcher.get_batch()
def get_batch_size(self):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Get the batch size to use for calibration.
:return: Batch size.
"""
if self.image_batcher:
return self.image_batcher.batch_size
return 1
def get_batch(self, names):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Get the next batch to use for calibration, as a list of device memory pointers.
:param names: The names of the inputs, if useful to define the order of inputs.
:return: A list of int-casted memory pointers.
"""
if not self.image_batcher:
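            # Returning None signals TensorRT that no more calibration batches are available.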
return None
try:
batch, _ = next(self.batch_generator)
log.info("Calibrating image {} / {}".format(self.image_batcher.image_index, self.image_batcher.num_images))
cuda.memcpy_htod(self.batch_allocation, np.ascontiguousarray(batch))
return [int(self.batch_allocation)]
except StopIteration:
log.info("Finished calibration batches")
return None
def read_calibration_cache(self):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Read the calibration cache file stored on disk, if it exists.
:return: The contents of the cache file, if any.
"""
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
log.info("Using calibration cache file: {}".format(self.cache_file))
return f.read()
def write_calibration_cache(self, cache):
"""
Overrides from trt.IInt8EntropyCalibrator2.
Store the calibration cache to a file on disk.
:param cache: The contents of the calibration cache to store.
"""
with open(self.cache_file, "wb") as f:
log.info("Writing calibration cache data to: {}".format(self.cache_file))
f.write(cache)
class EngineBuilder:
"""
Parses an ONNX graph and builds a TensorRT engine from it.
"""
def __init__(self, verbose=False):
"""
:param verbose: If enabled, a higher verbosity level will be set on the TensorRT logger.
"""
self.trt_logger = trt.Logger(trt.Logger.INFO)
if verbose:
self.trt_logger.min_severity = trt.Logger.Severity.VERBOSE
trt.init_libnvinfer_plugins(self.trt_logger, namespace="")
self.builder = trt.Builder(self.trt_logger)
self.config = self.builder.create_builder_config()
self.config.max_workspace_size = 8 * (2 ** 30) # 8 GB
self.batch_size = None
self.network = None
self.parser = None
def create_network(self, onnx_path):
"""
Parse the ONNX graph and create the corresponding TensorRT network definition.
:param onnx_path: The path to the ONNX graph to load.
"""
network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
onnx_path = os.path.realpath(onnx_path)
with open(onnx_path, "rb") as f:
if not self.parser.parse(f.read()):
log.error("Failed to load ONNX file: {}".format(onnx_path))
for error in range(self.parser.num_errors):
log.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
log.info("Network Description")
for input in inputs:
self.batch_size = input.shape[0]
log.info("Input '{}' with shape {} and dtype {}".format(input.name, input.shape, input.dtype))
for output in outputs:
log.info("Output '{}' with shape {} and dtype {}".format(output.name, output.shape, output.dtype))
assert self.batch_size > 0
self.builder.max_batch_size = self.batch_size
def create_engine(self, engine_path, precision, calib_input=None, calib_cache=None, calib_num_images=25000,
calib_batch_size=8, calib_preprocessor=None):
"""
Build the TensorRT engine and serialize it to disk.
:param engine_path: The path where to serialize the engine to.
:param precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
:param calib_input: The path to a directory holding the calibration images.
:param calib_cache: The path where to write the calibration cache to, or if it already exists, load it from.
:param calib_num_images: The maximum number of images to use for calibration.
:param calib_batch_size: The batch size to use for the calibration process.
:param calib_preprocessor: The ImageBatcher preprocessor algorithm to use.
"""
engine_path = os.path.realpath(engine_path)
engine_dir = os.path.dirname(engine_path)
os.makedirs(engine_dir, exist_ok=True)
log.info("Building {} Engine in {}".format(precision, engine_path))
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
if precision == "fp16":
if not self.builder.platform_has_fast_fp16:
log.warning("FP16 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.FP16)
elif precision == "int8":
if not self.builder.platform_has_fast_int8:
log.warning("INT8 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.INT8)
self.config.int8_calibrator = EngineCalibrator(calib_cache)
if not os.path.exists(calib_cache):
calib_shape = [calib_batch_size] + list(inputs[0].shape[1:])
calib_dtype = trt.nptype(inputs[0].dtype)
self.config.int8_calibrator.set_image_batcher(
ImageBatcher(calib_input, calib_shape, calib_dtype, max_num_images=calib_num_images,
exact_batches=True, preprocessor=calib_preprocessor))
with self.builder.build_engine(self.network, self.config) as engine, open(engine_path, "wb") as f:
log.info("Serializing engine to file: {:}".format(engine_path))
f.write(engine.serialize())
def main(args):
builder = EngineBuilder(args.verbose)
builder.create_network(args.onnx)
builder.create_engine(args.engine, args.precision, args.calib_input, args.calib_cache, args.calib_num_images,
args.calib_batch_size, args.calib_preprocessor)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--onnx", help="The input ONNX model file to load")
parser.add_argument("-e", "--engine", help="The output path for the TRT engine")
parser.add_argument("-p", "--precision", default="fp16", choices=["fp32", "fp16", "int8"],
help="The precision mode to build in, either 'fp32', 'fp16' or 'int8', default: 'fp16'")
parser.add_argument("-v", "--verbose", action="store_true", help="Enable more verbose log output")
parser.add_argument("--calib_input", help="The directory holding images to use for calibration")
parser.add_argument("--calib_cache", default="./calibration.cache",
help="The file path for INT8 calibration cache to use, default: ./calibration.cache")
parser.add_argument("--calib_num_images", default=25000, type=int,
help="The maximum number of images to use for calibration, default: 25000")
parser.add_argument("--calib_batch_size", default=8, type=int,
help="The batch size for the calibration process, default: 1")
parser.add_argument("--calib_preprocessor", default="V2", choices=["V1", "V1MS", "V2"],
help="Set the calibration image preprocessor to use, either 'V2', 'V1' or 'V1MS', default: V2")
args = parser.parse_args()
if not all([args.onnx, args.engine]):
parser.print_help()
log.error("These arguments are required: --onnx and --engine")
sys.exit(1)
if args.precision == "int8" and not any([args.calib_input, args.calib_cache]):
parser.print_help()
log.error("When building in int8 precision, either --calib_input or --calib_cache are required")
sys.exit(1)
main(args)
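# Example invocations (paths are placeholders):
#   python3 build_engine.py --onnx model.onnx --engine engine.trt --precision fp16
#   python3 build_engine.py --onnx model.onnx --engine engine.trt --precision int8 --calib_input ./calibration/images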
| TensorRT-master | samples/python/efficientnet/build_engine.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import numpy as np
from infer import TensorRTInfer
from image_batcher import ImageBatcher
def main(args):
annotations = {}
for line in open(args.annotations, "r"):
line = line.strip().split(args.separator)
if len(line) < 2 or not line[1].isnumeric():
print("Could not parse the annotations file correctly, make sure the correct separator is used")
sys.exit(1)
annotations[os.path.basename(line[0])] = int(line[1])
trt_infer = TensorRTInfer(args.engine)
batcher = ImageBatcher(args.input, *trt_infer.input_spec(), preprocessor=args.preprocessor)
top1 = 0
top5 = 0
total = 0
for batch, images in batcher.get_batch():
classes, scores, top = trt_infer.infer(batch, top=5)
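        # top is [top_classes, top_scores]; top[0][i] holds the top-5 class ids for image i, so a membership test gives Top-5 accuracy.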
for i in range(len(images)):
image = os.path.basename(images[i])
if image not in annotations.keys():
print("Image '{}' does not appear in the annotations file, please make sure all evaluated "
"images have a corresponding ground truth label".format(image))
sys.exit(1)
if annotations[image] == classes[i]:
top1 += 1
if annotations[image] in top[0][i]:
top5 += 1
total += 1
top1_acc = 100 * (top1 / total)
top5_acc = 100 * (top5 / total)
print("Processing {} / {} : Top-1 {:0.1f}% , Top-5: {:0.1f}% ".format(total, batcher.num_images,
top1_acc, top5_acc), end="\r")
print()
print("Top-1 Accuracy: {:0.3f}%".format(top1_acc))
print("Top-5 Accuracy: {:0.3f}%".format(top5_acc))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--engine", help="The TensorRT engine to infer with")
parser.add_argument("-i", "--input",
help="The input to infer, either a single image path, or a directory of images")
parser.add_argument("-a", "--annotations", help="Set the file to use for classification ground truth annotations")
parser.add_argument("-s", "--separator", default=" ",
help="Separator to use between columns when parsing the annotations file, default: ' ' (space)")
parser.add_argument("-p", "--preprocessor", default="V2", choices=["V1", "V1MS", "V2"],
help="Select the image preprocessor to use, either 'V2', 'V1' or 'V1MS', default: V2")
args = parser.parse_args()
if not all([args.engine, args.input, args.annotations]):
parser.print_help()
print("\nThese arguments are required: --engine --input and --annotations")
sys.exit(1)
main(args)
| TensorRT-master | samples/python/efficientnet/eval_gt.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import onnx
import onnx_graphsurgeon as gs
from onnx import shape_inference
import numpy as np
import tensorflow as tf
from tf2onnx import tfonnx, optimizer, tf_loader
def main(args):
# Load saved model
saved_model_path = os.path.realpath(args.saved_model)
assert os.path.isdir(saved_model_path)
graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, "serve", ["serving_default"])
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name="")
with tf_loader.tf_session(graph=tf_graph):
onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)
onnx_model = optimizer.optimize_graph(onnx_graph).make_model("Converted from {}".format(saved_model_path))
graph = gs.import_onnx(onnx_model)
assert graph
print()
print("ONNX graph created successfully")
# Set the I/O tensor shapes
graph.inputs[0].shape[0] = args.batch_size
graph.outputs[0].shape[0] = args.batch_size
if args.input_size and args.input_size > 0:
if graph.inputs[0].shape[3] == 3:
# Format NHWC
graph.inputs[0].shape[1] = args.input_size
graph.inputs[0].shape[2] = args.input_size
elif graph.inputs[0].shape[1] == 3:
# Format NCHW
graph.inputs[0].shape[2] = args.input_size
graph.inputs[0].shape[3] = args.input_size
print("ONNX input named '{}' with shape {}".format(graph.inputs[0].name, graph.inputs[0].shape))
print("ONNX output named '{}' with shape {}".format(graph.outputs[0].name, graph.outputs[0].shape))
for i in range(4):
if type(graph.inputs[0].shape[i]) != int or graph.inputs[0].shape[i] <= 0:
print("The input shape of the graph is invalid, try overriding it by giving a fixed size with --input_size")
sys.exit(1)
# Fix Clip Nodes (ReLU6)
for node in [n for n in graph.nodes if n.op == "Clip"]:
for input in node.inputs[1:]:
# In TensorRT, the min/max inputs on a Clip op *must* have fp32 datatype
input.values = np.float32(input.values)
# Run tensor shape inference
graph.cleanup().toposort()
model = shape_inference.infer_shapes(gs.export_onnx(graph))
graph = gs.import_onnx(model)
# Save updated model
graph.cleanup().toposort()
model = gs.export_onnx(graph)
onnx_path = os.path.realpath(args.onnx)
os.makedirs(os.path.dirname(onnx_path), exist_ok=True)
onnx.save(model, onnx_path)
print("ONNX model saved to {}".format(onnx_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--saved_model", help="The TensorFlow saved model directory to load")
parser.add_argument("-o", "--onnx", help="The output ONNX model file to write")
parser.add_argument("-b", "--batch_size", type=int, default=1, help="Set the batch size, default: 1")
parser.add_argument("-i", "--input_size", type=int,
help="Override the input height and width, e.g. '380', default: keep original size")
args = parser.parse_args()
if not all([args.saved_model, args.onnx]):
parser.print_help()
print("\nThese arguments are required: --saved_model and --onnx")
sys.exit(1)
main(args)
| TensorRT-master | samples/python/efficientnet/create_onnx.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import numpy as np
from PIL import Image
class ImageBatcher:
"""
Creates batches of pre-processed images.
"""
def __init__(self, input, shape, dtype, max_num_images=None, exact_batches=False, preprocessor="V2"):
"""
:param input: The input directory to read images from.
:param shape: The tensor shape of the batch to prepare, either in NCHW or NHWC format.
:param dtype: The (numpy) datatype to cast the batched data to.
:param max_num_images: The maximum number of images to read from the directory.
:param exact_batches: This defines how to handle a number of images that is not an exact multiple of the batch
size. If false, it will pad the final batch with zeros to reach the batch size. If true, it will *remove* the
last few images in excess of a batch size multiple, to guarantee batches are exact (useful for calibration).
:param preprocessor: Set the preprocessor to use, V1 or V2, depending on which network is being used.
"""
# Find images in the given input path
input = os.path.realpath(input)
self.images = []
extensions = [".jpg", ".jpeg", ".png", ".bmp"]
def is_image(path):
return os.path.isfile(path) and os.path.splitext(path)[1].lower() in extensions
if os.path.isdir(input):
self.images = [os.path.join(input, f) for f in os.listdir(input) if is_image(os.path.join(input, f))]
self.images.sort()
elif os.path.isfile(input):
if is_image(input):
self.images.append(input)
self.num_images = len(self.images)
if self.num_images < 1:
print("No valid {} images found in {}".format("/".join(extensions), input))
sys.exit(1)
# Handle Tensor Shape
self.dtype = dtype
self.shape = shape
assert len(self.shape) == 4
self.batch_size = shape[0]
assert self.batch_size > 0
self.format = None
self.width = -1
self.height = -1
if self.shape[1] == 3:
self.format = "NCHW"
self.height = self.shape[2]
self.width = self.shape[3]
elif self.shape[3] == 3:
self.format = "NHWC"
self.height = self.shape[1]
self.width = self.shape[2]
assert all([self.format, self.width > 0, self.height > 0])
# Adapt the number of images as needed
if max_num_images and 0 < max_num_images < len(self.images):
self.num_images = max_num_images
if exact_batches:
self.num_images = self.batch_size * (self.num_images // self.batch_size)
if self.num_images < 1:
print("Not enough images to create batches")
sys.exit(1)
self.images = self.images[0:self.num_images]
# Subdivide the list of images into batches
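        # Ceiling division: e.g. 10 images with a batch size of 4 yield 3 batches, the last one only partially filled.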
self.num_batches = 1 + int((self.num_images - 1) / self.batch_size)
self.batches = []
for i in range(self.num_batches):
start = i * self.batch_size
end = min(start + self.batch_size, self.num_images)
self.batches.append(self.images[start:end])
# Indices
self.image_index = 0
self.batch_index = 0
self.preprocessor = preprocessor
def preprocess_image(self, image_path):
"""
The image preprocessor loads an image from disk and prepares it as needed for batching. This includes cropping,
resizing, normalization, data type casting, and transposing.
This Image Batcher implements two algorithms:
* V2: The algorithm for EfficientNet V2, as defined in automl/efficientnetv2/preprocessing.py.
* V1: The algorithm for EfficientNet V1, aka "Legacy", as defined in automl/efficientnetv2/preprocess_legacy.py.
:param image_path: The path to the image on disk to load.
        :return: A numpy array holding the image sample, ready to be concatenated into the rest of the batch.
"""
def pad_crop(image):
"""
A subroutine to implement padded cropping. This will create a center crop of the image, padded by 32 pixels.
:param image: The PIL image object
:return: The PIL image object already padded and cropped.
"""
# Assume square images
assert self.height == self.width
width, height = image.size
ratio = self.height / (self.height + 32)
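            # For a 224x224 target this keeps a 224 / (224 + 32) = 0.875 center fraction of the shorter side.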
crop_size = int(ratio * min(height, width))
y = (height - crop_size) // 2
x = (width - crop_size) // 2
return image.crop((x, y, x + crop_size, y + crop_size))
image = Image.open(image_path)
image = image.convert(mode='RGB')
if self.preprocessor == "V2":
# For EfficientNet V2: Bilinear Resize and [-1,+1] Normalization
if self.height < 320:
# Padded crop only on smaller sizes
image = pad_crop(image)
image = image.resize((self.width, self.height), resample=Image.BILINEAR)
image = np.asarray(image, dtype=self.dtype)
image = (image - 128.0) / 128.0
elif self.preprocessor == "V1":
# For EfficientNet V1: Padded Crop, Bicubic Resize, and [0,1] Normalization
# (Mean subtraction and Std Dev scaling will be part of the graph, so not done here)
image = pad_crop(image)
image = image.resize((self.width, self.height), resample=Image.BICUBIC)
image = np.asarray(image, dtype=self.dtype)
image = image / 255.0
elif self.preprocessor == "V1MS":
# For EfficientNet V1: Padded Crop, Bicubic Resize, and [0,1] Normalization
# Mean subtraction and Std dev scaling are applied as a pre-processing step outside the graph.
image = pad_crop(image)
image = image.resize((self.width, self.height), resample=Image.BICUBIC)
image = np.asarray(image, dtype=self.dtype)
image = image - np.asarray([123.68, 116.28, 103.53])
image = image / np.asarray([58.395, 57.120, 57.375])
else:
print("Preprocessing method {} not supported".format(self.preprocessor))
sys.exit(1)
if self.format == "NCHW":
image = np.transpose(image, (2, 0, 1))
return image
def get_batch(self):
"""
Retrieve the batches. This is a generator object, so you can use it within a loop as:
for batch, images in batcher.get_batch():
...
Or outside of a batch with the next() function.
:return: A generator yielding two items per iteration: a numpy array holding a batch of images, and the list of
paths to the images loaded within this batch.
"""
        for batch_images in self.batches:
batch_data = np.zeros(self.shape, dtype=self.dtype)
for i, image in enumerate(batch_images):
self.image_index += 1
batch_data[i] = self.preprocess_image(image)
self.batch_index += 1
yield batch_data, batch_images
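# A minimal usage sketch, assuming a local ./images directory of JPEG/PNG files;
# the path, shape and dtype below are illustrative assumptions, not part of the sample.
if __name__ == "__main__":
    batcher = ImageBatcher("./images", [8, 224, 224, 3], np.float32, preprocessor="V2")
    for batch, images in batcher.get_batch():
        print("Loaded batch of shape {} covering {} images".format(batch.shape, len(images)))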
| TensorRT-master | samples/python/efficientnet/image_batcher.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from image_batcher import ImageBatcher
class TensorRTInfer:
"""
Implements inference for the EfficientNet TensorRT engine.
"""
def __init__(self, engine_path):
"""
:param engine_path: The path to the serialized engine to load from disk.
"""
# Load TRT engine
self.logger = trt.Logger(trt.Logger.ERROR)
with open(engine_path, "rb") as f, trt.Runtime(self.logger) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read())
self.context = self.engine.create_execution_context()
assert self.engine
assert self.context
# Setup I/O bindings
self.inputs = []
self.outputs = []
self.allocations = []
for i in range(self.engine.num_bindings):
is_input = False
if self.engine.binding_is_input(i):
is_input = True
name = self.engine.get_binding_name(i)
dtype = self.engine.get_binding_dtype(i)
shape = self.engine.get_binding_shape(i)
if is_input:
self.batch_size = shape[0]
size = np.dtype(trt.nptype(dtype)).itemsize
for s in shape:
size *= s
allocation = cuda.mem_alloc(size)
binding = {
'index': i,
'name': name,
'dtype': np.dtype(trt.nptype(dtype)),
'shape': list(shape),
'allocation': allocation,
}
self.allocations.append(allocation)
if self.engine.binding_is_input(i):
self.inputs.append(binding)
else:
self.outputs.append(binding)
assert self.batch_size > 0
assert len(self.inputs) > 0
assert len(self.outputs) > 0
assert len(self.allocations) > 0
def input_spec(self):
"""
Get the specs for the input tensor of the network. Useful to prepare memory allocations.
:return: Two items, the shape of the input tensor and its (numpy) datatype.
"""
return self.inputs[0]['shape'], self.inputs[0]['dtype']
def output_spec(self):
"""
Get the specs for the output tensor of the network. Useful to prepare memory allocations.
:return: Two items, the shape of the output tensor and its (numpy) datatype.
"""
return self.outputs[0]['shape'], self.outputs[0]['dtype']
def infer(self, batch, top=1):
"""
Execute inference on a batch of images. The images should already be batched and preprocessed, as prepared by
the ImageBatcher class. Memory copying to and from the GPU device will be performed here.
:param batch: A numpy array holding the image batch.
        :param top: The number of classes to return as top_predictions, in descending order by their score. Setting
        this to one returns only the maximum-score class. Useful for Top-5 accuracy metrics in validation.
:return: Three items, as numpy arrays for each batch image: The maximum score class, the corresponding maximum
score, and a list of the top N classes and scores.
"""
# Prepare the output data
output = np.zeros(*self.output_spec())
# Process I/O and execute the network
cuda.memcpy_htod(self.inputs[0]['allocation'], np.ascontiguousarray(batch))
self.context.execute_v2(self.allocations)
cuda.memcpy_dtoh(output, self.outputs[0]['allocation'])
# Process the results
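        # np.argsort sorts ascending, so flip along axis 1 to get descending order before taking the first 'top' entries.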
classes = np.argmax(output, axis=1)
scores = np.max(output, axis=1)
top = min(top, output.shape[1])
top_classes = np.flip(np.argsort(output, axis=1), axis=1)[:, 0:top]
top_scores = np.flip(np.sort(output, axis=1), axis=1)[:, 0:top]
return classes, scores, [top_classes, top_scores]
def main(args):
trt_infer = TensorRTInfer(args.engine)
batcher = ImageBatcher(args.input, *trt_infer.input_spec(), preprocessor=args.preprocessor)
for batch, images in batcher.get_batch():
classes, scores, top = trt_infer.infer(batch)
for i in range(len(images)):
if args.top == 1:
print(images[i], classes[i], scores[i], sep=args.separator)
else:
line = [images[i]]
assert args.top <= top[0].shape[1]
for t in range(args.top):
line.append(str(top[0][i][t]))
for t in range(args.top):
line.append(str(top[1][i][t]))
print(args.separator.join(line))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--engine", help="The TensorRT engine to infer with")
parser.add_argument("-i", "--input",
help="The input to infer, either a single image path, or a directory of images")
parser.add_argument("-t", "--top", default=1, type=int,
help="The amount of top classes and scores to output per image, default: 1")
parser.add_argument("-s", "--separator", default="\t",
help="Separator to use between columns when printing the results, default: \\t")
parser.add_argument("-p", "--preprocessor", default="V2", choices=["V1", "V1MS", "V2"],
help="Select the image preprocessor to use, either 'V2', 'V1' or 'V1MS', default: V2")
args = parser.parse_args()
if not all([args.engine, args.input]):
parser.print_help()
print("\nThese arguments are required: --engine and --input")
sys.exit(1)
main(args)
| TensorRT-master | samples/python/efficientnet/infer.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import argparse
import numpy as np
import tensorflow as tf
from infer import TensorRTInfer
from image_batcher import ImageBatcher
class TensorFlowInfer:
"""
Implements TensorFlow inference of a saved model, following the same API as the TensorRTInfer class.
"""
def __init__(self, saved_model_path):
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
self.model = tf.saved_model.load(saved_model_path)
self.pred_fn = self.model.signatures['serving_default']
# Setup I/O bindings
self.inputs = []
fn_inputs = self.pred_fn.structured_input_signature[1]
for i, input in enumerate(list(fn_inputs.values())):
self.inputs.append({
'index': i,
'name': input.name,
'dtype': np.dtype(input.dtype.as_numpy_dtype()),
'shape': input.shape.as_list(),
})
self.outputs = []
fn_outputs = self.pred_fn.structured_outputs
for i, output in enumerate(list(fn_outputs.values())):
self.outputs.append({
'index': i,
'name': output.name,
'dtype': np.dtype(output.dtype.as_numpy_dtype()),
'shape': output.shape.as_list(),
})
def input_spec(self):
return self.inputs[0]['shape'], self.inputs[0]['dtype']
def output_spec(self):
return self.outputs[0]['shape'], self.outputs[0]['dtype']
def infer(self, batch, top=1):
# Process I/O and execute the network
input = {self.inputs[0]['name']: tf.convert_to_tensor(batch)}
output = self.pred_fn(**input)
output = output[self.outputs[0]['name']].numpy()
# Read and process the results
classes = np.argmax(output, axis=1)
scores = np.max(output, axis=1)
        top = min(top, output.shape[1])
top_classes = np.flip(np.argsort(output, axis=1), axis=1)[:, 0:top]
top_scores = np.flip(np.sort(output, axis=1), axis=1)[:, 0:top]
return classes, scores, [top_classes, top_scores]
def main(args):
# Initialize TRT and TF infer objects.
tf_infer = TensorFlowInfer(args.saved_model)
trt_infer = TensorRTInfer(args.engine)
batcher = ImageBatcher(args.input, *trt_infer.input_spec(), max_num_images=args.num_images,
preprocessor=args.preprocessor)
# Make sure both systems use the same input spec, so we can use the exact same image batches with both
tf_shape, tf_dtype = tf_infer.input_spec()
trt_shape, trt_dtype = trt_infer.input_spec()
if trt_dtype != tf_dtype:
print("Input datatype does not match")
print("TRT Engine Input Dtype: {} {}".format(trt_dtype))
print("TF Saved Model Input Dtype: {} {}".format(tf_dtype))
print("Please use the same TensorFlow saved model that the TensorRT engine was built with")
sys.exit(1)
if (tf_shape[1] and trt_shape[1] != tf_shape[1]) or (tf_shape[2] and trt_shape[2] != tf_shape[2]):
print("Input shapes do not match")
print("TRT Engine Input Shape: {} {}".format(trt_shape[1:]))
print("TF Saved Model Input Shape: {} {}".format(tf_shape[1:]))
print("Please use the same TensorFlow saved model that the TensorRT engine was built with")
sys.exit(1)
match = 0
error = 0
for batch, images in batcher.get_batch():
# Run inference on the same batch with both inference systems
tf_classes, tf_scores, _ = tf_infer.infer(batch)
trt_classes, trt_scores, _ = trt_infer.infer(batch)
        # The last batch may not have all image slots filled, so limit the results to only the number of actual images
tf_classes = tf_classes[0:len(images)]
tf_scores = tf_scores[0:len(images)]
trt_classes = trt_classes[0:len(images)]
trt_scores = trt_scores[0:len(images)]
# Track how many images match on top-1 class id predictions
match += np.sum(trt_classes == tf_classes)
# Track the mean square error in confidence score
error += np.sum((trt_scores - tf_scores) * (trt_scores - tf_scores))
print("Processing {} / {} images: {:.2f}% match ".format(batcher.image_index, batcher.num_images,
(100 * (match / batcher.image_index))), end="\r")
print()
pc = 100 * (match / batcher.num_images)
print("Matching Top-1 class predictions for {} out of {} images: {:.2f}%".format(match, batcher.num_images, pc))
avgerror = np.sqrt(error / batcher.num_images)
print("RMSE between TensorFlow and TensorRT confidence scores: {:.3f}".format(avgerror))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--engine", help="The TensorRT engine to infer with")
parser.add_argument("-m", "--saved_model", help="The TensorFlow saved model path to validate against")
parser.add_argument("-i", "--input",
help="The input to infer, either a single image path, or a directory of images")
parser.add_argument("-n", "--num_images", default=5000, type=int,
help="The maximum number of images to use for validation, default: 5000")
parser.add_argument("-p", "--preprocessor", default="V2", choices=["V1", "V1MS", "V2"],
help="Select the image preprocessor to use, either 'V2', 'V1' or 'V1MS', default: V2")
args = parser.parse_args()
if not all([args.engine, args.saved_model, args.input]):
parser.print_help()
sys.exit(1)
main(args)
| TensorRT-master | samples/python/efficientnet/compare_tf.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This sample uses a Caffe ResNet50 Model to create a TensorRT Inference Engine
import random
from PIL import Image
import numpy as np
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
class ModelData(object):
MODEL_PATH = "ResNet50_fp32.caffemodel"
DEPLOY_PATH = "ResNet50_N2.prototxt"
INPUT_SHAPE = (3, 224, 224)
OUTPUT_NAME = "prob"
# We can convert TensorRT data types to numpy types with trt.nptype()
DTYPE = trt.float32
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Allocate host and device buffers, and create a stream.
def allocate_buffers(engine):
# Determine dimensions and create page-locked memory buffers (i.e. won't be swapped to disk) to hold host inputs/outputs.
h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(ModelData.DTYPE))
h_output = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(ModelData.DTYPE))
# Allocate device memory for inputs and outputs.
d_input = cuda.mem_alloc(h_input.nbytes)
d_output = cuda.mem_alloc(h_output.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
return h_input, d_input, h_output, d_output, stream
def do_inference(context, h_input, d_input, h_output, d_output, stream):
# Transfer input data to the GPU.
cuda.memcpy_htod_async(d_input, h_input, stream)
# Run inference.
context.execute_async(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(h_output, d_output, stream)
# Synchronize the stream
stream.synchronize()
# The Caffe path is used for Caffe models.
def build_engine_caffe(model_file, deploy_file):
# You can set the logger severity higher to suppress messages (or lower to display more messages).
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.CaffeParser() as parser:
# Workspace size is the maximum amount of memory available to the builder while building an engine.
# It should generally be set as high as possible.
config.max_workspace_size = common.GiB(1)
# Load the Caffe model and parse it in order to populate the TensorRT network.
# This function returns an object that we can query to find tensors by name.
model_tensors = parser.parse(deploy=deploy_file, model=model_file, network=network, dtype=ModelData.DTYPE)
# For Caffe, we need to manually mark the output of the network.
# Since we know the name of the output tensor, we can find it in model_tensors.
network.mark_output(model_tensors.find(ModelData.OUTPUT_NAME))
return builder.build_engine(network, config)
def load_normalized_test_case(test_image, pagelocked_buffer):
# Converts the input image to a CHW Numpy array
def normalize_image(image):
# Resize, antialias and transpose the image to CHW.
c, h, w = ModelData.INPUT_SHAPE
return np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1]).astype(trt.nptype(ModelData.DTYPE)).ravel()
# Normalize the image and copy to pagelocked memory.
np.copyto(pagelocked_buffer, normalize_image(Image.open(test_image)))
return test_image
def main():
# Set the data path to the directory that contains the trained models and test images for inference.
_, data_files = common.find_sample_data(description="Runs a ResNet50 network with a TensorRT inference engine.", subfolder="resnet50", find_files=["binoculars.jpeg", "reflex_camera.jpeg", "tabby_tiger_cat.jpg", ModelData.MODEL_PATH, ModelData.DEPLOY_PATH, "class_labels.txt"])
# Get test images, models and labels.
test_images = data_files[0:3]
caffe_model_file, caffe_deploy_file, labels_file = data_files[3:]
labels = open(labels_file, 'r').read().split('\n')
# Build a TensorRT engine.
with build_engine_caffe(caffe_model_file, caffe_deploy_file) as engine:
# Inference is the same regardless of which parser is used to build the engine, since the model architecture is the same.
# Allocate buffers and create a CUDA stream.
h_input, d_input, h_output, d_output, stream = allocate_buffers(engine)
# Contexts are used to perform inference.
with engine.create_execution_context() as context:
# Load a normalized test case into the host input page-locked buffer.
test_image = random.choice(test_images)
test_case = load_normalized_test_case(test_image, h_input)
# Run the engine. The output will be a 1D tensor of length 1000, where each value represents the
# probability that the image corresponds to that label
do_inference(context, h_input, d_input, h_output, d_output, stream)
# We use the highest probability as our prediction. Its index corresponds to the predicted label.
pred = labels[np.argmax(h_output)]
if "_".join(pred.split()) in os.path.splitext(os.path.basename(test_case))[0]:
print("Correctly recognized " + test_case + " as " + pred)
else:
print("Incorrectly recognized " + test_case + " as " + pred)
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/introductory_parser_samples/caffe_resnet50.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
# This sample uses an ONNX ResNet50 Model to create a TensorRT Inference Engine
import random
import sys
import numpy as np
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
from PIL import Image
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
class ModelData(object):
MODEL_PATH = "ResNet50.onnx"
INPUT_SHAPE = (3, 224, 224)
# We can convert TensorRT data types to numpy types with trt.nptype()
DTYPE = trt.float32
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# The Onnx path is used for Onnx models.
def build_engine_onnx(model_file):
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(common.EXPLICIT_BATCH)
config = builder.create_builder_config()
parser = trt.OnnxParser(network, TRT_LOGGER)
config.max_workspace_size = common.GiB(1)
# Load the Onnx model and parse it in order to populate the TensorRT network.
with open(model_file, 'rb') as model:
if not parser.parse(model.read()):
print ('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print (parser.get_error(error))
return None
return builder.build_engine(network, config)
def load_normalized_test_case(test_image, pagelocked_buffer):
# Converts the input image to a CHW Numpy array
def normalize_image(image):
# Resize, antialias and transpose the image to CHW.
c, h, w = ModelData.INPUT_SHAPE
image_arr = np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1]).astype(trt.nptype(ModelData.DTYPE)).ravel()
# This particular ResNet50 model requires some preprocessing, specifically, mean normalization.
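        # Scaling to [0,1] and applying (x - 0.45) / 0.225 approximates the standard ImageNet mean/std normalization with a single shared value per channel.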
return (image_arr / 255.0 - 0.45) / 0.225
# Normalize the image and copy to pagelocked memory.
np.copyto(pagelocked_buffer, normalize_image(Image.open(test_image)))
return test_image
def main():
# Set the data path to the directory that contains the trained models and test images for inference.
_, data_files = common.find_sample_data(description="Runs a ResNet50 network with a TensorRT inference engine.", subfolder="resnet50", find_files=["binoculars.jpeg", "reflex_camera.jpeg", "tabby_tiger_cat.jpg", ModelData.MODEL_PATH, "class_labels.txt"])
# Get test images, models and labels.
test_images = data_files[0:3]
onnx_model_file, labels_file = data_files[3:]
labels = open(labels_file, 'r').read().split('\n')
# Build a TensorRT engine.
engine = build_engine_onnx(onnx_model_file)
# Inference is the same regardless of which parser is used to build the engine, since the model architecture is the same.
# Allocate buffers and create a CUDA stream.
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
# Contexts are used to perform inference.
context = engine.create_execution_context()
# Load a normalized test case into the host input page-locked buffer.
test_image = random.choice(test_images)
test_case = load_normalized_test_case(test_image, inputs[0].host)
# Run the engine. The output will be a 1D tensor of length 1000, where each value represents the
# probability that the image corresponds to that label
trt_outputs = common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
# We use the highest probability as our prediction. Its index corresponds to the predicted label.
pred = labels[np.argmax(trt_outputs[0])]
if "_".join(pred.split()) in os.path.splitext(os.path.basename(test_case))[0]:
print("Correctly recognized " + test_case + " as " + pred)
else:
print("Incorrectly recognized " + test_case + " as " + pred)
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/introductory_parser_samples/onnx_resnet50.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This sample uses a UFF ResNet50 Model to create a TensorRT Inference Engine
import random
from PIL import Image
import numpy as np
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
class ModelData(object):
MODEL_PATH = "resnet50-infer-5.uff"
INPUT_NAME = "input"
INPUT_SHAPE = (3, 224, 224)
OUTPUT_NAME = "output"
# We can convert TensorRT data types to numpy types with trt.nptype()
DTYPE = trt.float32
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Allocate host and device buffers, and create a stream.
def allocate_buffers(engine):
# Determine dimensions and create page-locked memory buffers (i.e. won't be swapped to disk) to hold host inputs/outputs.
h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(ModelData.DTYPE))
h_output = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(ModelData.DTYPE))
# Allocate device memory for inputs and outputs.
d_input = cuda.mem_alloc(h_input.nbytes)
d_output = cuda.mem_alloc(h_output.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
stream = cuda.Stream()
return h_input, d_input, h_output, d_output, stream
def do_inference(context, h_input, d_input, h_output, d_output, stream):
# Transfer input data to the GPU.
cuda.memcpy_htod_async(d_input, h_input, stream)
# Run inference.
context.execute_async(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(h_output, d_output, stream)
# Synchronize the stream
stream.synchronize()
# The UFF path is used for TensorFlow models. You can convert a frozen TensorFlow graph to UFF using the included convert-to-uff utility.
def build_engine_uff(model_file):
# You can set the logger severity higher to suppress messages (or lower to display more messages).
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser:
# Workspace size is the maximum amount of memory available to the builder while building an engine.
# It should generally be set as high as possible.
config.max_workspace_size = common.GiB(1)
# We need to manually register the input and output nodes for UFF.
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output(ModelData.OUTPUT_NAME)
# Load the UFF model and parse it in order to populate the TensorRT network.
parser.parse(model_file, network)
# Build and return an engine.
return builder.build_engine(network, config)
def load_normalized_test_case(test_image, pagelocked_buffer):
# Converts the input image to a CHW Numpy array
def normalize_image(image):
# Resize, antialias and transpose the image to CHW.
c, h, w = ModelData.INPUT_SHAPE
return np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1]).astype(trt.nptype(ModelData.DTYPE)).ravel()
# Normalize the image and copy to pagelocked memory.
np.copyto(pagelocked_buffer, normalize_image(Image.open(test_image)))
return test_image
def main():
# Set the data path to the directory that contains the trained models and test images for inference.
_, data_files = common.find_sample_data(description="Runs a ResNet50 network with a TensorRT inference engine.", subfolder="resnet50", find_files=["binoculars.jpeg", "reflex_camera.jpeg", "tabby_tiger_cat.jpg", ModelData.MODEL_PATH, "class_labels.txt"])
# Get test images, models and labels.
test_images = data_files[0:3]
uff_model_file, labels_file = data_files[3:]
labels = open(labels_file, 'r').read().split('\n')
# Build a TensorRT engine.
with build_engine_uff(uff_model_file) as engine:
# Inference is the same regardless of which parser is used to build the engine, since the model architecture is the same.
# Allocate buffers and create a CUDA stream.
h_input, d_input, h_output, d_output, stream = allocate_buffers(engine)
# Contexts are used to perform inference.
with engine.create_execution_context() as context:
# Load a normalized test case into the host input page-locked buffer.
test_image = random.choice(test_images)
test_case = load_normalized_test_case(test_image, h_input)
# Run the engine. The output will be a 1D tensor of length 1000, where each value represents the
# probability that the image corresponds to that label
do_inference(context, h_input, d_input, h_output, d_output, stream)
# We use the highest probability as our prediction. Its index corresponds to the predicted label.
pred = labels[np.argmax(h_output)]
if "_".join(pred.split()) in os.path.splitext(os.path.basename(test_case))[0]:
print("Correctly recognized " + test_case + " as " + pred)
else:
print("Incorrectly recognized " + test_case + " as " + pred)
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/introductory_parser_samples/uff_resnet50.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains functions for training a PyTorch MNIST Model
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import os
from random import randint
# Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
self.fc1 = nn.Linear(800, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
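        # Flatten: conv/pool shrink 28x28 -> 24x24 -> 12x12 -> 8x8 -> 4x4,
        # so 50 feature maps of 4x4 give 50 * 4 * 4 = 800 features.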
x = x.view(-1, 800)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
class MnistModel(object):
def __init__(self):
self.batch_size = 64
self.test_batch_size = 100
self.learning_rate = 0.0025
self.sgd_momentum = 0.9
self.log_interval = 100
# Fetch MNIST data set.
self.train_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=self.batch_size,
shuffle=True,
num_workers=1,
timeout=600)
self.test_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=self.test_batch_size,
shuffle=True,
num_workers=1,
timeout=600)
self.network = Net()
# Train the network for one or more epochs, validating after each epoch.
def learn(self, num_epochs=2):
# Train the network for a single epoch
def train(epoch):
self.network.train()
optimizer = optim.SGD(self.network.parameters(), lr=self.learning_rate, momentum=self.sgd_momentum)
for batch, (data, target) in enumerate(self.train_loader):
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = self.network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch % self.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch * len(data), len(self.train_loader.dataset), 100. * batch / len(self.train_loader), loss.data.item()))
# Test the network
def test(epoch):
self.network.eval()
test_loss = 0
correct = 0
for data, target in self.test_loader:
with torch.no_grad():
data, target = Variable(data), Variable(target)
output = self.network(data)
test_loss += F.nll_loss(output, target).data.item()
pred = output.data.max(1)[1]
correct += pred.eq(target.data).cpu().sum()
test_loss /= len(self.test_loader)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(self.test_loader.dataset), 100. * correct / len(self.test_loader.dataset)))
for e in range(num_epochs):
train(e + 1)
test(e + 1)
def get_weights(self):
return self.network.state_dict()
def get_random_testcase(self):
data, target = next(iter(self.test_loader))
case_num = randint(0, len(data) - 1)
test_case = data.numpy()[case_num].ravel().astype(np.float32)
test_name = target.numpy()[case_num]
return test_case, test_name
| TensorRT-master | samples/python/network_api_pytorch_mnist/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
# This sample uses an MNIST PyTorch model to create a TensorRT Inference Engine
import model
import numpy as np
import pycuda.autoinit
import tensorrt as trt
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class ModelData(object):
INPUT_NAME = "data"
INPUT_SHAPE = (1, 1, 28, 28)
OUTPUT_NAME = "prob"
OUTPUT_SIZE = 10
DTYPE = trt.float32
def populate_network(network, weights):
# Configure the network layers based on the weights provided.
input_tensor = network.add_input(name=ModelData.INPUT_NAME, dtype=ModelData.DTYPE, shape=ModelData.INPUT_SHAPE)
conv1_w = weights['conv1.weight'].numpy()
conv1_b = weights['conv1.bias'].numpy()
conv1 = network.add_convolution(input=input_tensor, num_output_maps=20, kernel_shape=(5, 5), kernel=conv1_w, bias=conv1_b)
conv1.stride = (1, 1)
pool1 = network.add_pooling(input=conv1.get_output(0), type=trt.PoolingType.MAX, window_size=(2, 2))
pool1.stride = (2, 2)
conv2_w = weights['conv2.weight'].numpy()
conv2_b = weights['conv2.bias'].numpy()
conv2 = network.add_convolution(pool1.get_output(0), 50, (5, 5), conv2_w, conv2_b)
conv2.stride = (1, 1)
pool2 = network.add_pooling(conv2.get_output(0), trt.PoolingType.MAX, (2, 2))
pool2.stride = (2, 2)
fc1_w = weights['fc1.weight'].numpy()
fc1_b = weights['fc1.bias'].numpy()
fc1 = network.add_fully_connected(input=pool2.get_output(0), num_outputs=500, kernel=fc1_w, bias=fc1_b)
relu1 = network.add_activation(input=fc1.get_output(0), type=trt.ActivationType.RELU)
fc2_w = weights['fc2.weight'].numpy()
fc2_b = weights['fc2.bias'].numpy()
fc2 = network.add_fully_connected(relu1.get_output(0), ModelData.OUTPUT_SIZE, fc2_w, fc2_b)
fc2.get_output(0).name = ModelData.OUTPUT_NAME
network.mark_output(tensor=fc2.get_output(0))
def build_engine(weights):
# For more information on TRT basics, refer to the introductory samples.
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(common.EXPLICIT_BATCH)
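    # The network is created in explicit-batch mode, so ModelData.INPUT_SHAPE
    # above carries the leading batch dimension (1) as part of the tensor shape.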
config = builder.create_builder_config()
runtime = trt.Runtime(TRT_LOGGER)
config.max_workspace_size = common.GiB(1)
# Populate the network using weights from the PyTorch model.
populate_network(network, weights)
# Build and return an engine.
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
# Loads a random test case from pytorch's DataLoader
def load_random_test_case(model, pagelocked_buffer):
# Select an image at random to be the test case.
img, expected_output = model.get_random_testcase()
# Copy to the pagelocked input buffer
np.copyto(pagelocked_buffer, img)
return expected_output
def main():
common.add_help(description="Runs an MNIST network using a PyTorch model")
# Train the PyTorch model
mnist_model = model.MnistModel()
mnist_model.learn()
weights = mnist_model.get_weights()
# Do inference with TensorRT.
engine = build_engine(weights)
# Build an engine, allocate buffers and create a stream.
# For more information on buffer allocation, refer to the introductory samples.
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
context = engine.create_execution_context()
case_num = load_random_test_case(mnist_model, pagelocked_buffer=inputs[0].host)
# For more information on performing inference, refer to the introductory samples.
# The common.do_inference function will return a list of outputs - we only have one in this case.
[output] = common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
pred = np.argmax(output)
print("Test Case: " + str(case_num))
print("Prediction: " + str(pred))
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/network_api_pytorch_mnist/sample.py |
| TensorRT-master | samples/python/uff_custom_plugin/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import graphsurgeon as gs
import uff
# lenet5.py
from lenet5 import ModelData
WORKING_DIR = os.environ.get("TRT_WORKING_DIR") or os.path.dirname(os.path.realpath(__file__))
# Path to which trained model will be saved (check README.md)
MODEL_PATH = os.path.join(
WORKING_DIR,
'models/trained_lenet5.pb'
)
# Generates mappings from unsupported TensorFlow operations to TensorRT plugins
def prepare_namespace_plugin_map():
# In this sample, the only operation that is not supported by TensorRT
# is tf.nn.relu6, so we create a new node which will tell UffParser which
# plugin to run and with which arguments in place of tf.nn.relu6.
# The "clipMin" and "clipMax" fields of this TensorFlow node will be parsed by createPlugin,
# and used to create a CustomClipPlugin with the appropriate parameters.
trt_relu6 = gs.create_plugin_node(name="trt_relu6", op="CustomClipPlugin", clipMin=0.0, clipMax=6.0)
namespace_plugin_map = {
ModelData.RELU6_NAME: trt_relu6
}
return namespace_plugin_map
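# Editorial sketch (not part of the original sample): a NumPy reference for what
# CustomClipPlugin computes, making the clipMin/clipMax fields above concrete.
def _relu6_reference(x, clip_min=0.0, clip_max=6.0):
    import numpy as np
    # tf.nn.relu6(x) is equivalent to clipping values into [0, 6].
    return np.clip(x, clip_min, clip_max)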
# Transforms model path to uff path (e.g. /a/b/c/d.pb -> /a/b/c/d.uff)
def model_path_to_uff_path(model_path):
uff_path = os.path.splitext(model_path)[0] + ".uff"
return uff_path
# Converts the TensorFlow frozen graphdef to UFF format using the UFF converter
def model_to_uff(model_path):
# Transform graph using graphsurgeon to map unsupported TensorFlow
# operations to appropriate TensorRT custom layer plugins
dynamic_graph = gs.DynamicGraph(model_path)
dynamic_graph.collapse_namespaces(prepare_namespace_plugin_map())
# Save resulting graph to UFF file
output_uff_path = model_path_to_uff_path(model_path)
uff.from_tensorflow(
dynamic_graph.as_graph_def(),
[ModelData.OUTPUT_NAME],
output_filename=output_uff_path,
text=True
)
return output_uff_path
def main():
# Load pretrained model
if not os.path.isfile(MODEL_PATH):
raise IOError("\n{}\n{}\n{}\n".format(
"Failed to load model file ({}).".format(MODEL_PATH),
"Please use 'python lenet5.py' to train and save the model.",
"For more information, see the included README.md"
))
uff_path = model_to_uff(MODEL_PATH)
print("Saved converted UFF model to: " + uff_path)
if __name__ == "__main__":
main()
| TensorRT-master | samples/python/uff_custom_plugin/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import os
import pycuda.autoinit
import sys
import tensorrt as trt
from lenet5 import MODEL_DIR, ModelData
from random import randint
# ../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir
)
)
import common
WORKING_DIR = os.environ.get("TRT_WORKING_DIR") or os.path.dirname(os.path.realpath(__file__))
# Path where clip plugin library will be built (check README.md)
CLIP_PLUGIN_LIBRARY = os.path.join(
WORKING_DIR,
'build/libclipplugin.so'
)
# Define global logger object (it should be a singleton,
# available for TensorRT from anywhere in code).
# You can set the logger severity higher to suppress messages
# (or lower to display more messages)
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Builds TensorRT Engine
def build_engine(model_path):
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser:
config.max_workspace_size = common.GiB(1)
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output(ModelData.OUTPUT_NAME)
parser.parse(model_path, network)
return builder.build_engine(network, config)
def load_test_data():
with open(os.path.join(MODEL_DIR, "x_test.npy"), 'rb') as f:
x_test = np.load(f)
with open(os.path.join(MODEL_DIR, "y_test.npy"), 'rb') as f:
y_test = np.load(f)
return x_test, y_test
# Loads a test case into the provided pagelocked_buffer. Returns loaded test case label.
def load_normalized_test_case(pagelocked_buffer):
x_test, y_test = load_test_data()
num_test = len(x_test)
case_num = randint(0, num_test-1)
img = x_test[case_num].ravel()
np.copyto(pagelocked_buffer, img)
return y_test[case_num]
def main():
# Load the shared object file containing the Clip plugin implementation.
# By doing this, you will also register the Clip plugin with the TensorRT
# PluginRegistry through use of the macro REGISTER_TENSORRT_PLUGIN present
# in the plugin implementation. Refer to plugin/clipPlugin.cpp for more details.
if not os.path.isfile(CLIP_PLUGIN_LIBRARY):
raise IOError("\n{}\n{}\n{}\n".format(
"Failed to load library ({}).".format(CLIP_PLUGIN_LIBRARY),
"Please build the Clip sample plugin.",
"For more information, see the included README.md"
))
ctypes.CDLL(CLIP_PLUGIN_LIBRARY)
# Load pretrained model
model_path = os.path.join(MODEL_DIR, "trained_lenet5.uff")
if not os.path.isfile(model_path):
raise IOError("\n{}\n{}\n{}\n".format(
"Failed to load model file ({}).".format(model_path),
"Please use 'python3 model.py' to train and save the UFF model.",
"For more information, see README.md"
))
# Build an engine and retrieve the image mean from the model.
with build_engine(model_path) as engine:
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
with engine.create_execution_context() as context:
print("\n=== Testing ===")
test_case = load_normalized_test_case(inputs[0].host)
print("Loading Test Case: " + str(test_case))
# The common do_inference function will return a list of outputs - we only have one in this case.
[pred] = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
print("Prediction: " + str(np.argmax(pred)))
if __name__ == "__main__":
main()
| TensorRT-master | samples/python/uff_custom_plugin/sample.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains functions for training a TensorFlow model
import tensorflow as tf
import tensorrt as trt
import numpy as np
import os
WORKING_DIR = os.environ.get("TRT_WORKING_DIR") or os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = os.path.join(
WORKING_DIR,
'models'
)
# MNIST dataset metadata
MNIST_IMAGE_SIZE = 28
MNIST_CHANNELS = 1
MNIST_CLASSES = 10
class ModelData(object):
INPUT_NAME = "InputLayer"
INPUT_SHAPE = (MNIST_CHANNELS, MNIST_IMAGE_SIZE, MNIST_IMAGE_SIZE)
RELU6_NAME = "ReLU6"
OUTPUT_NAME = "OutputLayer/Softmax"
    OUTPUT_SHAPE = (MNIST_CLASSES, )  # the softmax output has one entry per class
DATA_TYPE = trt.float32
def load_data():
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = np.reshape(x_train, (-1, 1, 28, 28))
x_test = np.reshape(x_test, (-1, 1, 28, 28))
return x_train, y_train, x_test, y_test
def build_model():
# Create the keras model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=[1, 28, 28], name="InputLayer"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.Activation(activation=tf.nn.relu6, name="ReLU6"))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax, name="OutputLayer"))
return model
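# Note: the explicit "ReLU6" layer name above must match ModelData.RELU6_NAME,
# since model.py uses that name to collapse the node onto CustomClipPlugin
# during UFF conversion.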
def train_model():
# Build and compile model
model = build_model()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Load data
x_train, y_train, x_test, y_test = load_data()
np.save(os.path.join(MODEL_DIR, "x_test.npy"), x_test)
np.save(os.path.join(MODEL_DIR, "y_test.npy"), y_test)
# Train the model on the data
model.fit(
x_train, y_train,
epochs = 10,
verbose = 1
)
# Evaluate the model on test data
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Test loss: {}\nTest accuracy: {}".format(test_loss, test_acc))
return model
def maybe_mkdir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def save_model(model):
output_names = model.output.op.name
sess = tf.keras.backend.get_session()
graphdef = sess.graph.as_graph_def()
frozen_graph = tf.graph_util.convert_variables_to_constants(sess, graphdef, [output_names])
frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)
# Make directory to save model in if it doesn't exist already
maybe_mkdir(MODEL_DIR)
model_path = os.path.join(MODEL_DIR, "trained_lenet5.pb")
with open(model_path, "wb") as ofile:
ofile.write(frozen_graph.SerializeToString())
if __name__ == "__main__":
model = train_model()
save_model(model)
| TensorRT-master | samples/python/uff_custom_plugin/lenet5.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
import os
import pycuda.driver as cuda
import pycuda.autoinit
from PIL import Image
import numpy as np
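# MNIST files use the IDX format: a big-endian int32 header (magic number,
# item count, then any per-dimension sizes) followed by raw uint8 data.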
# Returns a numpy buffer of shape (num_images, 1, 28, 28)
def load_mnist_data(filepath):
with open(filepath, "rb") as f:
        raw_buf = np.frombuffer(f.read(), dtype=np.uint8)
# Make sure the magic number is what we expect
assert raw_buf[0:4].view(">i4")[0] == 2051
num_images = raw_buf[4:8].view(">i4")[0]
image_c = 1
image_h = raw_buf[8:12].view(">i4")[0]
image_w = raw_buf[12:16].view(">i4")[0]
# Need to scale all values to the range of [0, 1]
return np.ascontiguousarray((raw_buf[16:] / 255.0).astype(np.float32).reshape(num_images, image_c, image_h, image_w))
# Returns a numpy buffer of shape (num_images)
def load_mnist_labels(filepath):
with open(filepath, "rb") as f:
        raw_buf = np.frombuffer(f.read(), dtype=np.uint8)
# Make sure the magic number is what we expect
assert raw_buf[0:4].view(">i4")[0] == 2049
num_labels = raw_buf[4:8].view(">i4")[0]
return np.ascontiguousarray(raw_buf[8:].astype(np.int32).reshape(num_labels))
class MNISTEntropyCalibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, training_data, cache_file, batch_size=64):
# Whenever you specify a custom constructor for a TensorRT class,
# you MUST call the constructor of the parent explicitly.
trt.IInt8EntropyCalibrator2.__init__(self)
self.cache_file = cache_file
# Every time get_batch is called, the next batch of size batch_size will be copied to the device and returned.
self.data = load_mnist_data(training_data)
self.batch_size = batch_size
self.current_index = 0
# Allocate enough memory for a whole batch.
self.device_input = cuda.mem_alloc(self.data[0].nbytes * self.batch_size)
def get_batch_size(self):
return self.batch_size
# TensorRT passes along the names of the engine bindings to the get_batch function.
# You don't necessarily have to use them, but they can be useful to understand the order of
# the inputs. The bindings list is expected to have the same ordering as 'names'.
def get_batch(self, names):
if self.current_index + self.batch_size > self.data.shape[0]:
return None
current_batch = int(self.current_index / self.batch_size)
if current_batch % 10 == 0:
print("Calibrating batch {:}, containing {:} images".format(current_batch, self.batch_size))
batch = self.data[self.current_index:self.current_index + self.batch_size].ravel()
cuda.memcpy_htod(self.device_input, batch)
self.current_index += self.batch_size
return [self.device_input]
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
return f.read()
def write_calibration_cache(self, cache):
with open(self.cache_file, "wb") as f:
f.write(cache)
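# Editorial sketch (hypothetical usage, mirroring the accompanying sample): the
# calibrator is attached to the builder config, and TensorRT then drives
# get_batch()/read_calibration_cache() itself while building the INT8 engine.
def _example_int8_config(builder, calibrator):
    config = builder.create_builder_config()
    config.set_flag(trt.BuilderFlag.INT8)  # enable INT8 kernels
    config.int8_calibrator = calibrator    # calibration source for scale search
    return config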
| TensorRT-master | samples/python/int8_caffe_mnist/calibrator.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import random
# For our custom calibrator
from calibrator import load_mnist_data, load_mnist_labels, MNISTEntropyCalibrator
# For ../common.py
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], os.path.pardir))
import common
TRT_LOGGER = trt.Logger()
class ModelData(object):
DEPLOY_PATH = "deploy.prototxt"
MODEL_PATH = "mnist_lenet.caffemodel"
OUTPUT_NAME = "prob"
# The original model is a float32 one.
DTYPE = trt.float32
# This function builds an engine from a Caffe model.
def build_int8_engine(deploy_file, model_file, calib, batch_size=32):
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.CaffeParser() as parser, trt.Runtime(TRT_LOGGER) as runtime:
        # We set the builder batch size to match the batch size we will use for
        # inference. Note that this is not required in general: the inference
        # batch size is independent of the calibration batch size.
builder.max_batch_size = batch_size
config.max_workspace_size = common.GiB(1)
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = calib
# Parse Caffe model
model_tensors = parser.parse(deploy=deploy_file, model=model_file, network=network, dtype=ModelData.DTYPE)
network.mark_output(model_tensors.find(ModelData.OUTPUT_NAME))
# Build engine and do int8 calibration.
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
def check_accuracy(context, batch_size, test_set, test_labels):
inputs, outputs, bindings, stream = common.allocate_buffers(context.engine)
num_correct = 0
num_total = 0
batch_num = 0
for start_idx in range(0, test_set.shape[0], batch_size):
batch_num += 1
if batch_num % 10 == 0:
print("Validating batch {:}".format(batch_num))
# If the number of images in the test set is not divisible by the batch size, the last batch will be smaller.
# This logic is used for handling that case.
end_idx = min(start_idx + batch_size, test_set.shape[0])
effective_batch_size = end_idx - start_idx
# Do inference for every batch.
inputs[0].host = test_set[start_idx:start_idx + effective_batch_size]
[output] = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream, batch_size=effective_batch_size)
# Use argmax to get predictions and then check accuracy
        preds = np.argmax(output.reshape(batch_size, 10)[0:effective_batch_size], axis=1)
labels = test_labels[start_idx:start_idx + effective_batch_size]
num_total += effective_batch_size
num_correct += np.count_nonzero(np.equal(preds, labels))
percent_correct = 100 * num_correct / float(num_total)
print("Total Accuracy: {:}%".format(percent_correct))
def main():
_, data_files = common.find_sample_data(description="Runs a Caffe MNIST network in Int8 mode", subfolder="mnist", find_files=["t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte", "train-images-idx3-ubyte", ModelData.DEPLOY_PATH, ModelData.MODEL_PATH], err_msg="Please follow the README to download the MNIST dataset")
[test_set, test_labels, train_set, deploy_file, model_file] = data_files
# Now we create a calibrator and give it the location of our calibration data.
# We also allow it to cache calibration data for faster engine building.
calibration_cache = "mnist_calibration.cache"
calib = MNISTEntropyCalibrator(test_set, cache_file=calibration_cache)
# Inference batch size can be different from calibration batch size.
batch_size = 32
with build_int8_engine(deploy_file, model_file, calib, batch_size) as engine, engine.create_execution_context() as context:
check_accuracy(context, batch_size, test_set=load_mnist_data(test_set), test_labels=load_mnist_labels(test_labels))
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/int8_caffe_mnist/sample.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from PIL import Image
import urllib.request
import numpy as np
import argparse
import gzip
import os
# Returns a numpy buffer of shape (num_images, 28, 28)
def load_mnist_data(buffer):
    raw_buf = np.frombuffer(buffer, dtype=np.uint8)
# Make sure the magic number is what we expect
assert raw_buf[0:4].view(">i4")[0] == 2051
num_images = raw_buf[4:8].view(">i4")[0]
image_h = raw_buf[8:12].view(">i4")[0]
image_w = raw_buf[12:16].view(">i4")[0]
# Colors in the dataset are inverted vs. what the samples expect.
return np.ascontiguousarray(255 - raw_buf[16:].reshape(num_images, image_h, image_w))
# Returns a list of length num_images
def load_mnist_labels(buffer):
    raw_buf = np.frombuffer(buffer, dtype=np.uint8)
# Make sure the magic number is what we expect
assert raw_buf[0:4].view(">i4")[0] == 2049
num_labels = raw_buf[4:8].view(">i4")[0]
return list(raw_buf[8:].astype(np.int32).reshape(num_labels))
def main():
parser = argparse.ArgumentParser(description="Extracts 10 PGM files from the MNIST dataset", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-o", "--output", help="Path to the output directory.", default=os.getcwd())
args, _ = parser.parse_known_args()
with urllib.request.urlopen("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz") as res:
data = load_mnist_data(gzip.decompress(res.read()))
with urllib.request.urlopen("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz") as res:
labels = load_mnist_labels(gzip.decompress(res.read()))
output_dir = args.output
# Find one image for each digit.
for i in range(10):
index = labels.index(i)
image = Image.fromarray(data[index], mode="L")
path = os.path.join(output_dir, "{:}.pgm".format(i))
image.save(path)
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/scripts/download_mnist_pgms.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import numpy as np
import pycuda.autoinit
import tensorrt as trt
from data_processing import get_inputs, preprocess
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import common
TRT_LOGGER = trt.Logger()
def get_engine(onnx_file_path, engine_file_path):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine():
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(common.EXPLICIT_BATCH)
parser = trt.OnnxParser(network, TRT_LOGGER)
runtime = trt.Runtime(TRT_LOGGER)
# Parse model file
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
print('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print(parser.get_error(error))
return None
print('Completed parsing of ONNX file')
# Print input info
print('Network inputs:')
for i in range(network.num_inputs):
tensor = network.get_input(i)
print(tensor.name, trt.nptype(tensor.dtype), tensor.shape)
network.get_input(0).shape = [10, 1]
network.get_input(1).shape = [10, 1, 1, 16]
network.get_input(2).shape = [6, 1]
network.get_input(3).shape = [6, 1, 1, 16]
config = builder.create_builder_config()
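            # Mark the engine as refittable so its weights can be replaced later
            # with trt.Refitter, without re-parsing or rebuilding the network.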
config.set_flag(trt.BuilderFlag.REFIT)
config.max_workspace_size = 1 << 28 # 256MiB
print('Building an engine from file {}; this may take a while...'.format(
onnx_file_path))
plan = builder.build_serialized_network(network, config)
engine = runtime.deserialize_cuda_engine(plan)
print("Completed creating Engine")
with open(engine_file_path, "wb") as f:
f.write(plan)
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f:
runtime = trt.Runtime(TRT_LOGGER)
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine()
def main():
onnx_file_path = 'bidaf-modified.onnx'
engine_file_path = "bidaf.trt"
# input
context = 'A quick brown fox jumps over the lazy dog.'
query = 'What color is the fox?'
cw_str, _ = preprocess(context)
# get ravelled data
cw, cc, qw, qc = get_inputs(context, query)
# Do inference with TensorRT
weights_names = ["Parameter576_B_0", "W_0"]
refit_weights_dict = {name : np.load("{}.npy".format(name)) for name in weights_names}
fake_weights_dict = {name : np.ones_like(weights) for name, weights in refit_weights_dict.items()}
engine = get_engine(onnx_file_path, engine_file_path)
refitter = trt.Refitter(engine, TRT_LOGGER)
for weights_dict, answer_correct in [(fake_weights_dict, False), (refit_weights_dict, True)]:
print("Refitting engine...")
# To get a list of all refittable weights' names
# in the network, use refitter.get_all_weights().
# Refit named weights via set_named_weights
for name in weights_names:
refitter.set_named_weights(name, weights_dict[name])
# Get missing weights names. This should return empty
# lists in this case.
missing_weights = refitter.get_missing_weights()
        assert len(missing_weights) == 0, \
            "Refitter found missing weights. Call set_named_weights() or set_weights() for all missing weights"
# Refit the engine with the new weights. This will return True if
# the refit operation succeeded.
assert refitter.refit_cuda_engine()
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
print("Doing inference...")
# Do inference
# Set host input. The common.do_inference_v2 function will copy the input to the GPU before executing.
inputs[0].host = cw
inputs[1].host = cc
inputs[2].host = qw
inputs[3].host = qc
execution_context = engine.create_execution_context()
trt_outputs = common.do_inference_v2(execution_context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
        # np.asscalar is deprecated/removed in recent NumPy; .item() is equivalent.
        start = trt_outputs[0].item()
        end = trt_outputs[1].item()
answer = [w.encode() for w in cw_str[start:end + 1].reshape(-1)]
assert answer_correct == (answer == [b'brown'])
print("Passed")
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/engine_refit_onnx_bidaf/build_and_refit_engine.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import nltk
from nltk import word_tokenize
import json
import tensorrt as trt
def preprocess(text):
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
tokens = word_tokenize(text)
# split into lower-case word tokens, in numpy array with shape of (seq, 1)
words = np.asarray([w.lower() for w in tokens]).reshape(-1, 1)
# split words into chars, in numpy array with shape of (seq, 1, 1, 16)
chars = [[c for c in t][:16] for t in tokens]
chars = [cs+['']*(16-len(cs)) for cs in chars]
chars = np.asarray(chars).reshape(-1, 1, 1, 16)
return words, chars
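# For example (illustrative, not from the original file): preprocess("Brown fox")
# yields words of shape (2, 1) and chars of shape (2, 1, 1, 16), with each
# token's characters truncated or padded to length 16.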
def get_map_func(filepath):
    with open(filepath) as file:
        category_map = json.load(file)
category_mapper = dict(zip(category_map["cats_strings"], category_map["cats_int64s"]))
default_int64 = category_map["default_int64"]
func = lambda s: category_mapper.get(s, default_int64)
return np.vectorize(func)
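# Example (illustrative): with {"cats_strings": ["fox"], "cats_int64s": [7],
# "default_int64": 0}, the returned vectorized function maps
# np.array([["fox"], ["dog"]]) to np.array([[7], [0]]).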
def get_inputs(context, query):
cw, cc = preprocess(context)
qw, qc = preprocess(query)
context_word_func = get_map_func("CategoryMapper_4.json")
context_char_func = get_map_func("CategoryMapper_5.json")
query_word_func = get_map_func("CategoryMapper_6.json")
query_char_func = get_map_func("CategoryMapper_7.json")
cw_input = context_word_func(cw).astype(trt.nptype(trt.int32)).ravel()
cc_input = context_char_func(cc).astype(trt.nptype(trt.int32)).ravel()
qw_input = query_word_func(qw).astype(trt.nptype(trt.int32)).ravel()
qc_input = query_char_func(qc).astype(trt.nptype(trt.int32)).ravel()
return cw_input, cc_input, qw_input, qc_input
| TensorRT-master | samples/python/engine_refit_onnx_bidaf/data_processing.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import onnx_graphsurgeon as gs
import onnx
import numpy as np
import json
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
from downloader import getFilePath
def drop_category_mapper_nodes(graph):
new_inputs = []
for org_input in graph.inputs:
# head node, simply disconnect it with others
assert len(org_input.outputs) == 1
category_mapper_node = org_input.outputs[0]
assert category_mapper_node.op == 'CategoryMapper'
assert len(category_mapper_node.outputs) == 1
new_inputs.append(category_mapper_node.outputs[0])
category_mapper_node.inputs.clear()
category_mapper_node.outputs.clear()
# Save mapping info to preprocess inputs.
with open(category_mapper_node.name + '.json', 'w') as fp:
json.dump(category_mapper_node.attrs, fp)
graph.inputs = new_inputs
def replace_unsupported_ops(graph):
# replace hardmax with ArgMax
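    # TensorRT has no Hardmax op. Hardmax followed by Compress selects the entry
    # with the highest score, which is equivalent to ArgMax followed by Gather,
    # so the nodes are rewritten in place below.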
hardmaxes = [node for node in graph.nodes if node.op == "Hardmax"]
assert len(hardmaxes) == 1
hardmax = hardmaxes[0]
hardmax.op = "ArgMax"
hardmax.name = "ArgMax(org:" + hardmax.name + ")"
hardmax.attrs["axis"] = 1
hardmax.attrs["keepdims"] = 0
cast = hardmax.o()
reshape = cast.o()
hardmax.outputs = reshape.outputs
assert len(hardmax.outputs) == 1
hardmax.outputs[0].dtype = np.int64
hardmax.outputs[0].shape = [1]
compress = reshape.o()
compress.op = "Gather"
compress.name = "Gather(org:" + compress.name + ")"
compress.attrs["axis"] = 1
cast.outputs.clear()
reshape.outputs.clear()
# Remove the node from the graph completely
graph.cleanup().toposort()
def save_weights_for_refitting(graph):
# Save weights for refitting
tmap = graph.tensors()
np.save("Parameter576_B_0.npy", tmap["Parameter576_B_0"].values)
np.save("W_0.npy", tmap["W_0"].values)
def main():
org_model_file_path = getFilePath('samples/python/engine_refit_onnx_bidaf/bidaf-original.onnx')
print("Modifying the ONNX model ...")
original_model = onnx.load(org_model_file_path)
graph = gs.import_onnx(original_model)
drop_category_mapper_nodes(graph)
replace_unsupported_ops(graph)
save_weights_for_refitting(graph)
new_model = gs.export_onnx(graph)
modified_model_name = "bidaf-modified.onnx"
onnx.checker.check_model(new_model)
onnx.save(new_model, modified_model_name)
print("Modified ONNX model saved as {}".format(modified_model_name))
print("Done.")
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/engine_refit_onnx_bidaf/prepare_model.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import glob
import os
import tarfile
import pycuda.autoinit
import tensorrt as trt
import utils.coco as coco_utils # COCO dataset descriptors
# Utility functions
import utils.mAP as voc_mAP_utils # mAP computation
from utils.modeldata import ModelData
import utils.voc as voc_utils # VOC dataset descriptors
from PIL import Image
from utils.paths import PATHS # Path management
# VOC and COCO label lists
VOC_CLASSES = voc_utils.VOC_CLASSES_LIST
COCO_LABELS = coco_utils.COCO_CLASSES_LIST
# Model used for inference
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
# Precision command line argument -> TRT Engine datatype
TRT_PRECISION_TO_DATATYPE = {
16: trt.DataType.HALF,
32: trt.DataType.FLOAT
}
# Layout of TensorRT network output metadata
TRT_PREDICTION_LAYOUT = {
"image_id": 0,
"label": 1,
"confidence": 2,
"xmin": 3,
"ymin": 4,
"xmax": 5,
"ymax": 6
}
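# Each detection therefore occupies len(TRT_PREDICTION_LAYOUT) == 7 consecutive
# floats; e.g. (illustrative) the confidence of a detection starting at index
# idx lives at detection_out[idx + 2].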
class Detection(object):
"""Describes detection for VOC detection file.
    During evaluation of the model on VOC, detected objects are saved to
    result files, one file per class. One line in such a file corresponds
    to one object detected in an image. The Detection class describes
    one such detection.
Attributes:
image_number (str): number of image from VOC dataset
confidence (float): confidence score for detection
xmin (float): bounding box min x coordinate
ymin (float): bounding box min y coordinate
xmax (float): bounding box max x coordinate
ymax (float): bounding box max y coordinate
"""
def __init__(self, image_number, confidence, xmin, ymin, xmax, ymax):
self.image_number = image_number
self.confidence = confidence
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def __repr__(self):
return "{} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(
self.image_number, self.confidence,
self.xmin, self.ymin, self.xmax, self.ymax
)
def write_to_file(self, f):
"""Adds detection corresponding to Detection object to file f.
Args:
f (file): detection results file
"""
f.write(self.__repr__())
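# A line written by Detection.write_to_file therefore looks like (illustrative):
# 000012 0.873 48.0 240.0 195.0 371.0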
def fetch_prediction_field(field_name, detection_out, pred_start_idx):
"""Fetches prediction field from prediction byte array.
    After TensorRT inference, prediction data is saved in a
    byte array returned by the object detection network.
    This byte array contains several pieces of data about each
    prediction - we call one such piece a prediction field.
    Given the prediction byte array returned by the network, the
    starting index of a prediction, and the field name of interest,
    this function returns the corresponding prediction field data.
Args:
field_name (str): field of interest, one of keys of TRT_PREDICTION_LAYOUT
detection_out (array): object detection network output
pred_start_idx (int): start index of prediction of interest in detection_out
Returns:
Prediction field corresponding to given data.
"""
return detection_out[pred_start_idx + TRT_PREDICTION_LAYOUT[field_name]]
def analyze_tensorrt_prediction(detection_out, pred_start_idx):
image_id = int(fetch_prediction_field("image_id", detection_out, pred_start_idx))
label = int(fetch_prediction_field("label", detection_out, pred_start_idx))
confidence = fetch_prediction_field("confidence", detection_out, pred_start_idx)
xmin = fetch_prediction_field("xmin", detection_out, pred_start_idx)
ymin = fetch_prediction_field("ymin", detection_out, pred_start_idx)
xmax = fetch_prediction_field("xmax", detection_out, pred_start_idx)
ymax = fetch_prediction_field("ymax", detection_out, pred_start_idx)
xmin = float(xmin) * ModelData.get_input_width()
ymin = float(ymin) * ModelData.get_input_height()
xmax = float(xmax) * ModelData.get_input_width()
ymax = float(ymax) * ModelData.get_input_height()
return image_id, label, confidence, xmin, ymin, xmax, ymax
def produce_tensorrt_detections(detection_files, trt_inference_wrapper, max_batch_size,
image_numbers, image_path):
"""Fetches output from TensorRT model, and saves it to results file.
    The output of the TensorRT model is a pair of:
    * a detection byte array that contains detection metadata,
      laid out according to TRT_PREDICTION_LAYOUT
    * the number of detections returned by NMS
    TRT_PREDICTION_LAYOUT fields correspond to Tensorflow ones as follows:
    label -> detection_classes
    confidence -> detection_scores
    xmin, ymin, xmax, ymax -> detection_boxes
    The number of detections corresponds to the num_detections Tensorflow output.
    Tensorflow output semantics are more thoroughly explained in
    produce_tensorflow_detections().
    This function iterates over all VOC images, feeding each one
    into the TensorRT model, fetching object detections
    from each output, converting them to Detection objects,
    and saving them to the detection result files.
    Args:
        detection_files (dict): dictionary that maps class labels to
            class result files
        trt_inference_wrapper (TRTInference):
            internal Python class wrapping TensorRT inference
            setup/run code
        max_batch_size (int): maximum batch size used for inference
        image_numbers [str]: VOC image numbers to use for inference
        image_path (str): format string that yields the path to a VOC image
            file when you do image_path.format(voc_image_number)
"""
total_imgs = len(image_numbers)
for idx in range(0, len(image_numbers), max_batch_size):
imgs = image_numbers[idx:idx+max_batch_size]
batch_size = len(imgs)
print("Infering image {}/{}".format(idx+1, total_imgs))
image_paths = [image_path.format(img) for img in imgs]
detections, keep_count = trt_inference_wrapper.infer_batch(image_paths)
prediction_fields = len(TRT_PREDICTION_LAYOUT)
for img_idx, img_number in enumerate(imgs):
img_predictions_start_idx = prediction_fields * keep_count[img_idx] * img_idx
for det in range(int(keep_count[img_idx])):
_, label, confidence, xmin, ymin, xmax, ymax = \
analyze_tensorrt_prediction(detections, img_predictions_start_idx + det * prediction_fields)
if confidence > 0.0:
label_name = voc_utils.coco_label_to_voc_label(COCO_LABELS[label])
if label_name:
det_file = detection_files[label_name]
detection = Detection(
img_number,
confidence,
xmin,
ymin,
xmax,
ymax,
)
detection.write_to_file(det_file)
def produce_tensorflow_detections(detection_files, tf_inference_wrapper, batch_size,
image_numbers, image_path):
"""Fetches output from Tensorflow model, and saves it to results file.
    The output from Tensorflow is an output_dict Python
    dictionary containing the following fields:
    num_detections: maximum number of detections kept per image
    detection_classes: labels of the classes detected
    detection_scores: confidences for detections
    detection_boxes: bounding box coordinates for detections,
        in format (ymin, xmin, ymax, xmax)
    This function iterates over all VOC images, feeding each one
    into the Tensorflow model, fetching object detections
    from each output, converting them to Detection objects,
    and saving them to the detection result files.
    Args:
        detection_files (dict): dictionary that maps class labels to
            class result files
        tf_inference_wrapper (TensorflowInference):
            internal Python class wrapping Tensorflow inference
            setup/run code
        batch_size (int): batch size used for inference
        image_numbers [str]: VOC image numbers to use for inference
        image_path (str): format string that yields the path to a VOC image
            file when you do image_path.format(voc_image_number)
"""
total_imgs = len(image_numbers)
for idx in range(0, len(image_numbers), batch_size):
print("Infering image {}/{}".format(idx+1, total_imgs))
imgs = image_numbers[idx:idx+batch_size]
image_paths = [image_path.format(img) for img in imgs]
output_dict = tf_inference_wrapper.infer_batch(image_paths)
keep_count = output_dict['num_detections']
for img_idx, img_number in enumerate(imgs):
for det in range(int(keep_count[img_idx])):
label = output_dict['detection_classes'][img_idx][det]
confidence = output_dict['detection_scores'][img_idx][det]
bbox = output_dict['detection_boxes'][img_idx][det]
                # Output bounding boxes are in normalized [0, 1] format;
                # here we rescale them to pixel coordinates of the model input size
ymin, xmin, ymax, xmax = bbox
xmin = float(xmin) * ModelData.get_input_width()
ymin = float(ymin) * ModelData.get_input_height()
xmax = float(xmax) * ModelData.get_input_width()
ymax = float(ymax) * ModelData.get_input_height()
                # Detection is saved only if confidence is greater than zero
if confidence > 0.0:
# Model was trained on COCO, so we need to convert label to VOC one
label_name = voc_utils.coco_label_to_voc_label(COCO_LABELS[label])
if label_name: # Checks for label_name correctness
det_file = detection_files[label_name]
detection = Detection(
img_number,
confidence,
xmin,
ymin,
xmax,
ymax,
)
detection.write_to_file(det_file)
def should_skip_inference(parsed_args):
"""Checks if inference should be skipped.
When evaluating on VOC, if results from some earlier run
of the script exist, we can reuse them to evaluate VOC mAP.
The user can overwrite this behavior by supplying -f flag to the script.
Args:
parsed_args (dict): commandline arguments parsed by
parse_commandline_arguments()
Returns:
bool: if True, script skips inference
"""
skip_inference = True
for voc_class in VOC_CLASSES:
voc_class_detection_file = \
os.path.join(parsed_args['results_dir'], 'det_test_{}.txt'.format(voc_class))
if os.path.exists(voc_class_detection_file) and not parsed_args['force_inference']:
continue
else:
skip_inference = False
if skip_inference:
print("Model detections present - skipping inference. To avoid this, use -f flag.")
return skip_inference
def preprocess_voc():
"""Resizes all VOC images to 300x300 and saves them into .ppm files.
    This script assumes all images fed to the network in batches have size 300x300,
    so this function preprocesses all VOC images to fit that format.
"""
voc_root = PATHS.get_voc_dir_path()
voc_jpegs = glob.glob(
os.path.join(voc_root, 'JPEGImages', '*.jpg'))
voc_ppms = glob.glob(
os.path.join(voc_root, 'PPMImages', '*.ppm'))
# Check if preprocessing is needed by comparing
# image names between JPEGImages and PPMImages
voc_jpegs_basenames = \
[os.path.splitext(os.path.basename(p))[0] for p in voc_jpegs]
voc_ppms_basenames = \
[os.path.splitext(os.path.basename(p))[0] for p in voc_ppms]
# If lists are not the same, preprocessing is needed
if sorted(voc_jpegs_basenames) != sorted(voc_ppms_basenames):
print("Preprocessing VOC dataset. It may take few minutes.")
# Make PPMImages directory if it doesn't exist
voc_ppms_path = PATHS.get_voc_ppm_img_path()
if not os.path.exists(os.path.dirname(voc_ppms_path)):
os.makedirs(os.path.dirname(voc_ppms_path))
# For each .jpg file, make a resized
# .ppm copy to fit model input expectations
for voc_jpeg_path in voc_jpegs:
voc_jpeg_basename = os.path.basename(voc_jpeg_path)
voc_ppm_path = voc_ppms_path.format(
os.path.splitext(voc_jpeg_basename)[0])
if not os.path.exists(voc_ppm_path):
img_pil = Image.open(voc_jpeg_path)
img_pil = img_pil.resize(
size=(
ModelData.get_input_width(),
ModelData.get_input_height()),
resample=Image.BILINEAR
)
img_pil.save(voc_ppm_path)
def adjust_paths(args, data_dir):
"""Adjust all file/directory paths, arguments passed by user.
During script launch, user can pass several arguments to the script
(e.g. --workspace_dir, --data), that define where script will look
for the files needed for execution. This function adjusts internal
Paths Python datastructure to accomodate for changes from defaults
requested by user through appropriate command line arguments.
Args:
args (argparse.Namespace): parsed user arguments
data_dir (str): path to the data directory
"""
if args.workspace_dir:
PATHS.set_workspace_dir_path(args.workspace_dir)
if not os.path.exists(PATHS.get_workspace_dir_path()):
os.makedirs(PATHS.get_workspace_dir_path())
PATHS.set_data_dir_path(data_dir)
def extract_voc_data_if_needed():
if os.path.exists(PATHS.get_voc_dir_path()):
return
voc_archive_path = PATHS.get_data_file_path('VOCtest_06-Nov-2007.tar')
print("Unpacking {}".format(voc_archive_path))
with tarfile.open(voc_archive_path, "r") as tar:
tar.extractall(path=PATHS.get_sample_root())
print("Unpacking done!")
def parse_commandline_arguments():
"""Parses command line arguments and adjusts internal data structures."""
# Define script command line arguments
parser = argparse.ArgumentParser(description='Run object detection evaluation on VOC2007 dataset.')
parser.add_argument('inference_backend', metavar='INFERENCE_BACKEND',
type=str, choices=['tensorrt', 'tensorflow'], default='tensorrt', nargs='?',
help='inference backend to run evaluation with')
parser.add_argument('-p', '--precision', type=int, choices=[32, 16], default=32,
help='desired TensorRT float precision to build an engine with')
parser.add_argument('-b', '--max_batch_size', type=int, default=64,
help='max TensorRT engine batch size')
parser.add_argument('-f', '--force_inference', action='store_true',
help='force model inference even if detections exist')
parser.add_argument('-w', '--workspace_dir',
help='sample workspace directory')
parser.add_argument('-d', '--data',
help="Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument.")
args, _ = parser.parse_known_args()
data_dir = os.environ.get('TRT_DATA_DIR', None) if args.data is None else args.data
if data_dir is None:
raise ValueError("Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR.")
adjust_paths(args, data_dir)
extract_voc_data_if_needed()
# Verify Paths after adjustments. This also exits script if verification fails
PATHS.verify_all_paths(should_verify_voc=True)
# Fetch directory to save inference results to, create it if it doesn't exist
trt_engine_datatype = None
trt_engine_path = None
if args.inference_backend == 'tensorrt':
# In case of TensorRT we also fetch engine data type and engine path
trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
trt_engine_path = PATHS.get_engine_path(trt_engine_datatype,
args.max_batch_size)
if not os.path.exists(os.path.dirname(trt_engine_path)):
os.makedirs(os.path.dirname(trt_engine_path))
results_dir = PATHS.get_voc_model_detections_path('tensorrt',
trt_engine_datatype)
elif args.inference_backend == 'tensorflow':
results_dir = PATHS.get_voc_model_detections_path('tensorflow')
if not os.path.exists(results_dir):
os.makedirs(results_dir)
# Return parsed arguments for further functions to use
parsed = {
'inference_backend': args.inference_backend,
'max_batch_size': args.max_batch_size,
'force_inference': args.force_inference,
'results_dir': results_dir,
'trt_engine_path': trt_engine_path,
'trt_engine_datatype': trt_engine_datatype
}
return parsed
def main():
# Parse command line arguments
parsed = parse_commandline_arguments()
# Check if inference should be skipped (if model inference
# results are already computed, we don't need to recompute
# them for VOC mAP computation)
skip_inference = should_skip_inference(parsed)
# And if inference will not be skipped, then we
# create files to store its results in
detection_files = {}
if not skip_inference:
for voc_class in VOC_CLASSES:
detection_files[voc_class] = open(
os.path.join(
parsed['results_dir'], 'det_test_{}.txt'.format(voc_class)
), 'w'
)
# Fetch frozen model .pb path...
ssd_model_pb_path = PATHS.get_model_pb_path(MODEL_NAME)
# ...and .uff path, if needed (converting .pb to .uff if not already done)
if parsed['inference_backend'] == 'tensorrt':
ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
# This block of code sets up and performs inference, if needed
if not skip_inference:
# Preprocess VOC dataset if necessary by resizing images
preprocess_voc()
# Fetch image list and input .ppm files path
with open(PATHS.get_voc_image_set_path(), 'r') as f:
voc_image_numbers = f.readlines()
voc_image_numbers = [line.strip() for line in voc_image_numbers]
voc_image_path = PATHS.get_voc_ppm_img_path()
# Tensorflow and TensorRT paths are a little bit different,
# so we must treat each one individually
if parsed['inference_backend'] == 'tensorrt':
# TRTInference initialization initializes
# all TensorRT structures, creates engine if it doesn't
# already exist and finally saves it to file for future uses
from utils.inference_trt import TRTInference
trt_inference_wrapper = TRTInference(
parsed['trt_engine_path'], ssd_model_uff_path,
parsed['trt_engine_datatype'], parsed['max_batch_size'])
# Outputs from TensorRT are handled differently than
# outputs from Tensorflow, that's why we use another
# function to produce the detections from them
produce_tensorrt_detections(detection_files,
trt_inference_wrapper, parsed['max_batch_size'],
voc_image_numbers, voc_image_path)
elif parsed['inference_backend'] == 'tensorflow':
# In case of Tensorflow all we need to
# initialize inference is frozen model...
from utils.inference_tf import TensorflowInference
tf_inference_wrapper = TensorflowInference(ssd_model_pb_path)
# ...and after initializing it, we can
# proceed to producing detections
produce_tensorflow_detections(detection_files,
tf_inference_wrapper, parsed['max_batch_size'],
voc_image_numbers, voc_image_path)
# Flush detection to files to make sure evaluation is correct
for key in detection_files:
detection_files[key].flush()
# Do mAP computation based on saved detections
voc_mAP_utils.do_python_eval(parsed['results_dir'])
# Close detection files, they are not needed anymore
for key in detection_files:
detection_files[key].close()
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/uff_ssd/voc_evaluation.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import time
import numpy as np
import pycuda.autoinit
import tensorrt as trt
import utils.boxes as boxes_utils # Drawing bounding boxes
import utils.coco as coco_utils # COCO dataset descriptors
from utils.inference_trt import TRTInference # TRT inference wrappers
from PIL import Image
from utils.paths import PATHS # Path management
# COCO label list
COCO_LABELS = coco_utils.COCO_CLASSES_LIST
# Model used for inference
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
# Confidence threshold for drawing bounding box
VISUALIZATION_THRESHOLD = 0.5
# Precision command line argument -> TRT Engine datatype
TRT_PRECISION_TO_DATATYPE = {
16: trt.DataType.HALF,
32: trt.DataType.FLOAT
}
# Layout of TensorRT network output metadata
TRT_PREDICTION_LAYOUT = {
"image_id": 0,
"label": 1,
"confidence": 2,
"xmin": 3,
"ymin": 4,
"xmax": 5,
"ymax": 6
}
def fetch_prediction_field(field_name, detection_out, pred_start_idx):
"""Fetches prediction field from prediction byte array.
After TensorRT inference, prediction data is saved in
byte array and returned by object detection network.
This byte array contains several pieces of data about
prediction - we call one such piece a prediction field.
The prediction fields layout is described in TRT_PREDICTION_LAYOUT.
This function, given prediction byte array returned by network,
staring index of given prediction and field name of interest,
returns prediction field data corresponding to given arguments.
Args:
field_name (str): field of interest, one of keys of TRT_PREDICTION_LAYOUT
detection_out (array): object detection network output
pred_start_idx (int): start index of prediction of interest in detection_out
Returns:
Prediction field corresponding to given data.
"""
return detection_out[pred_start_idx + TRT_PREDICTION_LAYOUT[field_name]]
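# Illustrative sketch (not part of the original sample): with the flat layout
# above, one prediction occupies len(TRT_PREDICTION_LAYOUT) consecutive values,
# so prediction k starts at index k * len(TRT_PREDICTION_LAYOUT). A hypothetical
# single-prediction output could be decoded like this:
#
#   demo_out = np.array([0.0, 1.0, 0.9, 0.1, 0.2, 0.5, 0.6], dtype=np.float32)
#   confidence = fetch_prediction_field("confidence", demo_out, 0)  # -> 0.9
#   xmin = fetch_prediction_field("xmin", demo_out, 0)              # -> 0.1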
def analyze_prediction(detection_out, pred_start_idx, img_pil):
image_id = int(fetch_prediction_field("image_id", detection_out, pred_start_idx))
label = int(fetch_prediction_field("label", detection_out, pred_start_idx))
confidence = fetch_prediction_field("confidence", detection_out, pred_start_idx)
xmin = fetch_prediction_field("xmin", detection_out, pred_start_idx)
ymin = fetch_prediction_field("ymin", detection_out, pred_start_idx)
xmax = fetch_prediction_field("xmax", detection_out, pred_start_idx)
ymax = fetch_prediction_field("ymax", detection_out, pred_start_idx)
if confidence > VISUALIZATION_THRESHOLD:
class_name = COCO_LABELS[label]
confidence_percentage = "{0:.0%}".format(confidence)
print("Detected {} with confidence {}".format(
class_name, confidence_percentage))
boxes_utils.draw_bounding_boxes_on_image(
img_pil, np.array([[ymin, xmin, ymax, xmax]]),
display_str_list=["{}: {}".format(
class_name, confidence_percentage)],
color=coco_utils.COCO_COLORS[label]
)
def parse_commandline_arguments():
"""Parses command line arguments and adjusts internal data structures."""
# Define script command line arguments
parser = argparse.ArgumentParser(description='Run object detection inference on input image.')
parser.add_argument('input_img_path', metavar='INPUT_IMG_PATH',
help='an image file to run inference on')
parser.add_argument('-p', '--precision', type=int, choices=[32, 16], default=32,
help='desired TensorRT float precision to build an engine with')
parser.add_argument('-b', '--max_batch_size', type=int, default=1,
help='max TensorRT engine batch size')
parser.add_argument('-w', '--workspace_dir',
help='sample workspace directory')
parser.add_argument("-o", "--output",
help="path of the output file",
default=os.path.join(PATHS.get_sample_root(), "image_inferred.jpg"))
parser.add_argument('-d', '--data',
help="Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument.")
args, _ = parser.parse_known_args()
data_dir = os.environ.get('TRT_DATA_DIR', None) if args.data is None else args.data
if data_dir is None:
raise ValueError("Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR.")
PATHS.set_data_dir_path(data_dir)
# Set workspace dir path if passed by user
if args.workspace_dir:
PATHS.set_workspace_dir_path(args.workspace_dir)
    try:
        os.makedirs(PATHS.get_workspace_dir_path())
    except OSError:
        # Directory may already exist
        pass
# Verify Paths after adjustments. This also exits script if verification fails
PATHS.verify_all_paths()
# Fetch TensorRT engine path and datatype
args.trt_engine_datatype = TRT_PRECISION_TO_DATATYPE[args.precision]
args.trt_engine_path = PATHS.get_engine_path(args.trt_engine_datatype,
args.max_batch_size)
    try:
        os.makedirs(os.path.dirname(args.trt_engine_path))
    except OSError:
        # Directory may already exist
        pass
return args
def main():
# Parse command line arguments
args = parse_commandline_arguments()
# Fetch .uff model path
ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
# Set up all TensorRT data structures needed for inference
trt_inference_wrapper = TRTInference(
args.trt_engine_path, ssd_model_uff_path,
trt_engine_datatype=args.trt_engine_datatype,
batch_size=args.max_batch_size)
# Start measuring time
inference_start_time = time.time()
# Get TensorRT SSD model output
detection_out, keep_count_out = \
trt_inference_wrapper.infer(args.input_img_path)
# Make PIL.Image for drawing bounding boxes and
# let analyze_prediction() draw them based on model output
img_pil = Image.open(args.input_img_path)
prediction_fields = len(TRT_PREDICTION_LAYOUT)
for det in range(int(keep_count_out[0])):
analyze_prediction(detection_out, det * prediction_fields, img_pil)
# Output total [img load + inference + drawing bboxes] time
print("Total time taken for one image: {} ms\n".format(
int(round((time.time() - inference_start_time) * 1000))))
    # Save output image and print its path
img_pil.save(args.output)
print("Saved output image to: {}".format(args.output))
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/uff_ssd/detect_objects.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import utils.model as model_utils # UFF conversion
from utils.paths import PATHS # Path management
# Model used for inference
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
def parse_commandline_arguments():
"""Parses command line arguments and adjusts internal data structures."""
# Define script command line arguments
    parser = argparse.ArgumentParser(description='Prepare the SSD model: extract it and convert it to UFF if needed.')
parser.add_argument('-w', '--workspace_dir',
help='sample workspace directory')
parser.add_argument('-d', '--data',
help="Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument.")
args, _ = parser.parse_known_args()
data_dir = os.environ.get('TRT_DATA_DIR', None) if args.data is None else args.data
if data_dir is None:
raise ValueError("Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR.")
PATHS.set_data_dir_path(data_dir)
# Set workspace dir path if passed by user
if args.workspace_dir:
PATHS.set_workspace_dir_path(args.workspace_dir)
    try:
        os.makedirs(PATHS.get_workspace_dir_path())
    except OSError:
        # Directory may already exist
        pass
# Verify Paths after adjustments. This also exits script if verification fails
PATHS.verify_all_paths()
return args
def main():
# Parse command line arguments
args = parse_commandline_arguments()
# Fetch .uff model path
ssd_model_uff_path = PATHS.get_model_uff_path(MODEL_NAME)
# convert from .pb if needed, using prepare_ssd_model
if not os.path.exists(ssd_model_uff_path):
model_utils.prepare_ssd_model(MODEL_NAME)
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/uff_ssd/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from PIL import Image
import numpy as np
import utils.model as model_utils # UFF conversion utils
# This class is similar to TRTInference, but it manages a Tensorflow session instead
class TensorflowInference(object):
def __init__(self, pb_model_path):
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(pb_model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.sess = tf.Session(graph=self.detection_graph)
def infer(self, image_path):
img_np = self._load_img(image_path)
return self._run_tensorflow_graph(np.expand_dims(img_np, axis=0))
def infer_batch(self, image_paths):
img_np = self._load_imgs(image_paths)
return self._run_tensorflow_graph(img_np)
def _run_tensorflow_graph(self, image_input):
ops = self.detection_graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes',
'detection_scores', 'detection_classes'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = self.detection_graph.get_tensor_by_name(
tensor_name)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
output_dict = self.sess.run(tensor_dict,
feed_dict={image_tensor: image_input})
# All outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = output_dict['num_detections'].astype(np.int32)
output_dict['detection_classes'] = output_dict[
'detection_classes'].astype(np.uint8)
return output_dict
def _load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image).reshape(
(im_height, im_width, model_utils.ModelData.get_input_channels())
).astype(np.uint8)
def _load_imgs(self, image_paths):
numpy_array = np.zeros((len(image_paths),) + (300, 300, 3))
for idx, image_path in enumerate(image_paths):
img_np = self._load_img(image_path)
numpy_array[idx] = img_np
return numpy_array
def _load_img(self, image_path):
img = Image.open(image_path)
img_np = self._load_image_into_numpy_array(img)
return img_np
| TensorRT-master | samples/python/uff_ssd/utils/inference_tf.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# COCO dataset utility functions
import numpy as np
COCO_CLASSES_LIST = [
'unlabeled',
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'street sign',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'hat',
'backpack',
'umbrella',
'shoe',
'eye glasses',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'plate',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'mirror',
'dining table',
'window',
'desk',
'toilet',
'door',
'tv',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'blender',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush',
]
COCO_CLASSES_SET = set(COCO_CLASSES_LIST)
COCO_CLASS_ID = {
cls_name: idx for idx, cls_name in enumerate(COCO_CLASSES_LIST)
}
# Random RGB colors for each class (useful for drawing bounding boxes)
COCO_COLORS = \
np.random.uniform(0, 255, size=(len(COCO_CLASSES_LIST), 3)).astype(np.uint8)
def is_coco_label(label):
"""Returns boolean which tells if given label is COCO label.
Args:
label (str): object label
Returns:
bool: is given label a COCO class label
"""
return label in COCO_CLASSES_SET
def get_coco_label_color(label):
"""Returns color corresponding to given COCO label, or None.
Args:
label (str): object label
Returns:
np.array: RGB color described in 3-element np.array
"""
if not is_coco_label(label):
return None
else:
return COCO_COLORS[COCO_CLASS_ID[label]]
| TensorRT-master | samples/python/uff_ssd/utils/coco.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This class contains converted (UFF) model metadata
class ModelData(object):
# Name of input node
INPUT_NAME = "Input"
# CHW format of model input
INPUT_SHAPE = (3, 300, 300)
# Name of output node
OUTPUT_NAME = "NMS"
@staticmethod
def get_input_channels():
return ModelData.INPUT_SHAPE[0]
@staticmethod
def get_input_height():
return ModelData.INPUT_SHAPE[1]
@staticmethod
def get_input_width():
return ModelData.INPUT_SHAPE[2]
| TensorRT-master | samples/python/uff_ssd/utils/modeldata.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# uff_ssd path management singleton class
import os
import sys
import tensorrt as trt
class Paths(object):
def __init__(self):
self._SAMPLE_ROOT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir
)
self._WORKSPACE_DIR_PATH = os.path.join(
self._SAMPLE_ROOT,
'workspace'
)
self._VOC_DIR_PATH = \
os.path.join(self._SAMPLE_ROOT, 'VOCdevkit', 'VOC2007')
self._DATA_DIR_PATH = None
# User configurable paths
def set_data_dir_path(self, data_dir):
self._DATA_DIR_PATH = data_dir
def set_workspace_dir_path(self, workspace_dir):
self._WORKSPACE_DIR_PATH = workspace_dir
def get_workspace_dir_path(self):
return self._WORKSPACE_DIR_PATH
def get_voc_dir_path(self):
return self._VOC_DIR_PATH
# Fixed paths
def get_sample_root(self):
return self._SAMPLE_ROOT
def get_models_dir_path(self):
return os.path.join(self.get_workspace_dir_path(), 'models')
def get_engines_dir_path(self):
return os.path.join(self.get_workspace_dir_path(), 'engines')
def get_engine_path(self, inference_type=trt.DataType.FLOAT, max_batch_size=1):
inference_type_to_str = {
trt.DataType.FLOAT: 'FLOAT',
trt.DataType.HALF: 'HALF',
trt.DataType.INT32: 'INT32',
trt.DataType.INT8: 'INT8'
}
return os.path.join(
self.get_engines_dir_path(),
inference_type_to_str[inference_type],
'engine_bs_{}.buf'.format(max_batch_size))
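    # For example (illustrative, assuming the default workspace path), a HALF
    # precision engine built for max batch size 64 would be cached at:
    #   <sample_root>/workspace/engines/HALF/engine_bs_64.buf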
def get_data_file_path(self, path):
return os.path.join(self._DATA_DIR_PATH, path)
def get_voc_annotation_cache_path(self):
return os.path.join(self.get_workspace_dir_path(), 'annotations_cache')
def get_voc_image_set_path(self):
return os.path.join(self.get_voc_dir_path(), 'ImageSets', 'Main', 'test.txt')
def get_voc_annotation_path(self):
return os.path.join(self.get_voc_dir_path(), 'Annotations', '{}.xml')
def get_voc_ppm_img_path(self):
return os.path.join(self.get_voc_dir_path(), 'PPMImages', '{}.ppm')
def get_voc_jpg_img_path(self):
return os.path.join(self.get_voc_dir_path(), 'JPEGImages', '{}.jpg')
def get_voc_tensorflow_model_detections_path(self):
return os.path.join(self.get_workspace_dir_path(), 'results', 'tensorflow')
def get_voc_tensorrt_model_detections_path(self, trt_engine_datatype=trt.DataType.FLOAT):
trt_results_path = \
os.path.join(self.get_workspace_dir_path(), 'results', 'tensorrt')
if trt_engine_datatype == trt.DataType.HALF:
return os.path.join(trt_results_path, 'HALF')
else:
return os.path.join(trt_results_path, 'FLOAT')
    def get_voc_model_detections_path(self, backend='tensorrt', trt_engine_datatype=trt.DataType.FLOAT):
        if backend != 'tensorrt':
            return self.get_voc_tensorflow_model_detections_path()
        else:
            return self.get_voc_tensorrt_model_detections_path(trt_engine_datatype)
def get_model_dir_path(self, model_name):
return os.path.join(self.get_models_dir_path(), model_name)
def get_model_pb_path(self, model_name):
return os.path.join(
self.get_model_dir_path(model_name),
'frozen_inference_graph.pb'
)
def get_model_uff_path(self, model_name):
return os.path.join(
self.get_model_dir_path(model_name),
'frozen_inference_graph.uff'
)
# Paths correctness verifier
def verify_all_paths(self, should_verify_voc=False):
error = False
if should_verify_voc:
error = self._verify_voc_paths()
if not os.path.exists(self.get_workspace_dir_path()):
error = True
        if self._DATA_DIR_PATH is None or not os.path.exists(self._DATA_DIR_PATH):
error = True
if error:
print("An error occured when running the script.")
sys.exit(1)
def _verify_voc_paths(self):
error = False
voc_dir = self.get_voc_dir_path()
voc_image_list = self.get_voc_image_set_path()
# 1) Check if directory and image list file are present
if not os.path.exists(voc_dir) or \
not os.path.exists(voc_image_list):
self._print_incorrect_voc_error(voc_dir)
error = True
# 2) Check if all images listed in image list are present
with open(voc_image_list, 'r') as f:
image_numbers = f.readlines()
image_numbers = [line.strip() for line in image_numbers]
if not self._verify_voc(image_numbers):
self._print_incorrect_voc_error(voc_dir)
error = True
return error
def _verify_voc(self, voc_image_list):
voc_image_path = self.get_voc_jpg_img_path()
for img_number in voc_image_list:
img = voc_image_path.format(img_number)
if not os.path.exists(img):
return False
return True
# Error printers
def _print_incorrect_voc_error(self, voc_dir):
print(
"Error: {}\n{}\n{}".format(
"Incomplete VOC dataset detected (voc_dir: {})".format(voc_dir),
"Try redownloading VOC or check if --data is set up correctly",
"For more details, check README.md"
)
)
PATHS = Paths()
| TensorRT-master | samples/python/uff_ssd/utils/paths.py |
| TensorRT-master | samples/python/uff_ssd/utils/__init__.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility functions for drawing bounding boxes on PIL images
import numpy as np
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
def draw_bounding_boxes_on_image(image,
boxes,
color=(255, 0, 0),
thickness=4,
display_str_list=()):
"""Draws bounding boxes on image.
Args:
image (PIL.Image): PIL.Image object
boxes (np.array): a 2 dimensional numpy array
of [N, 4]: (ymin, xmin, ymax, xmax)
The coordinates are in normalized format between [0, 1]
color (int, int, int): RGB tuple describing color to draw bounding box
thickness (int): bounding box line thickness
display_str_list [str]: list of strings.
Contains one string for each bounding box.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('boxes must be of size [N, 4]')
for i in range(boxes_shape[0]):
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list[i])
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color=(255, 0, 0),
thickness=4,
display_str='',
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
The string passed in display_str is displayed above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the string
is displayed below the bounding box.
Args:
image (PIL.Image): PIL.Image object
ymin (float): ymin of bounding box
xmin (float): xmin of bounding box
ymax (float): ymax of bounding box
xmax (float): xmax of bounding box
color (int, int, int): RGB tuple describing color to draw bounding box
thickness (int): line thickness
display_str (str): string to display in box
use_normalized_coordinates (bool): If True, treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=tuple(color))
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display string added to the top of the bounding
# box exceeds the top of the image, move the string below the bounding box
# instead of above
display_str_height = font.getsize(display_str)[1]
# Each display_str has a top and bottom margin of 0.05x
total_display_str_height = (1 + 2 * 0.05) * display_str_height
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=tuple(color))
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
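# Usage sketch (not part of the original sample): drawing one normalized box
# with a label onto a blank RGB image. PIL.Image is assumed to be available,
# as it already is for callers of this module.
#
#   from PIL import Image
#   img = Image.new('RGB', (300, 300))
#   draw_bounding_box_on_image(img, 0.1, 0.1, 0.6, 0.6,
#                              color=(0, 255, 0), display_str='person: 87%')
#   img.save('demo_box.jpg')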
| TensorRT-master | samples/python/uff_ssd/utils/boxes.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Model extraction and UFF conversion utils
import os
import sys
import tarfile
import requests
import tensorflow as tf
import tensorrt as trt
import graphsurgeon as gs
import uff
import time
import math
from utils.paths import PATHS
from utils.modeldata import ModelData
# UFF conversion functionality
def ssd_unsupported_nodes_to_plugin_nodes(ssd_graph):
"""Makes ssd_graph TensorRT comparible using graphsurgeon.
This function takes ssd_graph, which contains graphsurgeon
DynamicGraph data structure. This structure describes frozen Tensorflow
graph, that can be modified using graphsurgeon (by deleting, adding,
replacing certain nodes). The graph is modified by removing
Tensorflow operations that are not supported by TensorRT's UffParser
and replacing them with custom layer plugin nodes.
Note: This specific implementation works only for
ssd_inception_v2_coco_2017_11_17 network.
Args:
ssd_graph (gs.DynamicGraph): graph to convert
Returns:
gs.DynamicGraph: UffParser compatible SSD graph
"""
# Create TRT plugin nodes to replace unsupported ops in Tensorflow graph
channels = ModelData.get_input_channels()
height = ModelData.get_input_height()
width = ModelData.get_input_width()
Input = gs.create_plugin_node(name="Input",
op="Placeholder",
dtype=tf.float32,
shape=[1, channels, height, width])
PriorBox = gs.create_plugin_node(name="GridAnchor", op="GridAnchor_TRT",
minSize=0.2,
maxSize=0.95,
aspectRatios=[1.0, 2.0, 0.5, 3.0, 0.33],
variance=[0.1,0.1,0.2,0.2],
featureMapShapes=[19, 10, 5, 3, 2, 1],
numLayers=6
)
NMS = gs.create_plugin_node(
name="NMS",
op="NMS_TRT",
shareLocation=1,
varianceEncodedInTarget=0,
backgroundLabelId=0,
confidenceThreshold=1e-8,
nmsThreshold=0.6,
topK=100,
keepTopK=100,
numClasses=91,
inputOrder=[0, 2, 1],
confSigmoid=1,
isNormalized=1
)
concat_priorbox = gs.create_node(
"concat_priorbox",
op="ConcatV2",
dtype=tf.float32,
axis=2
)
concat_box_loc = gs.create_plugin_node(
"concat_box_loc",
op="FlattenConcat_TRT",
dtype=tf.float32,
axis=1,
ignoreBatch=0
)
concat_box_conf = gs.create_plugin_node(
"concat_box_conf",
op="FlattenConcat_TRT",
dtype=tf.float32,
axis=1,
ignoreBatch=0
)
# Create a mapping of namespace names -> plugin nodes.
namespace_plugin_map = {
"MultipleGridAnchorGenerator": PriorBox,
"Postprocessor": NMS,
"Preprocessor": Input,
"ToFloat": Input,
"image_tensor": Input,
"MultipleGridAnchorGenerator/Concatenate": concat_priorbox,
"MultipleGridAnchorGenerator/Identity": concat_priorbox,
"concat": concat_box_loc,
"concat_1": concat_box_conf
}
# Create a new graph by collapsing namespaces
ssd_graph.collapse_namespaces(namespace_plugin_map)
# Remove the outputs, so we just have a single output node (NMS).
# If remove_exclusive_dependencies is True, the whole graph will be removed!
ssd_graph.remove(ssd_graph.graph_outputs, remove_exclusive_dependencies=False)
return ssd_graph
def model_to_uff(model_path, output_uff_path, silent=False):
"""Takes frozen .pb graph, converts it to .uff and saves it to file.
Args:
model_path (str): .pb model path
output_uff_path (str): .uff path where the UFF file will be saved
silent (bool): if False, writes progress messages to stdout
"""
dynamic_graph = gs.DynamicGraph(model_path)
dynamic_graph = ssd_unsupported_nodes_to_plugin_nodes(dynamic_graph)
uff.from_tensorflow(
dynamic_graph.as_graph_def(),
[ModelData.OUTPUT_NAME],
output_filename=output_uff_path,
text=True
)
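# Usage sketch (not part of the original sample): converting a frozen graph,
# assuming a hypothetical path layout.
#
#   model_to_uff('workspace/models/ssd/frozen_inference_graph.pb',
#                'workspace/models/ssd/frozen_inference_graph.uff')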
# Model extraction functionality
def maybe_print(should_print, print_arg):
"""Prints message if supplied boolean flag is true.
Args:
should_print (bool): if True, will print print_arg to stdout
print_arg (str): message to print to stdout
"""
if should_print:
print(print_arg)
def maybe_mkdir(dir_path):
"""Makes directory if it doesn't exist.
Args:
dir_path (str): directory path
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def _extract_model(silent=False):
"""Extract model from Tensorflow model zoo.
Args:
silent (bool): if False, writes progress messages to stdout
"""
maybe_print(not silent, "Preparing pretrained model")
model_dir = PATHS.get_models_dir_path()
maybe_mkdir(model_dir)
model_archive_path = PATHS.get_data_file_path('ssd_inception_v2_coco_2017_11_17.tar.gz')
maybe_print(not silent, "Unpacking {}".format(model_archive_path))
with tarfile.open(model_archive_path, "r:gz") as tar:
tar.extractall(path=model_dir)
maybe_print(not silent, "Model ready")
def prepare_ssd_model(model_name="ssd_inception_v2_coco_2017_11_17", silent=False):
"""Extract pretrained object detection model and converts it to UFF.
The model is downloaded from Tensorflow object detection model zoo.
Currently only ssd_inception_v2_coco_2017_11_17 model is supported
due to model_to_uff() using logic specific to that network when converting.
Args:
model_name (str): chosen object detection model
silent (bool): if False, writes progress messages to stdout
"""
if model_name != "ssd_inception_v2_coco_2017_11_17":
raise NotImplementedError(
"Model {} is not supported yet".format(model_name))
_extract_model(silent)
ssd_pb_path = PATHS.get_model_pb_path(model_name)
ssd_uff_path = PATHS.get_model_uff_path(model_name)
model_to_uff(ssd_pb_path, ssd_uff_path, silent)
| TensorRT-master | samples/python/uff_ssd/utils/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# VOC mAP computation, based on https://github.com/amdegroot/ssd.pytorch
import os
import sys
import pickle
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
import utils.voc as voc_utils
from utils.paths import PATHS
def parse_voc_annotation_xml(voc_annotation_xml):
"""Parse VOC annotation XML file.
VOC image annotations are described in XML files
shipped with VOC dataset, with one XML file per each image.
This function reads relevant object detection data from given
file and saves it to Python data structures.
Args:
voc_annotation_xml (str): VOC annotation XML file path
Returns:
Python list of object detections metadata.
"""
    tree = ET.parse(voc_annotation_xml)
size = tree.find('size')
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['image_width'] = size.find('width').text
obj_struct['image_height'] = size.find('height').text
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
        # Coordinates in VOC XMLs are 1-based; shift them to 0-based here
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def get_voc_results_file_template(cls, results_dir):
"""Fetches inference detection result file path for given class.
During TensorRT/Tensorflow inference, we save class detections into
separate files, for later mAP computation. This function fetches
paths of these files.
Args:
cls (str): VOC class label
results_dir (str): path of directory containing detection results
Returns:
str: Detection results path for given class.
"""
# VOCdevkit/VOC2007/results/det_test_aeroplane.txt
filename = 'det_test_{}.txt'.format(cls)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
path = os.path.join(results_dir, filename)
return path
def do_python_eval(results_dir):
cachedir = PATHS.get_voc_annotation_cache_path()
aps = []
for i, cls in enumerate(voc_utils.VOC_CLASSES_LIST):
filename = get_voc_results_file_template(cls, results_dir)
rec, prec, ap = voc_eval(
filename,
PATHS.get_voc_image_set_path(),
cls, cachedir,
ovthresh=0.5)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
print('Mean AP = {:.4f}'.format(np.mean(aps)))
def voc_ap(rec, prec):
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
return ap
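# Worked example (illustrative, not part of the original sample): voc_ap()
# implements 11-point interpolated AP, averaging the best precision achieved
# at recall >= t for each t in {0.0, 0.1, ..., 1.0}. For instance:
#
#   rec = np.array([0.2, 0.4, 0.4, 0.8])
#   prec = np.array([1.0, 1.0, 0.67, 0.5])
#   # t in {0.0, ..., 0.4}: max prec where rec >= t is 1.0 (5 values)
#   # t in {0.5, ..., 0.8}: 0.5 (4 values); t in {0.9, 1.0}: 0.0 (2 values)
#   # ap = (5 * 1.0 + 4 * 0.5 + 2 * 0.0) / 11 ≈ 0.636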
def read_voc_annotations(annotations_dir, image_numbers):
if not os.path.isdir(annotations_dir):
os.makedirs(annotations_dir)
annotations_file = os.path.join(annotations_dir, 'annots.pkl')
if not os.path.isfile(annotations_file):
# If annotations were not present, compute them
detections = {}
for i, image_num in enumerate(image_numbers):
detections[image_num] = parse_voc_annotation_xml(
PATHS.get_voc_annotation_path().format(image_num))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(image_numbers)))
# Save
print('Saving cached annotations to {:s}'.format(annotations_file))
with open(annotations_file, 'wb') as f:
pickle.dump(detections, f)
else:
# If annotations were present, load them
with open(annotations_file, 'rb') as f:
detections = pickle.load(f)
return detections
def extract_class_detections(voc_detections, classname, image_numbers):
class_detections = {}
for image_num in image_numbers:
R = [obj for obj in voc_detections[image_num] if obj['name'] == classname]
image_bboxes = [x['bbox'] for x in R]
# Transform VOC bboxes to make them describe pre-resized 300x300 images
for idx, bbox in enumerate(image_bboxes):
bbox = np.array(bbox).astype(np.float32)
width = float(R[0]['image_width'])
height = float(R[0]['image_height'])
bbox[0] *= (300.0 / width)
bbox[2] *= (300.0 / width)
bbox[1] *= (300.0 / height)
bbox[3] *= (300.0 / height)
image_bboxes[idx] = bbox
image_bboxes = np.array(image_bboxes)
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
class_detections[image_num] = {
'bbox': image_bboxes,
'difficult': difficult,
'det': det
}
return class_detections
def voc_eval(detpath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5):
with open(imagesetfile, 'r') as f:
lines = f.readlines()
image_numbers = [x.strip() for x in lines]
voc_detections = read_voc_annotations(cachedir, image_numbers)
    class_detections = extract_class_detections(voc_detections, classname,
                                                image_numbers)
is_detection_difficult = np.concatenate(
[class_detections[image_num]['difficult'] for image_num in image_numbers]
)
not_difficult_count = sum(~is_detection_difficult)
    # Read detections output by the model
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
if any(lines):
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
bboxes = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
bboxes = bboxes[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# Go down dets and mark TPs and FPs
num_detections = len(image_ids)
tp = np.zeros(num_detections)
fp = np.zeros(num_detections)
for detection in range(num_detections):
R = class_detections[image_ids[detection]]
bbox = bboxes[detection, :].astype(float)
ovmax = -np.inf
bbox_gt = R['bbox'].astype(float)
if bbox_gt.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(bbox_gt[:, 0], bbox[0])
iymin = np.maximum(bbox_gt[:, 1], bbox[1])
ixmax = np.minimum(bbox_gt[:, 2], bbox[2])
iymax = np.minimum(bbox_gt[:, 3], bbox[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) +
(bbox_gt[:, 2] - bbox_gt[:, 0]) *
(bbox_gt[:, 3] - bbox_gt[:, 1]) - inters)
overlaps = inters / uni
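                # Worked IoU example (illustrative): for gt box [0, 0, 10, 10]
                # and detection [5, 5, 15, 15], the intersection is 5 * 5 = 25
                # and the union is 100 + 100 - 25 = 175, so IoU = 25 / 175 ≈ 0.14,
                # which would fail the default ovthresh of 0.5.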
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[detection] = 1.
R['det'][jmax] = 1
else:
fp[detection] = 1.
else:
fp[detection] = 1.
# Compute precision and recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(not_difficult_count)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
| TensorRT-master | samples/python/uff_ssd/utils/mAP.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility functions for building/saving/loading TensorRT Engine
import sys
import os
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
from utils.modeldata import ModelData
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
from common import HostDeviceMem
def allocate_buffers(engine):
"""Allocates host and device buffer for TRT engine inference.
This function is similair to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to Python buffer. This is needed, since
TensorRT plugins doesn't support output type description, and
in our particular case, we use NMS plugin as network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
    # The current NMS implementation in TRT only supports DataType.FLOAT but
    # it may change in the future, which could break this sample here
    # when using lower precision [e.g. NMS output would not be np.float32
    # anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32}
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = binding_to_type[str(binding)]
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def build_engine(uff_model_path, trt_logger, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1, silent=False):
with trt.Builder(trt_logger) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser, trt.Runtime(trt_logger) as runtime:
config.max_workspace_size = 1 << 30
if trt_engine_datatype == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
builder.max_batch_size = batch_size
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output("MarkOutput_0")
parser.parse(uff_model_path, network)
if not silent:
print("Building TensorRT engine. This may take few minutes.")
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
def save_engine(engine, engine_dest_path):
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def load_engine(trt_runtime, engine_path):
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
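# Round-trip usage sketch (not part of the original sample): building, caching
# and reloading an engine with the helpers above. Paths are hypothetical.
#
#   logger = trt.Logger(trt.Logger.WARNING)
#   engine = build_engine('model.uff', logger, trt.DataType.HALF, batch_size=8)
#   save_engine(engine, 'engine_bs_8.buf')
#   runtime = trt.Runtime(logger)
#   engine = load_engine(runtime, 'engine_bs_8.buf')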
| TensorRT-master | samples/python/uff_ssd/utils/engine.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
import tensorrt as trt
from PIL import Image
import pycuda.driver as cuda
import numpy as np
import utils.engine as engine_utils # TRT Engine creation/save/load utils
from utils.modeldata import ModelData
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
import common
# TensorRT logger singleton
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class TRTInference(object):
"""Manages TensorRT objects for model inference."""
def __init__(self, trt_engine_path, uff_model_path, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1):
"""Initializes TensorRT objects needed for model inference.
Args:
trt_engine_path (str): path where TensorRT engine should be stored
uff_model_path (str): path of .uff model
trt_engine_datatype (trt.DataType):
requested precision of TensorRT engine used for inference
batch_size (int): batch size for which engine
should be optimized for
"""
# We first load all custom plugins shipped with TensorRT,
# some of them will be needed during inference
trt.init_libnvinfer_plugins(TRT_LOGGER, '')
# Initialize runtime needed for loading TensorRT engine from file
self.trt_runtime = trt.Runtime(TRT_LOGGER)
# TRT engine placeholder
self.trt_engine = None
# Display requested engine settings to stdout
print("TensorRT inference engine settings:")
print(" * Inference precision - {}".format(trt_engine_datatype))
print(" * Max batch size - {}\n".format(batch_size))
# If engine is not cached, we need to build it
if not os.path.exists(trt_engine_path):
            # This function uses the supplied .uff file
            # together with the UffParser to build the TensorRT
            # engine. For more details, check the implementation
self.trt_engine = engine_utils.build_engine(
uff_model_path, TRT_LOGGER,
trt_engine_datatype=trt_engine_datatype,
batch_size=batch_size)
# Save the engine to file
engine_utils.save_engine(self.trt_engine, trt_engine_path)
        # If the engine was not built above, a cached engine file exists, so load it
if not self.trt_engine:
print("Loading cached TensorRT engine from {}".format(
trt_engine_path))
self.trt_engine = engine_utils.load_engine(
self.trt_runtime, trt_engine_path)
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = \
engine_utils.allocate_buffers(self.trt_engine)
# Execution context is needed for inference
self.context = self.trt_engine.create_execution_context()
        # Allocate a reusable input buffer [e.g. for multi-image batch inference]
input_volume = trt.volume(ModelData.INPUT_SHAPE)
self.numpy_array = np.zeros((self.trt_engine.max_batch_size, input_volume))
def infer(self, image_path):
"""Infers model on given image.
Args:
image_path (str): image to run object detection model on
"""
# Load image into CPU
img = self._load_img(image_path)
# Copy it into appropriate place into memory
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, img.ravel())
        # When inferring on a single image, we measure inference
        # time so we can report it to the user
inference_start_time = time.time()
# Fetch output from the model
[detection_out, keepCount_out] = common.do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream)
# Output inference time
print("TensorRT inference time: {} ms".format(
int(round((time.time() - inference_start_time) * 1000))))
# And return results
return detection_out, keepCount_out
def infer_batch(self, image_paths):
"""Infers model on batch of same sized images resized to fit the model.
Args:
image_paths (str): paths to images, that will be packed into batch
and fed into model
"""
        # Verify that the supplied batch size is not too big
max_batch_size = self.trt_engine.max_batch_size
actual_batch_size = len(image_paths)
if actual_batch_size > max_batch_size:
raise ValueError(
"image_paths list bigger ({}) than engine max batch size ({})".format(actual_batch_size, max_batch_size))
# Load all images to CPU...
imgs = self._load_imgs(image_paths)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, imgs.ravel())
# ...fetch model outputs...
[detection_out, keep_count_out] = common.do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size)
# ...and return results.
return detection_out, keep_count_out
def _load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image).reshape(
(im_height, im_width, ModelData.get_input_channels())
).astype(np.uint8)
def _load_imgs(self, image_paths):
batch_size = self.trt_engine.max_batch_size
for idx, image_path in enumerate(image_paths):
img_np = self._load_img(image_path)
self.numpy_array[idx] = img_np
return self.numpy_array
def _load_img(self, image_path):
image = Image.open(image_path)
model_input_width = ModelData.get_input_width()
model_input_height = ModelData.get_input_height()
        # Note: The bilinear interpolation used by Pillow is slightly
        # different from the one used by Tensorflow, so if the network
        # receives an image that is not 300x300, its output may differ
        # from the one produced by Tensorflow
image_resized = image.resize(
size=(model_input_width, model_input_height),
resample=Image.BILINEAR
)
img_np = self._load_image_into_numpy_array(image_resized)
# HWC -> CHW
img_np = img_np.transpose((2, 0, 1))
# Normalize to [-1.0, 1.0] interval (expected by model)
img_np = (2.0 / 255.0) * img_np - 1.0
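        # e.g. pixel 0 -> -1.0, 127.5 -> 0.0, 255 -> +1.0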
img_np = img_np.ravel()
return img_np
| TensorRT-master | samples/python/uff_ssd/utils/inference_trt.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# VOC dataset utility functions
import numpy as np
VOC_CLASSES_LIST = [
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor'
]
VOC_CLASSES_SET = set(VOC_CLASSES_LIST)
VOC_CLASS_ID = {
cls_name: idx for idx, cls_name in enumerate(VOC_CLASSES_LIST)
}
# Random RGB colors for each class (useful for drawing bounding boxes)
VOC_COLORS = \
np.random.uniform(0, 255, size=(len(VOC_CLASSES_LIST), 3)).astype(np.uint8)
def convert_coco_to_voc(label):
"""Converts COCO class name to VOC class name, if possible.
COCO classes are a superset of VOC classes, but
some classes have different names (e.g. airplane
in COCO is aeroplane in VOC). This function gets
COCO label and converts it to VOC label,
if conversion is needed.
Args:
label (str): COCO label
Returns:
str: VOC label corresponding to given label if such exists,
otherwise returns original label
"""
COCO_VOC_DICT = {
'airplane': 'aeroplane',
'motorcycle': 'motorbike',
'dining table': 'diningtable',
'potted plant': 'pottedplant',
'couch': 'sofa',
'tv': 'tvmonitor'
}
if label in COCO_VOC_DICT:
return COCO_VOC_DICT[label]
else:
return label
def coco_label_to_voc_label(label):
"""Returns VOC label corresponding to given COCO label.
COCO classes are superset of VOC classes, this function
returns label corresponding to given COCO class label
or None if such label doesn't exist.
Args:
label (str): COCO class label
Returns:
str: VOC label corresponding to given label or None
"""
label = convert_coco_to_voc(label)
if label in VOC_CLASSES_SET:
return label
else:
return None
def is_voc_label(label):
"""Returns boolean which tells if given label is VOC label.
Args:
label (str): object label
Returns:
bool: is given label a VOC class label
"""
return label in VOC_CLASSES_SET
def get_voc_label_color(label):
"""Returns color corresponding to given VOC label, or None.
Args:
label (str): object label
Returns:
np.array: RGB color described in 3-element np.array
"""
if not is_voc_label(label):
return None
else:
return VOC_COLORS[VOC_CLASS_ID[label]]
| TensorRT-master | samples/python/uff_ssd/utils/voc.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains functions for training a PyTorch MNIST Model
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import os
from random import randint
# Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
self.fc1 = nn.Linear(800, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
x = x.view(-1, 800)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
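    # Shape walkthrough (illustrative): a 1x28x28 input becomes 20x24x24 after
    # conv1 (5x5, no padding), 20x12x12 after pooling, 50x8x8 after conv2 and
    # 50x4x4 after the second pooling, hence the flattened size 50 * 4 * 4 = 800.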
class MnistModel(object):
def __init__(self):
self.batch_size = 64
self.test_batch_size = 100
self.learning_rate = 0.0025
self.sgd_momentum = 0.9
self.log_interval = 100
# Fetch MNIST data set.
self.train_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=self.batch_size,
shuffle=True,
num_workers=1,
timeout=600)
self.test_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=self.test_batch_size,
shuffle=True,
num_workers=1,
timeout=600)
self.network = Net()
self.latest_test_accuracy = 0.0
# Train the network for one or more epochs, validating after each epoch.
def learn(self, num_epochs=2):
# Train the network for a single epoch
def train(epoch):
self.network.train()
optimizer = optim.SGD(self.network.parameters(), lr=self.learning_rate, momentum=self.sgd_momentum)
for batch, (data, target) in enumerate(self.train_loader):
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = self.network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch % self.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch * len(data), len(self.train_loader.dataset), 100. * batch / len(self.train_loader), loss.data.item()))
# Test the network
def test(epoch):
self.network.eval()
test_loss = 0
correct = 0
for data, target in self.test_loader:
with torch.no_grad():
data, target = Variable(data), Variable(target)
output = self.network(data)
test_loss += F.nll_loss(output, target).data.item()
pred = output.data.max(1)[1]
correct += pred.eq(target.data).cpu().sum()
test_loss /= len(self.test_loader)
self.latest_test_accuracy = float(correct) / len(self.test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(test_loss, correct, len(self.test_loader.dataset), 100. * self.latest_test_accuracy))
for e in range(num_epochs):
train(e + 1)
test(e + 1)
    # @brief Get the latest accuracy on the test set
    # @pre test() (and thus self.learn()) needs to have been run
def get_latest_test_set_accuracy(self):
return self.latest_test_accuracy
def get_weights(self):
return self.network.state_dict()
# Retrieve a single sample out of a batch and convert to flattened numpy array
def convert_to_flattened_numpy_array(self, batch_data, batch_target, sample_idx):
test_case = batch_data.numpy()[sample_idx].ravel().astype(np.float32)
test_name = batch_target.numpy()[sample_idx]
return test_case, test_name
# Generator to loop over every sample in the test set, sample by sample
def get_all_test_samples(self):
for data, target in self.test_loader:
for case_num in range(len(data)):
yield self.convert_to_flattened_numpy_array(data, target, case_num)
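# Usage sketch (not part of the original sample): iterating over flattened
# test samples, as the TensorRT refit sample does during accuracy measurement.
#
#   mnist = MnistModel()
#   mnist.learn(num_epochs=1)
#   for test_case, test_label in mnist.get_all_test_samples():
#       pass  # test_case is a flat float32 array, test_label an int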
| TensorRT-master | samples/python/engine_refit_mnist/model.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
# This sample uses an MNIST PyTorch model to create a TensorRT Inference Engine
import model
import numpy as np
import pycuda.autoinit
import tensorrt as trt
sys.path.insert(1, os.path.join(sys.path[0], os.path.pardir))
import common
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class ModelData(object):
INPUT_NAME = "data"
INPUT_SHAPE = (1, 28, 28)
OUTPUT_NAME = "prob"
OUTPUT_SIZE = 10
DTYPE = trt.float32
# Populate the TRT network, injecting some dummy weights
def populate_network_with_some_dummy_weights(network, weights):
# Configure the network layers based on the weights provided.
input_tensor = network.add_input(name=ModelData.INPUT_NAME, dtype=ModelData.DTYPE, shape=ModelData.INPUT_SHAPE)
# Set dummy weights for the kernel and bias weights in the conv1 layer. We
# will refit the engine with the actual weights later.
conv1_w = np.zeros((20,5,5), dtype=np.float32)
conv1_b = np.zeros(20, dtype=np.float32)
conv1 = network.add_convolution(input=input_tensor, num_output_maps=20, kernel_shape=(5, 5), kernel=conv1_w, bias=conv1_b)
conv1.name = "conv_1"
conv1.stride = (1, 1)
# Associate weights with name and refit weights via name later in refitter.
network.set_weights_name(conv1_w, 'conv1.weight')
pool1 = network.add_pooling(input=conv1.get_output(0), type=trt.PoolingType.MAX, window_size=(2, 2))
pool1.stride = (2, 2)
conv2_w = weights['conv2.weight'].numpy()
conv2_b = weights['conv2.bias'].numpy()
conv2 = network.add_convolution(pool1.get_output(0), 50, (5, 5), conv2_w, conv2_b)
conv2.stride = (1, 1)
pool2 = network.add_pooling(conv2.get_output(0), trt.PoolingType.MAX, (2, 2))
pool2.stride = (2, 2)
fc1_w = weights['fc1.weight'].numpy()
fc1_b = weights['fc1.bias'].numpy()
fc1 = network.add_fully_connected(input=pool2.get_output(0), num_outputs=500, kernel=fc1_w, bias=fc1_b)
relu1 = network.add_activation(input=fc1.get_output(0), type=trt.ActivationType.RELU)
fc2_w = weights['fc2.weight'].numpy()
fc2_b = weights['fc2.bias'].numpy()
fc2 = network.add_fully_connected(relu1.get_output(0), ModelData.OUTPUT_SIZE, fc2_w, fc2_b)
fc2.get_output(0).name = ModelData.OUTPUT_NAME
network.mark_output(tensor=fc2.get_output(0))
# Build a TRT engine, but leave out some weights
def build_engine_with_some_missing_weights(weights):
# For more information on TRT basics, refer to the introductory samples.
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network()
config = builder.create_builder_config()
runtime = trt.Runtime(TRT_LOGGER)
config.max_workspace_size = common.GiB(1)
# Set the refit flag in the builder
config.set_flag(trt.BuilderFlag.REFIT)
# Populate the network using weights from the PyTorch model.
populate_network_with_some_dummy_weights(network, weights)
# Build and return an engine.
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
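# Sanity-check sketch (assumes the standard TensorRT Python API): an engine
# built with the REFIT flag reports itself as refittable, e.g.
#   engine = build_engine_with_some_missing_weights(weights)
#   assert engine.refittable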
# Copy an image to the pagelocked input buffer
def load_img_to_input_buffer(img, pagelocked_buffer):
np.copyto(pagelocked_buffer, img)
# Get the accuracy on the test set using TensorRT
def get_trt_test_accuracy(engine, inputs, outputs, bindings, stream, mnist_model):
context = engine.create_execution_context()
correct = 0
total = 0
# Run inference on every sample.
# Technically this could be batched; however, inference accounts for only
# a fraction of the total time spent in the test.
for test_img, test_name in mnist_model.get_all_test_samples():
load_img_to_input_buffer(test_img, pagelocked_buffer=inputs[0].host)
# For more information on performing inference, refer to the introductory samples.
# The common.do_inference function will return a list of outputs - we only have one in this case.
[output] = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
pred = np.argmax(output)
correct += (test_name == pred)
total += 1
accuracy = float(correct)/total
print("Got {} correct predictions out of {} ({:.1f}%)".format(correct, total, 100 * accuracy))
return accuracy
def main():
common.add_help(description="Runs an MNIST network using a PyTorch model")
# Train the PyTorch model
mnist_model = model.MnistModel()
mnist_model.learn()
weights = mnist_model.get_weights()
# Do inference with TensorRT.
engine = build_engine_with_some_missing_weights(weights)
# Build an engine, allocate buffers and create a stream.
# For more information on buffer allocation, refer to the introductory samples.
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
print("Accuracy Before Engine Refit")
get_trt_test_accuracy(engine, inputs, outputs, bindings, stream, mnist_model)
# Refit the engine with the actual trained weights for the conv_1 layer.
refitter = trt.Refitter(engine, TRT_LOGGER)
# To get a list of all refittable layers and their associated
# WeightsRoles in the network, use refitter.get_all().
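# For example (a sketch; in the Python API get_all() returns parallel
# lists of layer names and WeightsRole values):
#   layer_names, roles = refitter.get_all()
#   for layer_name, role in zip(layer_names, roles):
#       print(layer_name, role)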
# Set the actual weights for the conv_1 layer. Since it consists of
# kernel weights and bias weights, set each of them by specifying
# the WeightsRole.
# Prefer to refit named weights via set_named_weights
refitter.set_named_weights('conv1.weight', weights['conv1.weight'].numpy())
# set_named_weights is not available for unnamed weights. Call set_weights instead.
refitter.set_weights("conv_1", trt.WeightsRole.BIAS,
weights['conv1.bias'].numpy())
# Get missing weights names. This should return empty
# lists in this case.
missing_weights = refitter.get_missing_weights()
assert len(missing_weights) == 0, "Refitter found missing weights. Call set_named_weights() or set_weights() for all missing weights"
# Refit the engine with the new weights. This will return True if
# the refit operation succeeded.
assert refitter.refit_cuda_engine()
expected_correct_predictions = mnist_model.get_latest_test_set_accuracy()
print("Accuracy After Engine Refit (expecting {:.1f}% correct predictions)".format(100 * expected_correct_predictions))
assert get_trt_test_accuracy(engine, inputs, outputs, bindings, stream, mnist_model) >= expected_correct_predictions
if __name__ == '__main__':
main()
| TensorRT-master | samples/python/engine_refit_mnist/sample.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import graphsurgeon as gs
CropAndResize = gs.create_plugin_node(name='roi_pooling_conv_1/CropAndResize_new', op="CropAndResize", inputs=['activation_7/Relu', 'proposal'], crop_height=7, crop_width=7)
Proposal = gs.create_plugin_node(name='proposal', op='Proposal', inputs=['rpn_out_class/Sigmoid', 'rpn_out_regress/BiasAdd'], input_height=272, input_width=480, rpn_stride=16, roi_min_size=1.0, nms_iou_threshold=0.7, pre_nms_top_n=6000, post_nms_top_n=300, anchor_sizes=[32.0, 64.0, 128.0], anchor_ratios=[1.0, 0.5, 2.0])
namespace_plugin_map = {
"crop_and_resize_1/Reshape" : CropAndResize,
'crop_and_resize_1/CropAndResize' : CropAndResize,
"crop_and_resize_1/transpose" : CropAndResize,
"crop_and_resize_1/transpose_1" : CropAndResize
}
def preprocess(dynamic_graph):
# Append the Proposal plugin, drop the unused input, then collapse namespaces
dynamic_graph.append(Proposal)
dynamic_graph.remove(dynamic_graph.find_nodes_by_name('input_2'))
dynamic_graph.collapse_namespaces(namespace_plugin_map)
| TensorRT-master | samples/sampleUffFasterRCNN/config.py |
#!/usr/bin/python
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Script to dump TensorFlow weights in TRT v1 and v2 dump format.
# The V1 format is for TensorRT 4.0. The V2 format is for TensorRT 4.0 and later.
import sys
import struct
import argparse
try:
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
except ImportError as err:
sys.stderr.write("""Error: Failed to import module ({})""".format(err))
sys.exit()
parser = argparse.ArgumentParser(description='TensorFlow Weight Dumper')
parser.add_argument('-m', '--model', required=True, help='The checkpoint file basename, example basename(model.ckpt-766908.data-00000-of-00001) -> model.ckpt-766908')
parser.add_argument('-o', '--output', required=True, help='The weight file to dump all the weights to.')
parser.add_argument('-1', '--wtsv1', required=False, action='store_true', help='Dump the weights in the wts v1 format.')
opt = parser.parse_args()
if opt.wtsv1:
print("Outputting the trained weights in TensorRT's wts v1 format. This format is documented as:")
print("Line 0: <number of buffers in the file>")
print("Line 1-Num: [buffer name] [buffer type] [buffer size] <hex values>")
else:
print("Outputting the trained weights in TensorRT's wts v2 format. This format is documented as:")
print("Line 0: <number of buffers in the file>")
print("Line 1-Num: [buffer name] [buffer type] [(buffer shape{e.g. (1, 2, 3)}] <buffer shaped size bytes of data>")
inputbase = opt.model
outputbase = opt.output
def float_to_hex(f):
return hex(struct.unpack('<I', struct.pack('<f', f))[0])
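# For example, float_to_hex(1.0) returns '0x3f800000', the IEEE-754 bit
# pattern of 1.0f rendered as a hex string.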
def getTRTType(tensor):
if tf.as_dtype(tensor.dtype) == tf.float32:
return 0
if tf.as_dtype(tensor.dtype) == tf.float16:
return 1
if tf.as_dtype(tensor.dtype) == tf.int8:
return 2
if tf.as_dtype(tensor.dtype) == tf.int32:
return 3
print("Tensor data type of %s is not supported in TensorRT"%(tensor.dtype))
sys.exit();
try:
# Open output file
if opt.wtsv1:
outputFileName = outputbase + ".wts"
else:
outputFileName = outputbase + ".wts2"
outputFile = open(outputFileName, 'w')
# read vars from checkpoint
reader = pywrap_tensorflow.NewCheckpointReader(inputbase)
var_to_shape_map = reader.get_variable_to_shape_map()
# Record count of weights
count = 0
for key in sorted(var_to_shape_map):
count += 1
outputFile.write("%s\n"%(count))
# Dump the weights in either v1 or v2 format
for key in sorted(var_to_shape_map):
tensor = reader.get_tensor(key)
file_key = key.replace('/','_')
typeOfElem = getTRTType(tensor)
val = tensor.shape
if opt.wtsv1:
val = tensor.size
print("%s %s %s "%(file_key, typeOfElem, val))
flat_tensor = tensor.flatten()
# Write the computed buffer type instead of hardcoding 0 (float32),
# so fp16/int8/int32 tensors are labeled correctly.
outputFile.write("%s %s %s "%(file_key, typeOfElem, val))
if opt.wtsv1:
for weight in flat_tensor:
hexval = float_to_hex(float(weight))
outputFile.write("%s "%(hexval[2:]))
else:
# Raw bytes cannot go through the text layer in Python 3: flush the
# text header first, then write the payload via the underlying buffer.
outputFile.flush()
outputFile.buffer.write(flat_tensor.tobytes())
outputFile.write("\n")
outputFile.close()
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
if ("Data loss" in str(e) and
(any([e in inputbase for e in [".index", ".meta", ".data"]]))):
proposed_file = ".".join(inputbase.split(".")[0:-1])
v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the filename
*prefix*. Try removing the '.' and extension. Try:
inspect checkpoint --file_name = {}"""
print(v2_file_error_template.format(proposed_file))
| TensorRT-master | samples/common/dumpTFWts.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import graphsurgeon as gs
import tensorflow as tf
Input = gs.create_node("Input",
op="Placeholder",
dtype=tf.float32,
shape=[1, 3, 300, 300])
PriorBox = gs.create_plugin_node(name="GridAnchor", op="GridAnchor_TRT",
numLayers=6,
minSize=0.2,
maxSize=0.95,
aspectRatios=[1.0, 2.0, 0.5, 3.0, 0.33],
variance=[0.1,0.1,0.2,0.2],
featureMapShapes=[19, 10, 5, 3, 2, 1])
NMS = gs.create_plugin_node(name="NMS", op="NMS_TRT",
shareLocation=1,
varianceEncodedInTarget=0,
backgroundLabelId=0,
confidenceThreshold=1e-8,
nmsThreshold=0.6,
topK=100,
keepTopK=100,
numClasses=91,
inputOrder=[0, 2, 1],
confSigmoid=1,
isNormalized=1)
concat_priorbox = gs.create_node(name="concat_priorbox", op="ConcatV2", dtype=tf.float32, axis=2)
concat_box_loc = gs.create_plugin_node("concat_box_loc", op="FlattenConcat_TRT", dtype=tf.float32, axis=1, ignoreBatch=0)
concat_box_conf = gs.create_plugin_node("concat_box_conf", op="FlattenConcat_TRT", dtype=tf.float32, axis=1, ignoreBatch=0)
namespace_plugin_map = {
"MultipleGridAnchorGenerator": PriorBox,
"Postprocessor": NMS,
"Preprocessor": Input,
"ToFloat": Input,
"image_tensor": Input,
"MultipleGridAnchorGenerator/Concatenate": concat_priorbox,
"MultipleGridAnchorGenerator/Identity": concat_priorbox,
"concat": concat_box_loc,
"concat_1": concat_box_conf
}
def preprocess(dynamic_graph):
# Now create a new graph by collapsing namespaces
dynamic_graph.collapse_namespaces(namespace_plugin_map)
# Remove the outputs, so we just have a single output node (NMS).
dynamic_graph.remove(dynamic_graph.graph_outputs, remove_exclusive_dependencies=False)
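# This config is consumed via the UFF converter's preprocessor hook, e.g.
# (a sketch; the frozen-graph filename is a placeholder):
#   convert-to-uff frozen_inference_graph.pb -O NMS -p config.py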
| TensorRT-master | samples/sampleUffSSD/config.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import graphsurgeon as gs
import tensorflow as tf
fpn_p5upsampled = gs.create_plugin_node("fpn_p5upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)
fpn_p4upsampled = gs.create_plugin_node("fpn_p4upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)
fpn_p3upsampled = gs.create_plugin_node("fpn_p3upsampled", op="ResizeNearest_TRT", dtype=tf.float32, scale=2.0)
roi = gs.create_plugin_node("ROI", op="ProposalLayer_TRT", prenms_topk=1024, keep_topk=1000, iou_threshold=0.7, image_size=[3, 1024, 1024])
roi_align_classifier = gs.create_plugin_node("roi_align_classifier", op="PyramidROIAlign_TRT", pooled_size=7)
mrcnn_detection = gs.create_plugin_node("mrcnn_detection", op="DetectionLayer_TRT", num_classes=81, keep_topk=100, score_threshold=0.7, iou_threshold=0.3)
roi_align_mask = gs.create_plugin_node("roi_align_mask_trt", op="PyramidROIAlign_TRT", pooled_size=14)
mrcnn_detection_bboxes = gs.create_plugin_node("mrcnn_detection_bboxes", op="SpecialSlice_TRT")
namespace_plugin_map = {
"fpn_p5upsampled":fpn_p5upsampled,
"fpn_p4upsampled":fpn_p4upsampled,
"fpn_p3upsampled":fpn_p3upsampled,
"roi_align_classifier":roi_align_classifier,
"mrcnn_detection":mrcnn_detection,
"ROI":roi,
"roi_align_mask":roi_align_mask,
"lambda_1": mrcnn_detection_bboxes,
}
timedistributed_remove_list = [
"mrcnn_class_conv1/Reshape/shape", "mrcnn_class_conv1/Reshape", "mrcnn_class_conv1/Reshape_1/shape", "mrcnn_class_conv1/Reshape_1",
"mrcnn_class_bn1/Reshape/shape", "mrcnn_class_bn1/Reshape", "mrcnn_class_bn1/Reshape_5/shape", "mrcnn_class_bn1/Reshape_5",
"mrcnn_class_conv2/Reshape/shape", "mrcnn_class_conv2/Reshape", "mrcnn_class_conv2/Reshape_1/shape", "mrcnn_class_conv2/Reshape_1",
"mrcnn_class_bn2/Reshape/shape", "mrcnn_class_bn2/Reshape", "mrcnn_class_bn2/Reshape_5/shape", "mrcnn_class_bn2/Reshape_5",
"mrcnn_class_logits/Reshape/shape", "mrcnn_class_logits/Reshape","mrcnn_class_logits/Reshape_1/shape", "mrcnn_class_logits/Reshape_1",
"mrcnn_class/Reshape/shape", "mrcnn_class/Reshape","mrcnn_class/Reshape_1/shape", "mrcnn_class/Reshape_1",
"mrcnn_bbox_fc/Reshape/shape", "mrcnn_bbox_fc/Reshape","mrcnn_bbox_fc/Reshape_1/shape", "mrcnn_bbox_fc/Reshape_1",
"mrcnn_mask_conv1/Reshape/shape", "mrcnn_mask_conv1/Reshape", "mrcnn_mask_conv1/Reshape_1/shape", "mrcnn_mask_conv1/Reshape_1",
"mrcnn_mask_bn1/Reshape/shape", "mrcnn_mask_bn1/Reshape", "mrcnn_mask_bn1/Reshape_5/shape", "mrcnn_mask_bn1/Reshape_5",
"mrcnn_mask_conv2/Reshape/shape", "mrcnn_mask_conv2/Reshape", "mrcnn_mask_conv2/Reshape_1/shape", "mrcnn_mask_conv2/Reshape_1",
"mrcnn_mask_bn2/Reshape/shape", "mrcnn_mask_bn2/Reshape", "mrcnn_mask_bn2/Reshape_5/shape", "mrcnn_mask_bn2/Reshape_5",
"mrcnn_mask_conv3/Reshape/shape", "mrcnn_mask_conv3/Reshape", "mrcnn_mask_conv3/Reshape_1/shape", "mrcnn_mask_conv3/Reshape_1",
"mrcnn_mask_bn3/Reshape/shape", "mrcnn_mask_bn3/Reshape", "mrcnn_mask_bn3/Reshape_5/shape", "mrcnn_mask_bn3/Reshape_5",
"mrcnn_mask_conv4/Reshape/shape", "mrcnn_mask_conv4/Reshape", "mrcnn_mask_conv4/Reshape_1/shape", "mrcnn_mask_conv4/Reshape_1",
"mrcnn_mask_bn4/Reshape/shape", "mrcnn_mask_bn4/Reshape", "mrcnn_mask_bn4/Reshape_5/shape", "mrcnn_mask_bn4/Reshape_5",
"mrcnn_mask_deconv/Reshape/shape", "mrcnn_mask_deconv/Reshape", "mrcnn_mask_deconv/Reshape_1/shape", "mrcnn_mask_deconv/Reshape_1",
"mrcnn_mask/Reshape/shape", "mrcnn_mask/Reshape", "mrcnn_mask/Reshape_1/shape", "mrcnn_mask/Reshape_1",
]
timedistributed_connect_pairs = [
("mrcnn_mask_deconv/Relu", "mrcnn_mask/convolution"), # mrcnn_mask_deconv -> mrcnn_mask
("activation_74/Relu", "mrcnn_mask_deconv/conv2d_transpose"), #active74 -> mrcnn_mask_deconv
("mrcnn_mask_bn4/batchnorm/add_1","activation_74/Relu"), # mrcnn_mask_bn4 -> active74
("mrcnn_mask_conv4/BiasAdd", "mrcnn_mask_bn4/batchnorm/mul_1"), #mrcnn_mask_conv4 -> mrcnn_mask_bn4
("activation_73/Relu", "mrcnn_mask_conv4/convolution"), #active73 -> mrcnn_mask_conv4
("mrcnn_mask_bn3/batchnorm/add_1","activation_73/Relu"), #mrcnn_mask_bn3 -> active73
("mrcnn_mask_conv3/BiasAdd", "mrcnn_mask_bn3/batchnorm/mul_1"), #mrcnn_mask_conv3 -> mrcnn_mask_bn3
("activation_72/Relu", "mrcnn_mask_conv3/convolution"), #active72 -> mrcnn_mask_conv3
("mrcnn_mask_bn2/batchnorm/add_1","activation_72/Relu"), #mrcnn_mask_bn2 -> active72
("mrcnn_mask_conv2/BiasAdd", "mrcnn_mask_bn2/batchnorm/mul_1"), #mrcnn_mask_conv2 -> mrcnn_mask_bn2
("activation_71/Relu", "mrcnn_mask_conv2/convolution"), #active71 -> mrcnn_mask_conv2
("mrcnn_mask_bn1/batchnorm/add_1","activation_71/Relu"), #mrcnn_mask_bn1 -> active71
("mrcnn_mask_conv1/BiasAdd", "mrcnn_mask_bn1/batchnorm/mul_1"), #mrcnn_mask_conv1 -> mrcnn_mask_bn1
("roi_align_mask_trt", "mrcnn_mask_conv1/convolution"), #roi_align_mask -> mrcnn_mask_conv1
("mrcnn_class_bn2/batchnorm/add_1","activation_69/Relu"), # mrcnn_class_bn2 -> active 69
("mrcnn_class_conv2/BiasAdd", "mrcnn_class_bn2/batchnorm/mul_1"), # mrcnn_class_conv2 -> mrcnn_class_bn2
("activation_68/Relu", "mrcnn_class_conv2/convolution"), # active 68 -> mrcnn_class_conv2
("mrcnn_class_bn1/batchnorm/add_1","activation_68/Relu"), # mrcnn_class_bn1 -> active 68
("mrcnn_class_conv1/BiasAdd", "mrcnn_class_bn1/batchnorm/mul_1"), # mrcnn_class_conv1 -> mrcnn_class_bn1
("roi_align_classifier", "mrcnn_class_conv1/convolution"), # roi_align_classifier -> mrcnn_class_conv1
]
dense_compatible_patch =["pool_squeeze/Squeeze", "pool_squeeze/Squeeze_1", #No need to squeeze the dimensions for TRT Dense Layer
"mrcnn_bbox/Shape", "mrcnn_bbox/strided_slice/stack", # mrcnn_bbox(Reshape): No need to reshape, cause we can process it as 1-D array in detectionlayer's kernel
"mrcnn_bbox/strided_slice/stack_1", "mrcnn_bbox/strided_slice/stack_2",
"mrcnn_bbox/strided_slice", "mrcnn_bbox/Reshape/shape/1",
"mrcnn_bbox/Reshape/shape/2", "mrcnn_bbox/Reshape/shape/3",
"mrcnn_bbox/Reshape/shape", "mrcnn_bbox/Reshape"]
dense_compatible_connect_pairs = [
("activation_69/Relu","mrcnn_bbox_fc/MatMul"), #activation_69 -> mrcnn_bbox_fc
("activation_69/Relu", "mrcnn_class_logits/MatMul"), #activation_69 -> mrcnn_class_logits
("mrcnn_class_logits/BiasAdd", "mrcnn_class/Softmax"), #mrcnn_class_logits -> mrcnn_class
("mrcnn_class/Softmax", "mrcnn_detection"), #mrcnn_class -> mrcnn_detection
("mrcnn_bbox_fc/BiasAdd", "mrcnn_detection"), #mrcnn_bbox_fc -> mrcnn_detection
]
def connect(dynamic_graph, connections_list):
for node_a_name, node_b_name in connections_list:
if node_a_name not in dynamic_graph.node_map[node_b_name].input:
dynamic_graph.node_map[node_b_name].input.insert(0, node_a_name)
def remove(dynamic_graph, remove_list):
for node_name in remove_list:
dynamic_graph.remove(dynamic_graph.node_map[node_name])
def preprocess(dynamic_graph):
# Now create a new graph by collapsing namespaces
dynamic_graph.collapse_namespaces(namespace_plugin_map, unique_inputs=True)
remove(dynamic_graph, timedistributed_remove_list)
remove(dynamic_graph, dense_compatible_patch)
remove(dynamic_graph, ['input_anchors', 'input_image_meta'])
connect(dynamic_graph, timedistributed_connect_pairs)
connect(dynamic_graph, dense_compatible_connect_pairs)
| TensorRT-master | samples/sampleUffMaskRCNN/converted/config.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import model_from_json, Model
from keras import backend as K
from keras.layers import Input, Lambda
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from mrcnn.model import *
import mrcnn.model as modellib
from mrcnn.config import Config
import sys
import os
ROOT_DIR = os.path.abspath("./")
LOG_DIR = os.path.join(ROOT_DIR, "logs")
import argparse
import uff
def parse_command_line_arguments(args=None):
parser = argparse.ArgumentParser(prog='keras_to_trt', description='Convert trained keras .hdf5 model to trt .uff')
parser.add_argument(
'-w',
'--weights',
type=str,
default=None,
required=True,
help="The checkpoint weights file of keras model."
)
parser.add_argument(
'-o',
'--output_file',
type=str,
default=None,
required=True,
help="The path to output .uff file."
)
parser.add_argument(
'-l',
'--list-nodes',
action='store_true',
help="show list of nodes contained in converted pb"
)
parser.add_argument(
'-p',
'--preprocessor',
type=str,
default=False,
help="The preprocess function for converting tf node to trt plugin"
)
return parser.parse_args(args)
class CocoConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "coco"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Uncomment to train on 8 GPUs (default is 1)
# GPU_COUNT = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
class InferenceConfig(CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
def main(args=None):
K.set_image_data_format('channels_first')
K.set_learning_phase(0)
args = parse_command_line_arguments(args)
model_weights_path = args.weights
output_file_path = args.output_file
list_nodes = args.list_nodes
config = InferenceConfig()
config.display()
model = modellib.MaskRCNN(mode="inference", model_dir=LOG_DIR, config=config).keras_model
model.load_weights(model_weights_path, by_name=True)
model_A = Model(inputs=model.input, outputs=model.get_layer('mrcnn_mask').output)
model_A.summary()
output_nodes = ['mrcnn_detection', "mrcnn_mask/Sigmoid"]
convert_model(model_A, output_file_path, output_nodes, preprocessor=args.preprocessor,
text=True, list_nodes=list_nodes)
def convert_model(inference_model, output_path, output_nodes=[], preprocessor=None, text=False,
list_nodes=False):
# convert the keras model to pb
orig_output_node_names = [node.op.name for node in inference_model.outputs]
print("The output names of tensorflow graph nodes: {}".format(str(orig_output_node_names)))
sess = K.get_session()
constant_graph = graph_util.convert_variables_to_constants(
sess,
sess.graph.as_graph_def(),
orig_output_node_names)
temp_pb_path = "../temp.pb"
graph_io.write_graph(constant_graph, os.path.dirname(temp_pb_path), os.path.basename(temp_pb_path),
as_text=False)
predefined_output_nodes = output_nodes
if predefined_output_nodes != []:
trt_output_nodes = predefined_output_nodes
else:
trt_output_nodes = orig_output_node_names
# convert .pb to .uff
uff.from_tensorflow_frozen_model(
temp_pb_path,
output_nodes=trt_output_nodes,
preprocessor=preprocessor,
text=text,
list_nodes=list_nodes,
output_filename=output_path,
debug_mode = False
)
os.remove(temp_pb_path)
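# Example invocation (a sketch; the weights filename is a placeholder):
#   python mrcnn_to_trt_single.py -w mask_rcnn_coco.h5 -o mrcnn_nchw.uff -p ./config.py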
if __name__ == "__main__":
main()
| TensorRT-master | samples/sampleUffMaskRCNN/converted/mrcnn_to_trt_single.py |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import numpy as np
import sys
import os
import glob
import shutil
import struct
from random import shuffle
try:
from PIL import Image
except ImportError as err:
raise ImportError("""ERROR: Failed to import module ({})
Please make sure you have Pillow installed.
For installation instructions, see:
http://pillow.readthedocs.io/en/stable/installation.html""".format(err))
height = 300
width = 300
NUM_BATCHES = 0
NUM_PER_BATCH = 1
NUM_CALIBRATION_IMAGES = 50
parser = argparse.ArgumentParser()
parser.add_argument('--inDir', required=True, help='Input directory')
parser.add_argument('--outDir', required=True, help='Output directory')
args = parser.parse_args()
CALIBRATION_DATASET_LOC = args.inDir + '/*.jpg'
# images to test
imgs = []
print("Location of dataset = " + CALIBRATION_DATASET_LOC)
imgs = glob.glob(CALIBRATION_DATASET_LOC)
shuffle(imgs)
imgs = imgs[:NUM_CALIBRATION_IMAGES]
NUM_BATCHES = NUM_CALIBRATION_IMAGES // NUM_PER_BATCH + (NUM_CALIBRATION_IMAGES % NUM_PER_BATCH > 0)
print("Total number of images = " + str(len(imgs)))
print("NUM_PER_BATCH = " + str(NUM_PER_BATCH))
print("NUM_BATCHES = " + str(NUM_BATCHES))
# output
outDir = args.outDir+"/batches"
if os.path.exists(outDir):
os.system("rm " + outDir +"/*")
# prepare output
if not os.path.exists(outDir):
os.makedirs(outDir)
for i in range(NUM_CALIBRATION_IMAGES):
os.system("convert "+imgs[i]+" -resize "+str(height)+"x"+str(width)+"! "+outDir+"/"+str(i)+".ppm")
CALIBRATION_DATASET_LOC= outDir + '/*.ppm'
imgs = glob.glob(CALIBRATION_DATASET_LOC)
# load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe
img = 0
for i in range(NUM_BATCHES):
batchfile = outDir + "/batch_calibration" + str(i) + ".batch"
batchlistfile = outDir + "/batch_calibration" + str(i) + ".list"
batchlist = open(batchlistfile,'a')
batch = np.zeros(shape=(NUM_PER_BATCH, 3, height, width), dtype = np.float32)
for j in range(NUM_PER_BATCH):
batchlist.write(os.path.basename(imgs[img]) + '\n')
im = Image.open(imgs[img]).resize((width,height), Image.NEAREST)
in_ = np.array(im, dtype=np.float32, order='C')
in_ = in_[:,:,::-1]
in_ -= np.array((104.0, 117.0, 123.0))
in_ = in_.transpose((2,0,1))
batch[j] = in_
img += 1
# save
batch.tofile(batchfile)
batchlist.close()
# Prepend batch shape information
ba = bytearray(struct.pack("4i", batch.shape[0], batch.shape[1], batch.shape[2], batch.shape[3]))
with open(batchfile, 'rb+') as f:
content = f.read()
f.seek(0,0)
f.write(ba)
f.write(content)
os.system("rm " + outDir +"/*.ppm")
| TensorRT-master | samples/sampleSSD/batchPrepare.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Simple printing utils
Utils to print traces and profiles in CSV format
'''
from __future__ import print_function
def combineDescriptions(prolog, features, descriptions):
''' Combine features with their descriptions '''
fullDescription = prolog
sep = ' '
for feature, description in zip(features, descriptions):
fullDescription += sep + feature + ' (' + description + ')'
sep = ', '
return fullDescription
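# For example (a sketch):
#   combineDescriptions('Features are:', ['name', 'timeMs'],
#                       ['layer name', 'total layer time'])
#   -> 'Features are: name (layer name), timeMs (total layer time)'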
def printHeader(allFeatures, selection, gp = False, count = False):
''' Print table header '''
if gp:
sep = '#'
if count:
sep += 'count, '
else:
sep = ''
for feature in allFeatures:
if feature in selection:
print(sep + feature, end = '')
sep = ', '
print('')
def printCsv(data, count = False):
''' Print trace in CSV format '''
c = 0
for row in data:
if count:
print(c, end = '')
c += 1
sep = ', '
else:
sep = ''
for r in row:
if isinstance(r, str):
print(sep + r, end = '')
else:
print('{}{:.6}'.format(sep, float(r)), end = '')
sep = ', '
print('')
def filterData(data, allFeatures, selection):
''' Drop features not in the given set '''
filteredData = []
for d in data:
row = []
for f in allFeatures:
if f in selection:
if f in d:
row.append(d[f])
else:
row.append('')
filteredData.append(row)
return filteredData
| TensorRT-master | samples/trtexec/prn_utils.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Print a trtexec profile from a JSON file
Given a JSON file containing a trtexec profile,
this program prints the profile in CSV table format.
Each row represents a layer in the profile.
The output format can be optionally converted to a
format suitable for GNUPlot.
'''
import sys
import json
import argparse
import prn_utils as pu
allFeatures = ['name', 'timeMs', 'averageMs', 'percentage']
defaultFeatures = ",".join(allFeatures)
descriptions = ['layer name', 'total layer time', 'average layer time', 'percentage of total time']
featuresDescription = pu.combineDescriptions('Features are (times in ms):', allFeatures, descriptions)
def hasNames(features):
''' Check if the name is included in the set '''
return 'name' in features
def totalData(features, profile):
''' Add row at the bottom with the total '''
accumulator = {}
for f in features:
accumulator[f] = 0
accumulator['name'] = 'total'
for row in profile:
for f in features:
if f in row and not f == 'name':
accumulator[f] += row[f]
return accumulator
def findAndRemove(profile, name):
''' Find named row in profile and remove '''
for r in range(len(profile)):
if profile[r]['name'] == name:
row = profile[r]
del profile[r]
return row
return None
def refName(name):
''' Add prefix ref to name '''
return 'ref' + name[0].capitalize() + name[1:]
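# e.g. refName('averageMs') -> 'refAverageMs'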
def refFeatures(names):
''' Add prefix ref to features names '''
refNames = []
for name in names:
refNames.append(refName(name))
return refNames
def mergeHeaders(features, skipFirst = True):
''' Duplicate feature names for reference and target profile '''
if skipFirst:
return [features[0]] + refFeatures(features[1:]) + features[1:] + ['% difference']
return refFeatures(features) + features + ['% difference']
def addReference(row, reference):
''' Add reference results to results dictionary '''
for k,v in reference.items():
if k == 'name':
if k in row:
continue
else:
k = refName(k)
row[k] = v
def mergeRow(reference, profile, diff):
''' Merge reference and target profile results into a single row '''
row = {}
if profile:
row = profile
if reference:
addReference(row, reference)
if diff:
row['% difference'] = diff
return row
def alignData(reference, profile, threshold):
''' Align and merge reference and target profiles '''
alignedData = []
for ref in reference:
prof = findAndRemove(profile, ref['name'])
if prof:
diff = (prof['averageMs'] / ref['averageMs'] - 1)*100
if abs(diff) >= threshold:
alignedData.append(mergeRow(ref, prof, diff))
else:
alignedData.append(mergeRow(ref, None, None))
for prof in profile:
alignedData.append(mergeRow(None, prof, None))
return alignedData
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--features', metavar='F[,F]*', default=defaultFeatures,
help='Comma separated list of features to print. ' + featuresDescription)
parser.add_argument('--total', action='store_true', help='Add total time row.')
parser.add_argument('--gp', action='store_true', help='Print GNUPlot format.')
parser.add_argument('--no-header', action='store_true', help='Omit the header row.')
parser.add_argument('--threshold', metavar='T', default=0.0, type=float,
help='Threshold of percentage difference.')
parser.add_argument('--reference', metavar='R', help='Reference profile file name.')
parser.add_argument('name', metavar='filename', help='Profile file.')
args = parser.parse_args()
global allFeatures
features = args.features.split(',')
for f in features:
if not f in allFeatures:
print('Feature {} not recognized'.format(f))
return
count = args.gp and not hasNames(features)
profile = None
reference = None
with open(args.name) as f:
profile = json.load(f)
profileCount = profile[0]['count']
profile = profile[1:]
if args.reference:
with open(args.reference) as f:
reference = json.load(f)
referenceCount = reference[0]['count']
reference = reference[1:]
allFeatures = mergeHeaders(allFeatures)
features = mergeHeaders(features, hasNames(features))
if not args.no_header:
if reference:
comment = '#' if args.gp else ''
print(comment + 'reference count: {} - profile count: {}'.format(referenceCount, profileCount))
pu.printHeader(allFeatures, features, args.gp, count)
if reference:
profile = alignData(reference, profile, args.threshold)
if args.total:
profile.append(totalData(allFeatures, profile))
if reference:
total = profile[len(profile) - 1]
total['% difference'] = (total['averageMs'] / total['refAverageMs'] - 1)*100
profile = pu.filterData(profile, allFeatures, features)
pu.printCsv(profile, count)
if __name__ == '__main__':
sys.exit(main())
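# Example invocation (a sketch; the profile comes from trtexec --exportProfile):
#   trtexec --onnx=model.onnx --exportProfile=profile.json
#   python profiler.py --total profile.json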
| TensorRT-master | samples/trtexec/profiler.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Print a trtexec timing trace from a JSON file
Given a JSON file containing a trtexec timing trace,
this program prints the trace in CSV table format.
Each row represents an entry point in the trace.
The columns, as indicated by the header, represent
one of the recorded metrics. The output format can
be optionally converted to a format suitable for
GNUPlot.
'''
import sys
import json
import argparse
import prn_utils as pu
timestamps = ['startInMs', 'endInMs', 'startComputeMs', 'endComputeMs', 'startOutMs', 'endOutMs']
intervals = ['inMs', 'computeMs', 'outMs', 'latencyMs', 'endToEndMs']
allMetrics = timestamps + intervals
defaultMetrics = ",".join(allMetrics)
descriptions = ['start input', 'end input', 'start compute', 'end compute', 'start output',
'end output', 'input', 'compute', 'output', 'latency', 'end to end latency']
metricsDescription = pu.combineDescriptions('Possible metrics (all in ms) are:',
allMetrics, descriptions)
def skipTrace(trace, start):
''' Skip trace entries until start time '''
for t in range(len(trace)):
if trace[t]['startComputeMs'] >= start:
return trace[t:]
return []
def hasTimestamp(metrics):
''' Check if features have at least one timestamp '''
for timestamp in timestamps:
if timestamp in metrics:
return True
return False
def avgData(data, avg, times):
''' Average trace entries (every avg entries) '''
averaged = []
accumulator = []
r = 0
for row in data:
if r == 0:
for m in row:
accumulator.append(m)
else:
for t in range(times, len(row)):
accumulator[t] += row[t]
r += 1
if r == avg:
for t in range(times, len(row)):
accumulator[t] /= avg
averaged.append(accumulator)
accumulator = []
r = 0
return averaged
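# Sketch: with avg=2 and times=0, rows [1, 2] and [3, 4] average to [2.0, 3.0].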
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--metrics', metavar='M[,M]*', default=defaultMetrics,
help='Comma separated list of metrics to print. ' + metricsDescription)
parser.add_argument('--avg', metavar='N', type=int, default=1,
help='Print average every N records.')
parser.add_argument('--start', metavar='T', type=float, default=0,
help='Start trace at time T (drop records with compute start before T ms).')
parser.add_argument('--gp', action='store_true', help='Print GNUPlot format.')
parser.add_argument('--no-header', action='store_true', help='Omit the header row.')
parser.add_argument('name', metavar='filename', help='Trace file.')
args = parser.parse_args()
metrics = args.metrics.split(',')
count = args.gp and (not hasTimestamp(metrics) or len(metrics) == 1)
if not args.no_header:
pu.printHeader(allMetrics, metrics, args.gp, count)
with open(args.name) as f:
trace = json.load(f)
if args.start > 0:
trace = skipTrace(trace, args.start)
trace = pu.filterData(trace, allMetrics, metrics)
if args.avg > 1:
trace = avgData(trace, args.avg, hasTimestamp(metrics))
pu.printCsv(trace, count)
if __name__ == '__main__':
sys.exit(main())
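# Example invocation (a sketch; the trace comes from trtexec --exportTimes):
#   trtexec --onnx=model.onnx --exportTimes=trace.json
#   python tracer.py --avg 10 trace.json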
| TensorRT-master | samples/trtexec/tracer.py |
#!/usr/bin/env python
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import re
# Configuration
copyright_year = "2020"
extensions_p = (".py", ".sh", ".cmake", "CMakeLists")
extensions_c = (".c", ".cpp", ".h", ".hpp", ".cu")
pattern_p = """#\s*
# Copyright \(c\) ([1-2][0-9]{3}),* NVIDIA CORPORATION.*
#\s*
# Licensed under the Apache License, Version 2.0 \(the "License"\);\s*
# you may not use this file except in compliance with the License.\s*
# You may obtain a copy of the License at\s*
#\s*
# http://www.apache.org/licenses/LICENSE-2.0\s*
#\s*
# Unless required by applicable law or agreed to in writing, software\s*
# distributed under the License is distributed on an "AS IS" BASIS,\s*
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\s*
# See the License for the specific language governing permissions and\s*
# limitations under the License.\s*
#
"""
header_p = """#
# Copyright (c) {year}, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""".format(year=copyright_year)
pattern_c = """/\*\s*
\* Copyright \(c\) ([1-2][0-9]{3}),* NVIDIA CORPORATION.*
\*\s*
\* Licensed under the Apache License, Version 2.0 \(the "License"\);\s*
\* you may not use this file except in compliance with the License.\s*
\* You may obtain a copy of the License at\s*
\*\s*
\* http://www.apache.org/licenses/LICENSE-2.0\s*
\*\s*
\* Unless required by applicable law or agreed to in writing, software\s*
\* distributed under the License is distributed on an "AS IS" BASIS,\s*
\* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\s*
\* See the License for the specific language governing permissions and\s*
\* limitations under the License.\s*
\*/
"""
header_c = """/*
* Copyright (c) {year}, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
""".format(year=copyright_year)
# Routines
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-d', '--dir', type=str, required=True, help='Root directory to start the scan')
parser.add_argument('-f', '--force-update', action='store_true', help='Force the header writes for all files')
parser.add_argument('--dry-run', action='store_true', help='Just perform a dry-run')
parser.add_argument('--max-depth', type=int, default=100, help='Maximum depth to recurse while scanning files.')
return parser
def update(filename, args):
"""
Update copyright header for specified file
"""
if filename.endswith(extensions_p):
pattern = re.compile(pattern_p)
header = header_p
shebang = re.compile(r'^(\#\!.*\n)', re.MULTILINE)
elif filename.endswith(extensions_c):
pattern = re.compile(pattern_c)
header = header_c
shebang = None
else:
return
with open(filename, "r+") as f:
data = f.read()
match = pattern.search(data)
if match:
year = match.group(1)
if copyright_year == year:
if args.force_update:
print(filename,": FORCED")
new_data = pattern.sub(header, data, count=1)
else:
print(filename,": SKIP")
return
else:
print(filename,": UPDATE (",year,"->",copyright_year,")")
new_data = pattern.sub(header, data, count=1)
else:
match = shebang.search(data) if shebang else None
if match:
print(filename,": ADD ( after",match.group(1),")")
new_data = shebang.sub(match.group(1)+header, data, count=1)
else:
print(filename,": ADD ( top )")
new_data = header+data
if not args.dry_run:
with open(filename, "w") as f:
f.write(new_data)
def copyright_scan(directory, depth, args, exclude_dirs=[]):
"""
Update copyright for TensorRT sources
"""
if directory in exclude_dirs:
return
for f in os.listdir(directory):
filename = os.path.join(directory,f)
if os.path.isdir(filename) and (depth > 0):
copyright_scan(filename, depth-1, args, exclude_dirs)
elif filename.endswith(extensions_p + extensions_c):
update(filename, args)
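# Example invocation (a sketch):
#   python copyright-scan.py --dir . --dry-run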
def main():
parser = argparse.ArgumentParser(description='TensorRT copyright scan')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
exclude_dirs = ["./third_party","./build","./parsers/onnx", "./include"]
copyright_scan(args.dir, args.max_depth, args, exclude_dirs)
if __name__ == '__main__':
main()
| TensorRT-master | scripts/copyright-scan.py |
#!/usr/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
import sys
import subprocess
import argparse
SAMPLES_PER_WARP = {
('sm_70', False): 24,
('sm_80', False): 17,
('sm_80', True): 40
}
def main():
parser = argparse.ArgumentParser(
description="Compile & run mcmc prob training benchmark")
parser.add_argument(
"--no-run",
action="store_true",
required=False,
help="Do not run the compiled program"
)
parser.add_argument(
"--arch",
required=False,
default="sm_80",
help="Compute capability: [sm_70, sm_80]"
)
parser.add_argument(
"--debug",
action="store_true",
required=False,
help="If set, produce debug output (also sets the DEBUG macro)"
)
parser.add_argument(
"--tc",
action="store_true",
required=False,
help="If set, use the Tensor-core based kernel rather than FFMA. "
"Can only be set with arch sm_80"
)
args = parser.parse_args()
cmd = ["nvcc", "-std=c++14", "--extended-lambda", "-Iinclude"]
if not args.no_run:
cmd += ["-run"]
samples_per_warp = SAMPLES_PER_WARP.get((args.arch, args.tc))
if samples_per_warp is None:
raise ValueError(
"Invalid combination of arch ({}) and tensor-core use ({})".format(
args.arch, args.tc))
cmd += ["-arch={}".format(args.arch),
"-DDEVICE_ARCH={}0".format(args.arch[-2:]),
"-DN_WARP_SAMPLES={}".format(samples_per_warp)]
if args.tc:
cmd += ["-DUSE_TC=1"]
if args.debug:
cmd += ["-g", "-G"]
else:
cmd += ["-O3", "-lineinfo"]
cmd += ["main.cu"]
print("Compile/run command: " + " ".join(cmd))
child_proc = subprocess.Popen(cmd)
result = child_proc.wait()
return result
if __name__ == "__main__":
sys.exit(main())
| mcmc-bnn-example-main | compile_run.py |
#######################################################################################################################
#
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2017, Soumith Chintala. All rights reserved.
# ********************************************************************************************************************
#
#
# The code in this file is adapted from: https://github.com/pytorch/examples/tree/master/imagenet/main.py
#
# Main Difference from the original file: add the networks using partial convolution based padding
#
# Network options using zero padding: vgg16_bn, vgg19_bn, resnet50, resnet101, resnet152, ...
# Network options using partial conv based padding: pdvgg16_bn, pdvgg19_bn, pdresnet50, pdresnet101, pdresnet152, ...
#
# Contact: Guilin Liu ([email protected])
#
#######################################################################################################################
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# import torchvision.models as models
import torchvision.models as models_baseline # networks with zero padding
import models as models_partial # partial conv based padding
model_baseline_names = sorted(name for name in models_baseline.__dict__
if name.islower() and not name.startswith("__")
and callable(models_baseline.__dict__[name]))
model_partial_names = sorted(name for name in models_partial.__dict__
if name.islower() and not name.startswith("__")
and callable(models_partial.__dict__[name]))
model_names = model_baseline_names + model_partial_names
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# parser.add_argument('data', metavar='DIR',
# help='path to dataset')
parser.add_argument('--data_train', metavar='DIRTRAIN',
help='path to training dataset')
parser.add_argument('--data_val', metavar='DIRVAL',
help='path to validation dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
# parser.add_argument('--epochs', default=90, type=int, metavar='N',
# help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
# parser.add_argument('-b', '--batch-size', default=256, type=int,
# metavar='N', help='mini-batch size (default: 256)')
# use a batch size of 256 or 192 depending on available GPU memory
parser.add_argument('-b', '--batch-size', default=192, type=int,
metavar='N', help='mini-batch size (default: 192)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--prefix', default='', type=str)
parser.add_argument('--ckptdirprefix', default='', type=str)
best_prec1 = 0
def main():
global args, best_prec1
args = parser.parse_args()
checkpoint_dir = args.ckptdirprefix + 'checkpoint_' + args.arch + '_' + args.prefix + '/'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
args.logger_fname = os.path.join(checkpoint_dir, 'loss.txt')
with open(args.logger_fname, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
log_file.write('world size: %d\n' % args.world_size)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
if args.arch in models_baseline.__dict__:
model = models_baseline.__dict__[args.arch](pretrained=True)
else:
model = models_partial.__dict__[args.arch](pretrained=True)
# model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
if args.arch in models_baseline.__dict__:
model = models_baseline.__dict__[args.arch]()
else:
model = models_partial.__dict__[args.arch]()
# model = models.__dict__[args.arch]()
# logging
with open(args.logger_fname, "a") as log_file:
log_file.write('model created\n')
if args.gpu is not None:
model = model.cuda(args.gpu)
elif args.distributed:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
else:
# if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
if args.arch.startswith('alexnet') or 'vgg' in args.arch:
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD([p for p in model.parameters() if p.requires_grad], args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
assert False
cudnn.benchmark = True
# Data loading code
# traindir = os.path.join(args.data, 'train')
# valdir = os.path.join(args.data, 'val')
traindir = args.data_train #os.path.join(args.data, 'train')
valdir = args.data_val #os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# logging
with open(args.logger_fname, "a") as log_file:
log_file.write('training/val dataset created\n')
if args.evaluate:
validate(val_loader, model, criterion)
return
# logging
with open(args.logger_fname, "a") as log_file:
log_file.write('started training\n')
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best, foldername=checkpoint_dir, filename='checkpoint.pth.tar')
if epoch >= 94:
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, False, foldername=checkpoint_dir, filename='epoch_'+str(epoch)+'_checkpoint.pth.tar')
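# Example invocation (a sketch; script and dataset paths are placeholders):
#   python main.py --data_train /data/imagenet/train --data_val /data/imagenet/val \
#       -a pdresnet50 -b 192 --prefix partialconv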
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\n'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
with open(args.logger_fname, "a") as log_file:
log_file.write('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\n'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
with open(args.logger_fname, "a") as log_file:
log_file.write('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\n'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
with open(args.logger_fname, "a") as final_log_file:
        final_log_file.write(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}\n'
                             .format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, foldername='', filename='checkpoint.pth.tar'):
torch.save(state, os.path.join(foldername, filename))
if is_best:
shutil.copyfile(os.path.join(foldername, filename), os.path.join(foldername, 'model_best.pth.tar'))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
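# Worked example (illustrative): with args.lr = 0.1, epochs 0-29 train at
# 0.1, epochs 30-59 at 0.01, epochs 60-89 at 0.001, and so on.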
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            # use reshape, not view: this slice of the transposed tensor is
            # non-contiguous in recent PyTorch versions
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
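# Usage sketch for accuracy() (illustrative shapes only):
#   output = torch.randn(8, 10)           # (batch, num_classes) logits
#   target = torch.randint(0, 10, (8,))   # (batch,) ground-truth labels
#   prec1, prec5 = accuracy(output, target, topk=(1, 5))  # percentages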
if __name__ == '__main__':
main()
| partialconv-master | main.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright (c) 2017, Soumith Chintala. All rights reserved.
###############################################################################
'''
Code adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
Introduced partial-convolution-based padding for convolutional layers
'''
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from .partialconv2d import PartialConv2d
__all__ = ['PDResNet', 'pdresnet18', 'pdresnet34', 'pdresnet50', 'pdresnet101',
'pdresnet152']
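# NOTE: no pretrained weight URLs are published here, so calling the model
# constructors below with pretrained=True will fail until they are filled in.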
model_urls = {
'pdresnet18': '',
'pdresnet34': '',
'pdresnet50': '',
'pdresnet101': '',
'pdresnet152': '',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return PartialConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = PartialConv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = PartialConv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = PartialConv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class PDResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(PDResNet, self).__init__()
self.conv1 = PartialConv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, PartialConv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
PartialConv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def pdresnet18(pretrained=False, **kwargs):
"""Constructs a PDResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = PDResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdresnet18']))
return model
def pdresnet34(pretrained=False, **kwargs):
"""Constructs a PDResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = PDResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdresnet34']))
return model
def pdresnet50(pretrained=False, **kwargs):
"""Constructs a PDResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = PDResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdresnet50']))
return model
def pdresnet101(pretrained=False, **kwargs):
"""Constructs a PDResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = PDResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdresnet101']))
return model
def pdresnet152(pretrained=False, **kwargs):
"""Constructs a PDResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = PDResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdresnet152']))
return model
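# Usage sketch (illustrative; random initialization, torch imported by caller):
#   model = pdresnet50(num_classes=1000)
#   logits = model(torch.rand(1, 3, 224, 224))  # -> shape (1, 1000)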
| partialconv-master | models/pd_resnet.py |
from .pd_resnet import *
from .pd_vgg import * | partialconv-master | models/__init__.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: Guilin Liu ([email protected])
###############################################################################
"""VGG Losses"""
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
def gram_matrix(input_tensor):
"""
Compute Gram matrix
:param input_tensor: input tensor with shape
(batch_size, nbr_channels, height, width)
:return: Gram matrix of y
"""
(b, ch, h, w) = input_tensor.size()
features = input_tensor.view(b, ch, w * h)
features_t = features.transpose(1, 2)
    # scaled baddbmm: dividing inside the matrix product avoids underflow
    # during mixed-precision training
    input = torch.zeros(b, ch, ch, dtype=features.dtype, device=features.device)
    gram = torch.baddbmm(input, features, features_t, beta=0, alpha=1./(ch * h * w))
# naive way to avoid underflow for mixed precision training
# features = features / (ch * h)
# gram = features.bmm(features_t) / w
# for fp32 training, it is also safe to use the following:
# gram = features.bmm(features_t) / (ch * h * w)
return gram
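# Shape sketch (illustrative): for activations x of shape (2, 64, 32, 32),
#   g = gram_matrix(x)  # g.shape == (2, 64, 64), normalized by ch * h * w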
class PerceptualLoss(nn.Module):
"""
Perceptual Loss Module
"""
def __init__(self):
"""Init"""
super().__init__()
self.l1_loss = torch.nn.L1Loss()
self.mse_loss = torch.nn.MSELoss()
@staticmethod
def normalize_batch(batch, div_factor=255.):
"""
Normalize batch
:param batch: input tensor with shape
(batch_size, nbr_channels, height, width)
:param div_factor: normalizing factor before data whitening
:return: normalized data, tensor with shape
(batch_size, nbr_channels, height, width)
"""
# normalize using imagenet mean and std
mean = batch.data.new(batch.data.size())
std = batch.data.new(batch.data.size())
mean[:, 0, :, :] = 0.485
mean[:, 1, :, :] = 0.456
mean[:, 2, :, :] = 0.406
std[:, 0, :, :] = 0.229
std[:, 1, :, :] = 0.224
std[:, 2, :, :] = 0.225
batch = torch.div(batch, div_factor)
batch -= Variable(mean)
batch = torch.div(batch, Variable(std))
return batch
def forward(self, x, y):
"""
Forward
:param x: input tensor with shape
(batch_size, nbr_channels, height, width)
:param y: input tensor with shape
(batch_size, nbr_channels, height, width)
:return: l1 loss between the normalized data
"""
x = self.normalize_batch(x)
y = self.normalize_batch(y)
return self.l1_loss(x, y)
def make_vgg16_layers(style_avg_pool = False):
"""
make_vgg16_layers
Return a custom vgg16 feature module with avg pooling
"""
vgg16_cfg = [
64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
512, 512, 512, 'M', 512, 512, 512, 'M'
]
layers = []
in_channels = 3
for v in vgg16_cfg:
if v == 'M':
if style_avg_pool:
layers += [nn.AvgPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
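# e.g., make_vgg16_layers(style_avg_pool=True) swaps every max-pool for an
# average-pool, a common choice when the features feed a style loss.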
class VGG16Partial(nn.Module):
"""
VGG16 partial model
"""
def __init__(self, vgg_path='~/.torch/vgg16-397923af.pth', layer_num=3):
"""
Init
:param layer_num: number of layers
"""
super().__init__()
vgg_model = models.vgg16()
vgg_model.features = make_vgg16_layers()
        vgg_model.load_state_dict(
            torch.load(os.path.expanduser(vgg_path), map_location='cpu')
        )
vgg_pretrained_features = vgg_model.features
assert layer_num > 0
assert isinstance(layer_num, int)
self.layer_num = layer_num
self.slice1 = torch.nn.Sequential()
for x in range(5): # 4
self.slice1.add_module(str(x), vgg_pretrained_features[x])
if self.layer_num > 1:
self.slice2 = torch.nn.Sequential()
for x in range(5, 10): # (4, 9)
self.slice2.add_module(str(x), vgg_pretrained_features[x])
if self.layer_num > 2:
self.slice3 = torch.nn.Sequential()
for x in range(10, 17): # (9, 16)
self.slice3.add_module(str(x), vgg_pretrained_features[x])
if self.layer_num > 3:
self.slice4 = torch.nn.Sequential()
for x in range(17, 24): # (16, 23)
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for param in self.parameters():
param.requires_grad = False
@staticmethod
def normalize_batch(batch, div_factor=1.0):
"""
Normalize batch
:param batch: input tensor with shape
(batch_size, nbr_channels, height, width)
:param div_factor: normalizing factor before data whitening
:return: normalized data, tensor with shape
(batch_size, nbr_channels, height, width)
"""
# normalize using imagenet mean and std
mean = batch.data.new(batch.data.size())
std = batch.data.new(batch.data.size())
mean[:, 0, :, :] = 0.485
mean[:, 1, :, :] = 0.456
mean[:, 2, :, :] = 0.406
std[:, 0, :, :] = 0.229
std[:, 1, :, :] = 0.224
std[:, 2, :, :] = 0.225
batch = torch.div(batch, div_factor)
batch -= Variable(mean)
batch = torch.div(batch, Variable(std))
return batch
def forward(self, x):
"""
Forward, get features used for perceptual loss
:param x: input tensor with shape
(batch_size, nbr_channels, height, width)
:return: list of self.layer_num feature maps used to compute the
perceptual loss
"""
h = self.slice1(x)
h1 = h
output = []
if self.layer_num == 1:
output = [h1]
elif self.layer_num == 2:
h = self.slice2(h)
h2 = h
output = [h1, h2]
elif self.layer_num == 3:
h = self.slice2(h)
h2 = h
h = self.slice3(h)
h3 = h
output = [h1, h2, h3]
elif self.layer_num >= 4:
h = self.slice2(h)
h2 = h
h = self.slice3(h)
h3 = h
h = self.slice4(h)
h4 = h
output = [h1, h2, h3, h4]
return output
# perceptual loss and (spatial) style loss
class VGG16PartialLoss(PerceptualLoss):
"""
VGG16 perceptual loss
"""
def __init__(self, l1_alpha=5.0, perceptual_alpha=0.05, style_alpha=120,
smooth_alpha=0, feat_num=3, vgg_path='~/.torch/vgg16-397923af.pth'):
"""
Init
:param l1_alpha: weight of the l1 loss
:param perceptual_alpha: weight of the perceptual loss
:param style_alpha: weight of the style loss
:param smooth_alpha: weight of the regularizer
:param feat_num: number of feature maps
"""
super().__init__()
self.vgg16partial = VGG16Partial(vgg_path=vgg_path).eval()
        # reduction='mean' replaces the deprecated size_average=True
        self.loss_fn = torch.nn.L1Loss(reduction='mean')
self.l1_weight = l1_alpha
self.vgg_weight = perceptual_alpha
self.style_weight = style_alpha
self.regularize_weight = smooth_alpha
self.dividor = 1
self.feat_num = feat_num
def forward(self, output0, target0):
"""
Forward
assuming both output0 and target0 are in the range of [0, 1]
:param output0: output of a model, tensor with shape
(batch_size, nbr_channels, height, width)
:param target0: target, tensor with shape
(batch_size, nbr_channels, height, width)
:return: total perceptual loss
"""
y = self.normalize_batch(target0, self.dividor)
x = self.normalize_batch(output0, self.dividor)
# L1 loss
l1_loss = self.l1_weight * (torch.abs(x - y).mean())
vgg_loss = 0
style_loss = 0
smooth_loss = 0
# VGG
if self.vgg_weight != 0 or self.style_weight != 0:
yc = Variable(y.data)
with torch.no_grad():
groundtruth = self.vgg16partial(yc)
generated = self.vgg16partial(x)
# vgg loss: VGG content loss
if self.vgg_weight > 0:
# for m in range(0, len(generated)):
for m in range(len(generated) - self.feat_num, len(generated)):
gt_data = Variable(groundtruth[m].data, requires_grad=False)
vgg_loss += (
self.vgg_weight * self.loss_fn(generated[m], gt_data)
)
# style loss: Gram matrix loss
if self.style_weight > 0:
# for m in range(0, len(generated)):
for m in range(len(generated) - self.feat_num, len(generated)):
gt_style = gram_matrix(
Variable(groundtruth[m].data, requires_grad=False))
gen_style = gram_matrix(generated[m])
style_loss += (
self.style_weight * self.loss_fn(gen_style, gt_style)
)
# smooth term
if self.regularize_weight != 0:
smooth_loss += self.regularize_weight * (
torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:]).mean() +
torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :]).mean()
)
tot = l1_loss + vgg_loss + style_loss + smooth_loss
return tot, vgg_loss, style_loss
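# Usage sketch (illustrative; assumes the VGG16 weight file exists locally):
#   criterion = VGG16PartialLoss(vgg_path='~/.torch/vgg16-397923af.pth')
#   output = torch.rand(1, 3, 256, 256)   # prediction, values in [0, 1]
#   target = torch.rand(1, 3, 256, 256)   # ground truth, values in [0, 1]
#   total, vgg, style = criterion(output, target)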
| partialconv-master | models/loss.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright (c) 2017, Soumith Chintala. All rights reserved.
###############################################################################
'''
Code adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
Introduced partial-convolution-based padding for convolutional layers
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from .partialconv2d import *
__all__ = [
'PDVGG', 'pdvgg11', 'pdvgg11_bn', 'pdvgg13', 'pdvgg13_bn', 'pdvgg16', 'pdvgg16_bn',
'pdvgg19_bn', 'pdvgg19',
]
model_urls = {
'pdvgg16_bn': '',
'pdvgg19_bn': '',
'pdvgg16': '',
'pdvgg19': '',
'pdvgg11': '',
'pdvgg13': '',
'pdvgg11_bn': '',
'pdvgg13_bn': '',
# 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
# 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
# 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
# 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
# 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
# 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
# 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
# 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class PDVGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(PDVGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, PartialConv2d):
                # use the in-place init_ variants; the non-underscore forms
                # are deprecated
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_layers_pd(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = PartialConv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def pdvgg11(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['A']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg11']))
return model
def pdvgg11_bn(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg11_bn']))
return model
def pdvgg13(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['B']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg13']))
return model
def pdvgg13_bn(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['B'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg13_bn']))
return model
def pdvgg16(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg16']))
return model
def pdvgg19(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['E']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg19']))
return model
def pdvgg16_bn(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['D'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg16_bn']))
return model
def pdvgg19_bn(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = PDVGG(make_layers_pd(cfg['E'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['pdvgg19_bn']))
return model | partialconv-master | models/pd_vgg.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: Guilin Liu ([email protected])
###############################################################################
import torch
import torch.nn.functional as F
from torch import nn, cuda
from torch.autograd import Variable
class PartialConv3d(nn.Conv3d):
def __init__(self, *args, **kwargs):
# whether the mask is multi-channel or not
if 'multi_channel' in kwargs:
self.multi_channel = kwargs['multi_channel']
kwargs.pop('multi_channel')
else:
self.multi_channel = False
if 'return_mask' in kwargs:
self.return_mask = kwargs['return_mask']
kwargs.pop('return_mask')
else:
self.return_mask = False
super(PartialConv3d, self).__init__(*args, **kwargs)
if self.multi_channel:
self.weight_maskUpdater = torch.ones(self.out_channels, self.in_channels, self.kernel_size[0], self.kernel_size[1], self.kernel_size[2])
else:
self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0], self.kernel_size[1], self.kernel_size[2])
self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] * self.weight_maskUpdater.shape[3] * self.weight_maskUpdater.shape[4]
self.last_size = (None, None, None, None, None)
self.update_mask = None
self.mask_ratio = None
def forward(self, input, mask_in=None):
assert len(input.shape) == 5
if mask_in is not None or self.last_size != tuple(input.shape):
self.last_size = tuple(input.shape)
with torch.no_grad():
if self.weight_maskUpdater.type() != input.type():
self.weight_maskUpdater = self.weight_maskUpdater.to(input)
if mask_in is None:
# if mask is not provided, create a mask
if self.multi_channel:
mask = torch.ones(input.data.shape[0], input.data.shape[1], input.data.shape[2], input.data.shape[3], input.data.shape[4]).to(input)
else:
mask = torch.ones(1, 1, input.data.shape[2], input.data.shape[3], input.data.shape[4]).to(input)
else:
mask = mask_in
self.update_mask = F.conv3d(mask, self.weight_maskUpdater, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=1)
self.mask_ratio = self.slide_winsize/(self.update_mask + 1e-8)
# self.mask_ratio = torch.max(self.update_mask)/(self.update_mask + 1e-8)
self.update_mask = torch.clamp(self.update_mask, 0, 1)
self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
raw_out = super(PartialConv3d, self).forward(torch.mul(input, mask_in) if mask_in is not None else input)
if self.bias is not None:
bias_view = self.bias.view(1, self.out_channels, 1, 1, 1)
output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
output = torch.mul(output, self.update_mask)
else:
output = torch.mul(raw_out, self.mask_ratio)
if self.return_mask:
return output, self.update_mask
else:
return output
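# Usage sketch (illustrative): a 3D partial convolution over a volume with a
# carved-out region; the single-channel mask marks valid voxels with 1.
#   pconv = PartialConv3d(1, 8, kernel_size=3, padding=1, return_mask=True)
#   vol = torch.rand(1, 1, 16, 64, 64)
#   mask = torch.ones(1, 1, 16, 64, 64)
#   mask[:, :, :, 16:48, 16:48] = 0  # hole
#   out, updated_mask = pconv(vol, mask_in=mask)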
| partialconv-master | models/partialconv3d.py |
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: Guilin Liu ([email protected])
###############################################################################
import torch
import torch.nn.functional as F
from torch import nn, cuda
from torch.autograd import Variable
class PartialConv2d(nn.Conv2d):
def __init__(self, *args, **kwargs):
# whether the mask is multi-channel or not
if 'multi_channel' in kwargs:
self.multi_channel = kwargs['multi_channel']
kwargs.pop('multi_channel')
else:
self.multi_channel = False
if 'return_mask' in kwargs:
self.return_mask = kwargs['return_mask']
kwargs.pop('return_mask')
else:
self.return_mask = False
super(PartialConv2d, self).__init__(*args, **kwargs)
if self.multi_channel:
self.weight_maskUpdater = torch.ones(self.out_channels, self.in_channels, self.kernel_size[0], self.kernel_size[1])
else:
self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0], self.kernel_size[1])
self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] * self.weight_maskUpdater.shape[3]
self.last_size = (None, None, None, None)
self.update_mask = None
self.mask_ratio = None
def forward(self, input, mask_in=None):
assert len(input.shape) == 4
if mask_in is not None or self.last_size != tuple(input.shape):
self.last_size = tuple(input.shape)
with torch.no_grad():
if self.weight_maskUpdater.type() != input.type():
self.weight_maskUpdater = self.weight_maskUpdater.to(input)
if mask_in is None:
# if mask is not provided, create a mask
if self.multi_channel:
mask = torch.ones(input.data.shape[0], input.data.shape[1], input.data.shape[2], input.data.shape[3]).to(input)
else:
mask = torch.ones(1, 1, input.data.shape[2], input.data.shape[3]).to(input)
else:
mask = mask_in
self.update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=1)
# for mixed precision training, change 1e-8 to 1e-6
self.mask_ratio = self.slide_winsize/(self.update_mask + 1e-8)
# self.mask_ratio = torch.max(self.update_mask)/(self.update_mask + 1e-8)
self.update_mask = torch.clamp(self.update_mask, 0, 1)
self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
raw_out = super(PartialConv2d, self).forward(torch.mul(input, mask) if mask_in is not None else input)
if self.bias is not None:
bias_view = self.bias.view(1, self.out_channels, 1, 1)
output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
output = torch.mul(output, self.update_mask)
else:
output = torch.mul(raw_out, self.mask_ratio)
if self.return_mask:
return output, self.update_mask
else:
return output
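# Usage sketch (illustrative): convolving an image whose centre is masked out;
# the returned mask marks output positions that saw at least one valid pixel.
#   pconv = PartialConv2d(3, 16, kernel_size=3, padding=1, return_mask=True)
#   img = torch.rand(1, 3, 64, 64)
#   mask = torch.ones(1, 1, 64, 64)
#   mask[:, :, 16:48, 16:48] = 0  # hole
#   out, updated_mask = pconv(img, mask_in=mask)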
| partialconv-master | models/partialconv2d.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
suite = {
# --------------------------------------------------------------------------------------------------------------
#
# METADATA
#
# --------------------------------------------------------------------------------------------------------------
"mxversion": "5.190.1",
"name": "grcuda",
"versionConflictResolution": "latest",
"version": "1.0.0",
"release": False,
"groupId": "com.nvidia.grcuda",
"developer": {
"name": "grCUDA Developers",
"organization": "grCUDA Developers",
},
# --------------------------------------------------------------------------------------------------------------
#
# DEPENDENCIES
#
# --------------------------------------------------------------------------------------------------------------
"imports": {
"suites": [
{
"name": "truffle",
"version": "c541f641249fb5d615aa8e375ddc950d3b5b3715",
"subdir": True,
"urls": [
{"url": "https://github.com/oracle/graal", "kind": "git"},
]
},
],
},
# --------------------------------------------------------------------------------------------------------------
#
# REPOS
#
# --------------------------------------------------------------------------------------------------------------
"repositories": {
},
"defaultLicense": "BSD-3",
# --------------------------------------------------------------------------------------------------------------
#
# LIBRARIES
#
# --------------------------------------------------------------------------------------------------------------
"libraries": {
},
# --------------------------------------------------------------------------------------------------------------
#
# PROJECTS
#
# --------------------------------------------------------------------------------------------------------------
"externalProjects": {
},
"projects": {
"com.nvidia.grcuda.parser.antlr": {
"subDir": "projects",
"buildEnv": {
"ANTLR_JAR": "<path:truffle:ANTLR4_COMPLETE>",
"PARSER_PATH": "<src_dir:com.nvidia.grcuda>/com/nvidia/grcuda/parser/antlr",
"OUTPUT_PATH": "<src_dir:com.nvidia.grcuda>/com/nvidia/grcuda/parser/antlr",
"PARSER_PKG": "com.nvidia.grcuda.parser.antlr",
"POSTPROCESSOR": "<src_dir:com.nvidia.grcuda.parser.antlr>/postprocessor.py",
},
"dependencies": [
"truffle:ANTLR4_COMPLETE",
],
"native": True,
"vpath": True,
},
"com.nvidia.grcuda": {
"subDir": "projects",
"license": ["BSD-3"],
"sourceDirs": ["src"],
"javaCompliance": "1.8",
"annotationProcessors": ["truffle:TRUFFLE_DSL_PROCESSOR"],
"dependencies": [
"truffle:TRUFFLE_API",
"sdk:GRAAL_SDK",
"truffle:ANTLR4",
],
"buildDependencies": ["com.nvidia.grcuda.parser.antlr"],
"checkstyleVersion": "8.8",
},
"com.nvidia.grcuda.test": {
"subDir": "projects",
"sourceDirs": ["src"],
"dependencies": [
"com.nvidia.grcuda",
"mx:JUNIT",
"truffle:TRUFFLE_TEST"
],
"checkstyle": "com.nvidia.grcuda",
"javaCompliance": "1.8",
"annotationProcessors": ["truffle:TRUFFLE_DSL_PROCESSOR"],
"workingSets": "Truffle,CUDA",
"testProject": True,
},
},
"licenses": {
"BSD-3": {
"name": "3-Clause BSD License",
"url": "http://opensource.org/licenses/BSD-3-Clause",
},
},
# --------------------------------------------------------------------------------------------------------------
#
# DISTRIBUTIONS
#
# --------------------------------------------------------------------------------------------------------------
"distributions": {
"GRCUDA": {
"dependencies": [
"com.nvidia.grcuda",
],
"distDependencies": [
"truffle:TRUFFLE_API",
"sdk:GRAAL_SDK",
],
"sourcesPath": "grcuda.src.zip",
"description": "grCUDA",
},
"GRCUDA_UNIT_TESTS": {
"description": "grCUDA unit tests",
"dependencies": [
"com.nvidia.grcuda.test",
],
"exclude": ["mx:JUNIT"],
"distDependencies": [
"GRCUDA",
"truffle:TRUFFLE_TEST"
],
"sourcesPath": "grcuda.tests.src.zip",
"testDistribution": True,
},
},
}
| grcuda-master | mx.grcuda/suite.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import mx
import mx_subst
def _get_src_dir(projectname):
for suite in mx.suites():
for p in suite.projects:
if p.name == projectname:
if len(p.source_dirs()) > 0:
return p.source_dirs()[0]
else:
return p.dir
mx.abort("Could not find src dir for project %s" % projectname)
mx_subst.path_substitutions.register_with_arg('src_dir', _get_src_dir)
| grcuda-master | mx.grcuda/mx_grcuda.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Fixes up the Java code generated by ANTLR4 to get checkstyle pass
Adds copyright header, disables checkstyle and Eclipse code formatter.
"""
import sys
JAVA_COPYRIGHT_HEADER = """\
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
DISABLE_CHECKSTYLE = '// Checkstyle: stop'
DISABLE_FORMATTER = '// @formatter:off'
def transform(lines):
"""Adds header, disables checkstyle and formatter, then applies all
transforms on every line."""
out_lines = []
out_lines.append(JAVA_COPYRIGHT_HEADER + '\n')
out_lines.append(DISABLE_CHECKSTYLE + '\n')
out_lines.append(DISABLE_FORMATTER + '\n')
for line in lines:
l = line
# additional line specific transforms
out_lines.append(l)
# make sure that file ends with a new line
if out_lines[-1][-1] != '\n':
out_lines[-1] += '\n'
return ''.join(out_lines)
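# e.g., transform(['class Parser {}']) yields the copyright header, the two
# checkstyle/formatter pragmas, then the original line with a newline appended.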
def main():
if len(sys.argv) < 2:
print('file arguments missing', file=sys.stderr)
sys.exit(1)
file_names = sys.argv[1:]
for file_name in file_names:
with open(file_name, 'rt') as in_lines:
out_lines = transform(in_lines)
with open(file_name, 'w') as out_file:
out_file.write(out_lines)
if __name__ == '__main__':
main()
| grcuda-master | projects/com.nvidia.grcuda.parser.antlr/postprocessor.py |
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#
import tensorrt as trt
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], ".."))
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
class ModelData(object):
INPUT_NAME ="conv2d_input"
INPUT_SHAPE = (1, 28, 28)
OUTPUT_NAME = "dense_2/Softmax"
def build_engine(model_file):
# For more information on TRT basics, refer to the introductory samples.
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
builder.max_workspace_size = 4 << 30 # 4 GiB
builder.max_batch_size = 1
# Parse the Uff Network
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output(ModelData.OUTPUT_NAME)
parser.parse(model_file, network)
# Build and return an engine.
return builder.build_cuda_engine(network)
def main():
if len(sys.argv) != 3:
print(f'{sys.argv[0]} model_file.uff engine_file.engine')
return
model_file = sys.argv[1]
engine_file = sys.argv[2]
print(f'model file: {model_file}')
with build_engine(model_file) as engine, open(engine_file, 'wb') as f:
f.write(engine.serialize())
print(f'written engine to {engine_file}')
if __name__ == '__main__':
main()
| grcuda-master | examples/tensorrt/python/build_engine.py |
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#
"""This file contains functions for training a TensorFlow model"""
import datetime
import tensorflow as tf
import numpy as np
if not tf.__version__.startswith('1.'):
print('Graph freezing only supported in TF 1.x')
exit(1)
def process_dataset():
# Import the data
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Reshape the data
NUM_TRAIN = 60000
NUM_TEST = 10000
x_train = np.reshape(x_train, (NUM_TRAIN, 28, 28, 1))
x_test = np.reshape(x_test, (NUM_TEST, 28, 28, 1))
return x_train, y_train, x_test, y_test
def create_model():
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=[28, 28, 1]))
model.add(tf.keras.layers.AveragePooling2D())
model.add(tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.AveragePooling2D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=120, activation='relu'))
model.add(tf.keras.layers.Dense(units=84, activation='relu'))
model.add(tf.keras.layers.Dense(units=10, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
def save(model, filename):
# First freeze the graph and remove training nodes.
output_names = model.output.op.name
sess = tf.compat.v1.keras.backend.get_session()
frozen_graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [output_names])
frozen_graph = tf.compat.v1.graph_util.remove_training_nodes(frozen_graph)
# Save the model
with open(filename, "wb") as ofile:
ofile.write(frozen_graph.SerializeToString())
def main():
print('using TensorFlow version: ', tf.__version__)
x_train, y_train, x_test, y_test = process_dataset()
model = create_model()
model.summary()
# Train the model on the data
num_epochs = 20
log_dir = 'logs/train' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, update_freq='epoch', write_grads=True)
model.fit(x_train, y_train, batch_size=128, validation_split=0.1, epochs=num_epochs, callbacks=[tensorboard_callback])
# Evaluate the model on test data
log_dir = 'logs/test' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, update_freq='batch', write_grads=True)
eval_loss, eval_accuracy = model.evaluate(x_test, y_test, callbacks=[tensorboard_callback])
print(f'after {num_epochs} training epochs: eval loss={eval_loss}, eval accuracy={eval_accuracy}')
# Save model
save(model, filename="../models/lenet5.pb")
if __name__ == '__main__':
main()
| grcuda-master | examples/tensorrt/python/model.py |
#
# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#
# This sample uses a UFF MNIST model to create a TensorRT Inference Engine
from random import randint
from PIL import Image
import numpy as np
import pycuda.driver as cuda
# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
import sys, os
# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Loads a test case into the provided pagelocked_buffer.
def load_normalized_test_case(image_file, pagelocked_buffer):
# Flatten the image into a 1D array, normalize, and copy to pagelocked memory.
img = np.array(Image.open(image_file)).ravel()
for idx, pixel in enumerate(img):
print('@' if pixel >= 128 else ' ', end='')
if idx % 28 == 27:
print('')
np.copyto(pagelocked_buffer, 1.0 - img / 255.0)
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def main():
if len(sys.argv) != 3:
print(f'{sys.argv[0]} file.engine image.pgm')
return
engine_file = sys.argv[1]
image_file = sys.argv[2]
with open(engine_file, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
with runtime.deserialize_cuda_engine(f.read()) as engine:
# Build an engine, allocate buffers and create a stream.
# For more information on buffer allocation, refer to the introductory samples.
inputs, outputs, bindings, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
load_normalized_test_case(image_file, pagelocked_buffer=inputs[0].host)
# For more information on performing inference, refer to the introductory samples.
# The common.do_inference function will return a list of outputs - we only have one in this case.
[output] = do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
for digit, prob in enumerate(output):
print(f'{digit}: {prob:.6f}')
pred = np.argmax(output)
print(f'Prediction: {pred}')
if __name__ == '__main__':
main()
| grcuda-master | examples/tensorrt/python/load_and_sample.py |
#!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from functools import reduce
from PIL import Image
import tensorflow as tf
def main():
"""Downloads MNIST dataset through Keras, extracts the first ten
unique digits from the test set and writes them out as in PGM. """
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
def lowest_index(agg, element):
index, digit = element
if digit not in agg:
agg[digit] = index
else:
agg[digit] = min(agg[digit], index)
return agg
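    # Sketch of the reduce below: enumerate(y_test) yields (index, digit)
    # pairs, and lowest_index folds them into a {digit: first_index} map,
    # e.g. reduce(lowest_index, [(0, 7), (1, 2), (2, 7)], {}) == {7: 0, 2: 1}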
for digit, first_index in sorted(reduce(lowest_index, enumerate(y_test), {}).items()):
file_name = f'{digit}.pgm'
print('writing ' + file_name + '...')
image = Image.fromarray(255 - x_test[first_index])
image.save(file_name)
if __name__ == '__main__':
main()
| grcuda-master | examples/tensorrt/data/download_mnist_test_digits.py |
from pathlib import Path
from setuptools import setup, find_packages
# Version info -- read without importing
_locals = {}
with open("runx/_version.py") as fp:
exec(fp.read(), None, _locals)
version = _locals["__version__"]
with open("README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
parent = Path(__file__).resolve().parent
setup(
name="runx",
version=version,
author="Andrew Tao",
author_email="[email protected]",
description="runx - experiment manager for machine learning research",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NVIDIA/runx",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=requirements,
python_requires='>=3.6',
)
| runx-master | setup.py |
from .collections import AttrDict
__C = AttrDict()
cfg = __C
# Random note: avoid using '.ON' as a config key since yaml converts it to True;
__C.FARM = None
__C.LOGROOT = None
__C.EXP_NAME = None
| runx-master | runx/config.py |
__version_info__ = (0, 0, 11)
__version__ = ".".join(map(str, __version_info__))
| runx-master | runx/_version.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Routines to build farm submission commands.
"""
import os
from .config import cfg
from .utils import exec_cmd
def expand_resources(resources):
"""
Construct the submit_job arguments from the resource dict.
In general, a k,v from the dict turns into an argument '--k v'.
If the value is a boolean, then the argument turns into a flag.
If the value is a list/tuple, then multiple '--k v' are presented,
one for each list item.
:resources: a dict of arguments for the farm submission command.
"""
cmd = ''
for field, val in resources.items():
if type(val) is bool:
if val is True:
cmd += '--{} '.format(field)
elif type(val) is list or type(val) is tuple:
for mp in val:
cmd += '--{} {} '.format(field, mp)
else:
cmd += '--{} {} '.format(field, val)
return cmd
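# Example (a sketch with made-up resource keys, not a real farm config):
#   expand_resources({'gpu': 2, 'preempt': True, 'mount': ['/data', '/ckpt']})
#   returns '--gpu 2 --preempt --mount /data --mount /ckpt '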
def build_draco(train_cmd, job_name, resources, logdir):
"""
For using Draco, only for NVIDIA-ADLR folks
See build_farm_cmd for arg description
"""
assert 'submit_job' in cfg.SUBMIT_CMD, \
'Expected \'submit_job\' as SUBMIT_CMD. Exiting ...'
submit_cmd = cfg.SUBMIT_CMD + ' '
submit_cmd += expand_resources(resources)
submit_cmd += f' --name {job_name}'
submit_cmd += f' --command \' {train_cmd} \''
submit_cmd += f' --logdir {logdir}/gcf_log'
return submit_cmd
def build_ngc_generic(train_cmd, job_name, resources, logdir):
"""
Compose the farm submission command for generic NGC users, folks
both inside and outside of Nvidia.
The SUBMIT_CMD should be 'ngc batch run'.
See build_farm_cmd for arg description
"""
assert cfg.SUBMIT_CMD == 'ngc batch run', \
'Expected SUBMIT_CMD to be \'ngc batch run\'. Exiting ...'
submit_cmd = cfg.SUBMIT_CMD + ' '
submit_cmd += expand_resources(resources)
submit_cmd += f' --name {job_name}'
submit_cmd += f' --commandline \' {train_cmd} \''
submit_cmd += f' --workspace {cfg.WORKSPACE}:{cfg.NGC_LOGROOT}:RW'
return submit_cmd
def build_ngc(train_cmd, job_name, resources, logdir):
"""
For using NGC with submit_job, only for NVIDIA-ADLR folks.
See build_farm_cmd for arg description
"""
if 'submit_job' in cfg.SUBMIT_CMD:
ngc_logdir = logdir.replace(cfg.LOGROOT, cfg.NGC_LOGROOT)
return build_draco(train_cmd, job_name, resources, ngc_logdir)
else:
return build_ngc_generic(train_cmd, job_name, resources, logdir)
def build_generic(train_cmd, job_name, resources, logdir):
"""
Generic farm support
See build_farm_cmd for arg description
"""
if 'submit_job' in cfg.SUBMIT_CMD:
ngc_logdir = logdir.replace(cfg.LOGROOT, cfg.NGC_LOGROOT)
return build_draco(train_cmd, job_name, resources, ngc_logdir)
else:
return build_ngc_generic(train_cmd, job_name, resources, logdir)
def build_farm_cmd(train_cmd, job_name, resources, logdir):
"""
This function builds a farm submission command.
:train_cmd: full training command
:job_name: unique job_name, to be used for tracking
:resources: farm submission command args, pulled from .runx
:logdir: target log directory
"""
if 'ngc' in cfg.FARM:
return build_ngc(train_cmd, job_name, resources, logdir)
elif 'draco' in cfg.FARM:
return build_draco(train_cmd, job_name, resources, logdir)
else:
        raise ValueError(f'Unsupported farm: {cfg.FARM}')
def upload_to_ngc(staging_logdir):
"""
Upload single run's code to NGC workspace.
Within the job, the workspace will be mounted at: <NGC_LOGROOT>.
The full path of the logdir in the job is: <NGC_LOGROOT>/<exp_name>/<run_name>
:staging_logdir: path to the staging logdir, on the client machine
"""
fields = staging_logdir.split('/')
exp_name = fields[-2]
run_name = fields[-1]
ngc_workspace = cfg.WORKSPACE
target_dir = os.path.join(exp_name, run_name)
    msg = 'Uploading experiment to {} in workspace {} ...'
print(msg.format(target_dir, ngc_workspace))
cmd = ('ngc workspace upload --source {staging_logdir} '
'--destination {target_dir} {workspace}')
cmd = cmd.format(staging_logdir=staging_logdir, target_dir=target_dir,
workspace=ngc_workspace)
exec_cmd(cmd)
| runx-master | runx/farm.py |
# flake8: noqa
from ._version import __version__, __version_info__
| runx-master | runx/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""A simple attribute dictionary used for representing configuration options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class AttrDict(dict):
"""Dictionary of options"""
IMMUTABLE = '__immutable__'
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__[AttrDict.IMMUTABLE] = False
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
if name in self:
return self[name]
raise AttributeError(name)
def __setattr__(self, name, value):
if not self.__dict__[AttrDict.IMMUTABLE]:
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
else:
raise AttributeError(
'Attempted to set "{}" to "{}", but AttrDict is immutable'.
format(name, value)
)
def immutable(self, is_immutable):
"""
Set immutability to is_immutable and recursively apply the setting
to all nested AttrDicts
:param is_immutable: boolean, whether the dictionary is immutable or not
"""
self.__dict__[AttrDict.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
for v in self.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
def is_immutable(self):
"""
Property, whether dictionary is immutable or not
:return: boolean, True if dictionary is immutable
"""
return self.__dict__[AttrDict.IMMUTABLE]
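# Minimal usage sketch (hypothetical keys, for illustration only):
#   d = AttrDict({'LR': 0.1})
#   d.EPOCHS = 90            # attribute-style writes add dict keys
#   assert d['EPOCHS'] == 90 and d.LR == 0.1
#   d.immutable(True)        # further writes now raise AttributeError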
| runx-master | runx/collections.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import yaml
import shlex
import json
from warnings import warn
import subprocess
from subprocess import call, getoutput, DEVNULL
from .config import cfg
def exec_cmd(cmd):
"""
Execute a command and print stderr/stdout to the console
"""
print(cmd)
result = subprocess.run(cmd, stderr=subprocess.PIPE, shell=True)
if result.stderr:
message = result.stderr.decode("utf-8")
print(message)
trn_names = ('trn', 'train', 'training')
val_names = ('val', 'validate', 'validation', 'test')
def get_cfg(key):
if key in cfg:
return cfg[key]
else:
warn(f'Please define {key} in .runx or your experiment file')
return None
def read_config_item(config, key, optional=True):
if key in config:
return config[key]
elif optional:
return None
else:
        raise KeyError(f'can\'t find {key} in config')
def read_config_file(args=None):
local_config_fn = './.runx'
home = os.path.expanduser('~')
global_config_fn = '{}/.config/runx.yml'.format(home)
if args is not None and hasattr(args, 'config_file') and \
args.config_file is not None and \
os.path.isfile(args.config_file):
config_fn = args.config_file
elif os.path.isfile(local_config_fn):
config_fn = local_config_fn
elif os.path.exists(global_config_fn):
config_fn = global_config_fn
else:
        raise FileNotFoundError(
            'can\'t find ./.runx or ~/.config/runx.yml config files')
    # PyYAML >= 5.1 requires an explicit Loader; older versions lack FullLoader
    if 'FullLoader' in dir(yaml):
global_config = yaml.load(open(config_fn), Loader=yaml.SafeLoader)
else:
global_config = yaml.safe_load(open(config_fn))
return global_config
def read_config(args):
'''
Merge the global and experiment config files into a single config
Need to support the case where there is no FARM defined, which should
be fine for interactive jobs.
'''
global_config = read_config_file(args)
if hasattr(args, 'farm') and args.farm is not None:
global_config['FARM'] = args.farm
# Support the case of no farm
if 'FARM' not in global_config or \
global_config['FARM'] not in global_config:
no_farm = True
else:
no_farm = False
farm_name = read_config_item(global_config, 'FARM')
# Dereference the farm config items
for k, v in global_config[farm_name].items():
global_config[k] = v
# Inherit global config into experiment config:
experiment = global_config
# Merge experiment settings into the global configuration.
# This allows an experiment yaml to override the settings in the .runx
if hasattr(args, 'exp_yml'):
exp_config = yaml.load(open(args.exp_yml), Loader=yaml.SafeLoader)
for k, v in exp_config.items():
if k in experiment:
experiment.update(v)
else:
experiment[k] = v
if args.exp_name is not None:
cfg.EXP_NAME = args.exp_name
elif args.exp_yml is None:
cfg.EXP_NAME = 'none'
else:
cfg.EXP_NAME = os.path.splitext(os.path.basename(args.exp_yml))[0]
if no_farm:
cfg.FARM = 'NOFARM'
cfg.LOGROOT = read_config_item(experiment, 'LOGROOT')
cfg.SUBMIT_CMD = 'echo'
cfg.PYTHONPATH = ''
# have to define resources because runx expects it
global_config['RESOURCES'] = {}
else:
cfg.FARM = read_config_item(experiment, 'FARM')
cfg.LOGROOT = read_config_item(experiment, 'LOGROOT')
cfg.SUBMIT_CMD = read_config_item(experiment, 'SUBMIT_CMD')
cfg.PYTHONPATH = read_config_item(experiment, 'PYTHONPATH')
if 'ngc' in cfg.FARM:
cfg.NGC_LOGROOT = read_config_item(experiment, 'NGC_LOGROOT')
cfg.WORKSPACE = read_config_item(experiment, 'WORKSPACE')
return experiment
def get_logroot():
global_config = read_config_file()
return read_config_item(global_config, 'LOGROOT')
def get_bigfiles(root):
output = getoutput('find {} -size +100k'.format(root))
if len(output):
bigfiles = output.split('\n')
return bigfiles
else:
return []
def save_code(logdir, coderoot):
zip_outfile = os.path.join(logdir, 'code.tgz')
# skip over non-sourcecode items
exclude_list = ['*.pth', '*.jpg', '*.jpeg', '*.pyc', '*.so', '*.o',
'*.git', '__pycache__', '*~']
bigfiles = get_bigfiles(coderoot)
exclude_str = ''
for ex in exclude_list + bigfiles:
exclude_str += ' --exclude=\'{}\''.format(ex)
cmd = 'tar -czvf {} {} {}'.format(zip_outfile, exclude_str, coderoot)
call(shlex.split(cmd), stdout=DEVNULL, stderr=DEVNULL)
def save_hparams(hparams, logdir):
"""
Save hyperparameters into a json file
"""
json_fn = os.path.join(logdir, 'hparams.json')
if os.path.isfile(json_fn):
return
with open(json_fn, 'w') as outfile:
json.dump(hparams, outfile, indent=4)
class _CallableProxy:
def __init__(self, real_callable, post_hook=None):
self.real_callable = real_callable
self.post_hook = post_hook
def __call__(self, *args, **kwargs):
ret_val = self.real_callable(*args, **kwargs)
if self.post_hook is not None:
self.post_hook()
return ret_val
class ConditionalProxy:
"""
This object can be used to serve as a proxy on an object where we want to
forward all function calls along to the dependent object, but only when
some condition is true. For example, the primary use case for this object
is to deal with that fact that in a distributed training job, we only want
to manage artifacts (checkpoints, logs, TB summaries) on the rank-0
process.
So, let's say that we have this class:
```
class Foo:
def bar(self, val):
pass
def baz(self, val1, val2):
pass
```
and we wrap it with this object:
```
proxy = ConditionalProxy(Foo(), rank == 0)
proxy.bar(42)
proxy.baz(10, 20)
proxy.some_other_function('darn it') # Throws an exception because `Foo`
# doesn't have an implementation for
# this.
```
In this case, if `rank == 0`, then we will end up calling `Foo.bar` and
`Foo.baz`.
If `rank != 0`, then the calls will be ignored.
In addition to the basic usage, you can also add a `post_hook` to the
proxy, which is a callable that takes no arguments. The proxy will call
that function after each function call made through the proxy, but only
when `condition == True`.
"""
def __init__(self, real_object, condition, post_hook=None):
self.real_object = real_object
self.condition = condition
self.post_hook = post_hook
@staticmethod
def _throw_away(*args, **kwargs):
pass
def __getattr__(self, name):
if not self.condition:
# When `self.condition == False`, then we want to return a function
# that can take any form of arguments, and does nothing. This works
# under the assumption that the only API interface for the
# dependent object is function, e.g. this would be awkward if the
# caller was trying to access a member variable.
return ConditionalProxy._throw_away
real_fn = getattr(self.real_object, name)
# Wrap the return function in a `_CallableProxy` so that we can
# invoke the `self.post_hook`, if specified, after the real function
# executes.
return _CallableProxy(real_fn, self.post_hook)
| runx-master | runx/utils.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
from collections import OrderedDict
from coolname import generate_slug
from datetime import datetime
from shutil import copytree, ignore_patterns
import os
import re
import sys
import subprocess
import argparse
import itertools
from .utils import read_config, exec_cmd, get_cfg
from .farm import build_farm_cmd, upload_to_ngc
parser = argparse.ArgumentParser(description='Experiment runner')
parser.add_argument('exp_yml', type=str,
help='Experiment yaml file')
parser.add_argument('--exp_name', type=str, default=None,
help=('Override the *experiment* name, which normally is '
'taken from the experiment yaml filename.'))
parser.add_argument('--tag', type=str, default=None,
                    help=('Add a string to the generated *run* name for '
                          'identification.'))
parser.add_argument('--no_cooldir', action='store_true',
help=('For the *run* name, don\'t auto-generate a '
'coolname or datestring, only use the tag'))
parser.add_argument('--no_run', '-n', action='store_true',
help='Don\'t run, just display the command.')
parser.add_argument('--interactive', '-i', action='store_true',
help='Run interactively instead of submitting to farm.')
parser.add_argument('--farm', type=str, default=None,
help='Select farm for workstation submission')
parser.add_argument('--config_file', '-c', type=str, default=None,
help='Use this file instead of .runx')
args = parser.parse_args()
def expand_hparams(hparams):
"""
Construct the training script args from the hparams
"""
cmd = ''
for field, val in hparams.items():
if type(val) is bool:
if val is True:
cmd += '--{} '.format(field)
elif val != 'None':
cmd += '--{} {} '.format(field, val)
return cmd
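# Example (a sketch with invented hyperparameter names):
#   expand_hparams({'lr': 0.1, 'amp': True, 'resume': 'None'})
#   returns '--lr 0.1 --amp '   (values equal to the string 'None' are dropped)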
def construct_cmd(cmd, hparams, logdir):
"""
Build training command by starting with user-supplied 'CMD'
and then adding in hyperparameters, which came from expanding the
cross-product of all permutations from the experiment yaml file.
We always copy the code to the target logdir and run from there.
:cmd: farm submission command
:hparams: hyperparams for training command
"""
# First, add hyperparameters
cmd += ' ' + expand_hparams(hparams)
# Expand PYTHONPATH, if necessary
if get_cfg('PYTHONPATH') is not None:
pythonpath = get_cfg('PYTHONPATH')
pythonpath = pythonpath.replace('LOGDIR', logdir)
else:
pythonpath = f'{logdir}/code'
# For signalling reasons, we have to insert the exec here when using submit_job.
# Nvidia-internal thing.
exec_str = ''
if 'submit_job' in get_cfg('SUBMIT_CMD'):
exec_str = 'exec'
cmd = f'cd {logdir}/code; PYTHONPATH={pythonpath} {exec_str} {cmd}'
return cmd
def save_cmd(cmd, logdir):
"""
Record the submit command
"""
fp = open(os.path.join(logdir, 'submit_cmd.sh'), 'w')
fp.write(cmd)
fp.write('\n')
fp.close()
def islist(elem):
return type(elem) is list or type(elem) is tuple
def cross_product_hparams(hparams):
"""
This function takes in just the hyperparameters for the target script,
such as your main.py.
inputs:
hparams is a dict, where each key is the name of a commandline arg and
the value is the target value of the arg.
However any arg can also be a list and so this function will calculate
the cross product for all combinations of all args.
output:
The return value is a sequence of lists. Each list is one of the
permutations of argument values.
"""
hparam_values = []
# turn every hyperparam into a list, to prep for itertools.product
for elem in hparams.values():
if islist(elem):
hparam_values.append(elem)
else:
hparam_values.append([elem])
expanded_hparams = itertools.product(*hparam_values)
# have to do this in order to know length
expanded_hparams, dup_expanded = itertools.tee(expanded_hparams, 2)
expanded_hparams = list(expanded_hparams)
num_cases = len(list(dup_expanded))
return expanded_hparams, num_cases
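# Example (illustrative sketch):
#   expanded, n = cross_product_hparams({'lr': [0.1, 0.01], 'arch': 'r18'})
#   n == 2 and expanded == [(0.1, 'r18'), (0.01, 'r18')]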
def get_field(adict, f, required=True):
if required:
assert f in adict, 'expected {} to be defined in experiment'.format(f)
return adict[f] if f in adict else None
def do_keyword_expansion(alist, pairs):
    """
    Substitute a string in place of certain keywords
    """
    if type(alist) is list or type(alist) is tuple:
        for i, v in enumerate(alist):
            if type(v) == str:
                for keyword, replacement in pairs:
                    alist[i] = alist[i].replace(keyword, replacement)
    elif type(alist) is dict:
        for a_k, a_v in alist.items():
            if type(a_v) == str:
                for keyword, replacement in pairs:
                    alist[a_k] = alist[a_k].replace(keyword, replacement)
    else:
        raise TypeError('do_keyword_expansion expects a list, tuple, or dict')
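# Example (sketch; LOGDIR is the magic keyword substituted by run_yaml below):
#   hparams = {'logdir': 'LOGDIR/ckpt'}
#   do_keyword_expansion(hparams, [('LOGDIR', '/logs/exp/run1')])
#   hparams == {'logdir': '/logs/exp/run1/ckpt'}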
def make_cool_names():
tagname = args.tag + '_' if args.tag else ''
datestr = datetime.now().strftime("_%Y.%m.%d_%H.%M")
if args.no_cooldir:
coolname = tagname
else:
coolname = tagname + generate_slug(2) + datestr
# Experiment directory is the parent of N runs
expdir = os.path.join(get_cfg('LOGROOT'), get_cfg('EXP_NAME'))
# Each run has a logdir
logdir_name = coolname
logdir = os.path.join(expdir, logdir_name)
# Jobname is a unique name for the batch job
job_name = '{}_{}'.format(get_cfg('EXP_NAME'), coolname)
return job_name, logdir, logdir_name, expdir
def copy_code(logdir, runroot, code_ignore_patterns):
"""
Copy sourcecode to logdir's code directory
"""
print('Copying codebase to {} ...'.format(logdir))
tgt_code_dir = os.path.join(logdir, 'code')
if code_ignore_patterns is not None:
code_ignore_patterns = ignore_patterns(*code_ignore_patterns)
copytree(runroot, tgt_code_dir, ignore=code_ignore_patterns)
def hacky_substitutions(hparams, resource_copy, logdir, runroot):
# Substitute the true logdir in for the magic variable LOGDIR
do_keyword_expansion(hparams, [('LOGDIR', logdir)])
do_keyword_expansion(resource_copy, [('LOGDIR', logdir)])
# Build hparams to save out after LOGDIR but before deleting
# the key 'SUBMIT_JOB.NODES', so that it is part of the hparams saved
# This is done so that we can see the node count in sumx.
# hparams_out = hparams.copy()
# SUBMIT_JOB.NODES is a hyperparmeter that sets the node count
# This is actually a resource, so when we find this arg, we delete
# it from the list of hyperparams that the training script sees.
if 'SUBMIT_JOB.NODES' in hparams:
resource_copy['nodes'] = hparams['SUBMIT_JOB.NODES']
del hparams['SUBMIT_JOB.NODES']
if 'SUBMIT_JOB.PARTITION' in hparams:
resource_copy['partition'] = hparams['SUBMIT_JOB.PARTITION']
del hparams['SUBMIT_JOB.PARTITION']
# Record the directory from whence the experiments were launched
# hparams_out['srcdir'] = runroot
# return hparams_out
def get_tag(hparams):
# Pull tag from hparams and then remove it
# Also can do variable substitution into tag
if 'RUNX.TAG' in hparams:
tag_val = hparams['RUNX.TAG']
# do variable expansion:
for sub_key, sub_val in hparams.items():
search_str = '{' + sub_key + '}'
tag_val = re.sub(search_str, str(sub_val), tag_val)
hparams['RUNX.TAG'] = tag_val
args.tag = tag_val
del hparams['RUNX.TAG']
def skip_run(hparams):
return 'RUNX.SKIP' in hparams and hparams['RUNX.SKIP']
def get_code_ignore_patterns(experiment):
if 'CODE_IGNORE_PATTERNS' in experiment:
code_ignore_patterns = experiment['CODE_IGNORE_PATTERNS']
else:
code_ignore_patterns = '.git,*.pyc,docs*,test*'
code_ignore_patterns += ',*.pth' # don't copy checkpoints
code_ignore_patterns = code_ignore_patterns.split(',')
return code_ignore_patterns
def run_yaml(experiment, runroot):
"""
Run an experiment, expand hparams
"""
resources = get_field(experiment, 'RESOURCES')
code_ignore_patterns = get_code_ignore_patterns(experiment)
ngc_batch = 'ngc' in get_cfg('FARM') and not args.interactive
experiment_cmd = experiment['CMD']
# Build the args that the submit_cmd will see
yaml_hparams = OrderedDict()
# Add yaml_hparams
for k, v in experiment['HPARAMS'].items():
yaml_hparams[k] = v
# Calculate cross-product of hyperparams
expanded_hparams, num_cases = cross_product_hparams(yaml_hparams)
# Run each permutation
for i, hparam_vals in enumerate(expanded_hparams):
hparam_vals = list(hparam_vals)
hparam_keys = list(yaml_hparams.keys())
# hparams to use for experiment
hparams = {k: v for k, v in zip(hparam_keys, hparam_vals)}
if skip_run(hparams):
continue
get_tag(hparams)
job_name, logdir, coolname, expdir = make_cool_names()
resource_copy = resources.copy()
"""
A few different modes of operation:
1. interactive runs
a. copy local code to logdir under LOGROOT
b. cd to logdir, execute cmd
2. farm submission: non-NGC
In this regime, the LOGROOT is expected to be visible to the farm's
compute nodes
a. copy local code to logdir under LOGROOT
b. call cmd, which should invoke whatever you have specified for
SUBMIT_JOB
3. farm submission: NGC
a. copy local code to logdir under LOGROOT
b. ngc workspace upload the logdir to NGC_WORKSPACE
c. call cmd, which should invoke SUBMIT_JOB==`ngc batch run`
"""
if ngc_batch:
ngc_logdir = logdir.replace(get_cfg('LOGROOT'),
get_cfg('NGC_LOGROOT'))
hacky_substitutions(
hparams, resource_copy, ngc_logdir, runroot)
cmd = construct_cmd(experiment_cmd, hparams, ngc_logdir)
else:
hacky_substitutions(
hparams, resource_copy, logdir, runroot)
cmd = construct_cmd(experiment_cmd, hparams, logdir)
if not args.interactive:
cmd = build_farm_cmd(cmd, job_name, resource_copy, logdir)
if args.no_run:
print(cmd)
continue
# copy code to NFS-mounted share
copy_code(logdir, runroot, code_ignore_patterns)
# save some meta-data from run
save_cmd(cmd, logdir)
# upload to remote farm
if ngc_batch:
upload_to_ngc(logdir)
subprocess.call(['chmod', '-R', 'a+rw', expdir])
os.chdir(logdir)
if args.interactive:
print('Running job {}'.format(job_name))
else:
print('Submitting job {}'.format(job_name))
exec_cmd(cmd)
def run_experiment(exp_fn):
"""
Run an experiment, given a global config file + an experiment file.
The global config sets defaults that are inherited by the experiment.
"""
experiment = read_config(args)
assert 'HPARAMS' in experiment, 'experiment file is missing hparams'
# Iterate over hparams if it's a list
runroot = os.getcwd()
if isinstance(experiment['HPARAMS'], (list, tuple)):
# Support inheritance from the first hparams item in list
first_hparams = experiment['HPARAMS'][0].copy()
for hparams_set in experiment['HPARAMS']:
hparams = first_hparams.copy()
# Inheritance = first hparam set, updated with current hparam set
hparams.update(hparams_set)
# create a clean copy of the experiment and fill in hparams
experiment_copy = experiment.copy()
experiment_copy['HPARAMS'] = hparams
run_yaml(experiment_copy, runroot)
else:
run_yaml(experiment, runroot)
def main():
if os.path.exists(args.exp_yml):
run_experiment(args.exp_yml)
else:
print('couldn\'t find experiment file {}'.format(args.exp_yml))
sys.exit()
if __name__ == '__main__':
main()
| runx-master | runx/runx.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from collections import defaultdict
from contextlib import contextmanager
from shutil import copyfile
import csv
import os
import re
import shlex
import subprocess
import time
try:
from torch.utils.tensorboard import SummaryWriter
except ModuleNotFoundError:
from tensorboardX import SummaryWriter
import torch
try:
from .utils import (get_logroot, save_hparams, trn_names, val_names,
ConditionalProxy)
except ImportError:
# This is to allow the unit tests to run properly
from utils import (get_logroot, save_hparams, trn_names, val_names,
ConditionalProxy)
def is_list(x):
return isinstance(x, (list, tuple))
def get_gpu_utilization_pct():
'''
Use nvidia-smi to capture the GPU utilization, which is reported as an
integer in range 0-100.
'''
util = subprocess.check_output(
shlex.split('nvidia-smi --query-gpu="utilization.gpu" '
'--format=csv,noheader,nounits -i 0'))
util = util.decode('utf-8')
util = util.replace('\n', '')
return int(util)
class LogX(object):
def __init__(self, rank=0):
self.initialized = False
def initialize(self, logdir=None, coolname=False, hparams=None,
tensorboard=False, no_timestamp=False, global_rank=0,
eager_flush=True):
'''
Initialize logx
inputs
- logdir - where to write logfiles
- tensorboard - whether to write to tensorboard file
- global_rank - must set this if using distributed training, so we only
log from rank 0
- coolname - generate a unique directory name underneath logdir, else
use logdir as output directory
- hparams - only use if not launching jobs with runx, which also saves
the hparams.
- eager_flush - call `flush` after every tensorboard write
'''
self.rank0 = (global_rank == 0)
self.initialized = True
if logdir is not None:
self.logdir = logdir
else:
logroot = get_logroot()
if coolname:
from coolname import generate_slug
self.logdir = os.path.join(logroot, generate_slug(2))
else:
self.logdir = os.path.join(logroot, 'default')
# confirm target log directory exists
if not os.path.isdir(self.logdir):
os.makedirs(self.logdir, exist_ok=True)
if hparams is not None and self.rank0:
save_hparams(hparams, self.logdir)
# Tensorboard file
if self.rank0 and tensorboard:
self.tb_writer = SummaryWriter(log_dir=self.logdir,
flush_secs=1)
else:
self.tb_writer = None
self.eager_flush = eager_flush
# This allows us to use the tensorboard with automatic checking of both
# the `tensorboard` condition, as well as ensuring writes only happen
# on rank0. Any function supported by `SummaryWriter` is supported by
# `ConditionalProxy`. Additionally, flush will be called after any call
# to this.
self.tensorboard = ConditionalProxy(
self.tb_writer,
tensorboard and self.rank0,
post_hook=self._flush_tensorboard,
)
if not self.rank0:
return
# Metrics file
metrics_fn = os.path.join(self.logdir, 'metrics.csv')
self.metrics_fp = open(metrics_fn, mode='a+')
self.metrics_writer = csv.writer(self.metrics_fp, delimiter=',')
# Log file
log_fn = os.path.join(self.logdir, 'logging.log')
self.log_file = open(log_fn, mode='a+')
# save metric
self.save_metric = None
self.best_metric = None
self.save_ckpt_fn = ''
# Find the existing best checkpoint, and update `best_metric`,
# if available
self.best_ckpt_fn = self.get_best_checkpoint() or ''
if self.best_ckpt_fn:
best_chk = torch.load(self.best_ckpt_fn, map_location='cpu')
self.best_metric = best_chk.get('__metric', None)
self.epoch = defaultdict(lambda: 0)
self.no_timestamp = no_timestamp
# Initial timestamp, so that epoch time calculation is correct
phase = 'start'
csv_line = [phase]
# add epoch/iter
csv_line.append('{}/step'.format(phase))
csv_line.append(0)
# add timestamp
if not self.no_timestamp:
# this feature is useful for testing
csv_line.append('timestamp')
csv_line.append(time.time())
self.metrics_writer.writerow(csv_line)
self.metrics_fp.flush()
def __del__(self):
if self.initialized and self.rank0:
self.metrics_fp.close()
self.log_file.close()
def msg(self, msg):
'''
Print out message to std and to a logfile
'''
if not self.rank0:
return
print(msg)
self.log_file.write(msg + '\n')
self.log_file.flush()
def add_image(self, path, img, step=None):
'''
Write an image to the tensorboard file
'''
self.tensorboard.add_image(path, img, step)
def add_scalar(self, name, val, idx):
'''
Write a scalar to the tensorboard file
'''
self.tensorboard.add_scalar(name, val, idx)
def _flush_tensorboard(self):
if self.eager_flush and self.tb_writer is not None:
self.tb_writer.flush()
@contextmanager
def suspend_flush(self, flush_at_end=True):
prev_flush = self.eager_flush
self.eager_flush = False
yield
self.eager_flush = prev_flush
if flush_at_end:
self._flush_tensorboard()
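    # Usage sketch: batch many tensorboard writes and flush once at the end.
    #   with logx.suspend_flush():
    #       for step, loss in enumerate(losses):   # 'losses' is hypothetical
    #           logx.add_scalar('train/loss', loss, step)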
    def metric(self, phase, metrics, epoch=None):
        """Record train/val metrics. This serves the dual purpose of writing
        the metrics to both a tensorboard file and a csv file, the latter for
        later parsing by sumx.
        Arguments:
            phase: 'train' or 'val'. sumx will only summarize val metrics.
            metrics: dictionary of metrics to record
            epoch: (optional) epoch or iteration number
        """
if not self.rank0:
return
# define canonical phase
if phase in trn_names:
canonical_phase = 'train'
elif phase in val_names:
canonical_phase = 'val'
else:
            raise ValueError('expected phase to be one of {} {}'.format(
                str(val_names), str(trn_names)))
if epoch is not None:
self.epoch[canonical_phase] = epoch
# Record metrics to csv file
csv_line = [canonical_phase]
for k, v in metrics.items():
csv_line.append(k)
csv_line.append(v)
# add epoch/iter
csv_line.append('epoch')
csv_line.append(self.epoch[canonical_phase])
# add timestamp
if not self.no_timestamp:
# this feature is useful for testing
csv_line.append('timestamp')
csv_line.append(time.time())
# To save a bit of disk space, only save validation metrics
if canonical_phase == 'val':
self.metrics_writer.writerow(csv_line)
self.metrics_fp.flush()
# Write updates to tensorboard file
with self.suspend_flush():
for k, v in metrics.items():
self.add_scalar('{}/{}'.format(phase, k), v,
self.epoch[canonical_phase])
# if no step, then keep track of it automatically
if epoch is None:
self.epoch[canonical_phase] += 1
@staticmethod
def is_better(save_metric, best_metric, higher_better):
        return best_metric is None or \
            (higher_better and save_metric > best_metric) or \
            (not higher_better and save_metric < best_metric)
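    # Examples (sketch): is_better(0.7, 0.5, higher_better=True) -> True;
    # is_better(0.3, 0.5, higher_better=False) -> True (e.g. for a loss);
    # a best_metric of None always returns True.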
def save_model(self, save_dict, metric, epoch, higher_better=True,
delete_old=True):
"""Saves a model to disk. Keeps a separate copy of latest and best models.
Arguments:
save_dict: dictionary to save to checkpoint
epoch: epoch number, used to name checkpoint
metric: metric value to be used to evaluate whether this is the
best result
higher_better: True if higher valued metric is better, False
otherwise
delete_old: Delete prior 'lastest' checkpoints. By setting to
false, you'll get a checkpoint saved every time this
function is called.
"""
if not self.rank0:
return
save_dict['__metric'] = metric
if os.path.exists(self.save_ckpt_fn) and delete_old:
os.remove(self.save_ckpt_fn)
# Save out current model
self.save_ckpt_fn = os.path.join(
self.logdir, 'last_checkpoint_ep{}.pth'.format(epoch))
torch.save(save_dict, self.save_ckpt_fn)
self.save_metric = metric
is_better = self.is_better(self.save_metric, self.best_metric,
higher_better)
if is_better:
if os.path.exists(self.best_ckpt_fn):
os.remove(self.best_ckpt_fn)
self.best_ckpt_fn = os.path.join(
self.logdir, 'best_checkpoint_ep{}.pth'.format(epoch))
self.best_metric = self.save_metric
copyfile(self.save_ckpt_fn, self.best_ckpt_fn)
return is_better
def get_best_checkpoint(self):
"""
Finds the checkpoint in `self.logdir` that is considered best.
If, for some reason, there are multiple best checkpoint files, then
the one with the highest epoch will be preferred.
Returns:
None - If there is no best checkpoint file
path (str) - The full path to the best checkpoint otherwise.
"""
match_str = r'^best_checkpoint_ep([0-9]+).pth$'
best_epoch = -1
best_checkpoint = None
for filename in os.listdir(self.logdir):
match = re.fullmatch(match_str, filename)
if match is not None:
# Extract the epoch number
epoch = int(match.group(1))
if epoch > best_epoch:
best_epoch = epoch
best_checkpoint = filename
if best_checkpoint is None:
return None
return os.path.join(self.logdir, best_checkpoint)
def load_model(self, path):
"""Restore a model and return a dict with any meta data included in
the snapshot
"""
checkpoint = torch.load(path)
state_dict = checkpoint['state_dict']
meta = {k: v for k, v in checkpoint.items() if k != 'state_dict'}
return state_dict, meta
# Importing logx gives you access to this shared object
logx = LogX()
| runx-master | runx/logx.py |
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
from tabulate import tabulate
import os
import copy
import argparse
import time
import json
import csv
from .utils import read_config, get_cfg
parser = argparse.ArgumentParser(
    description='Summarize run results',
    epilog=('Summarize results from training runs. Feed this script the name '
            'of the parent directory of a set of runs. sumx will automatically '
            'find the experiments by recursing downwards. You should only have '
            'to specify the parent because the overall root should come from '
            'logroot, contained in ~/.config/runx.yml'))
parser.add_argument('dirs', nargs='+', type=str)
parser.add_argument('--logroot', type=str, default=None)
parser.add_argument('--ignore', type=str, default=None,
                    help=('comma-separated list of hparams to ignore; default: '
                          'logdir,command,result_dir,nbr_workers,mmap_cache'))
parser.add_argument('--sortwith', '-s', type=str, default=None,
help='sort based on this metrics field')
parser.add_argument('--csv', type=str, default=None,
help='Dump cvs file of results')
args = parser.parse_args()
if args.ignore:
args.ignore = args.ignore.split(',')
else:
args.ignore = []
args.ignore += ['logdir', 'command', 'result_dir', 'nbr_workers', 'paths',
'val_paths']
def load_json(fname):
with open(fname) as json_data:
adict = json.load(json_data)
return adict
def get_runs(parent_dir):
'''
Assemble list of full paths to runs underneath parent.
Can be any depth of hierarchical tree
Look for code.tgz file.
'''
runs = []
for adir in os.listdir(parent_dir):
run_dir = os.path.join(parent_dir, adir)
hparams_fn = os.path.join(run_dir, 'hparams.json')
if os.path.isfile(hparams_fn):
runs += [run_dir]
return runs
def get_hparams(runs):
'''
given a list of full paths to directories, read in all hparams
'''
hparams = {}
for run in runs:
json_fn = os.path.join(run, 'hparams.json')
assert os.path.isfile(json_fn), \
'hparams.json not found in {}'.format(run)
hparams[run] = load_json(json_fn)
return hparams
def load_csv(csv_fn):
fp = open(csv_fn)
csv_reader = csv.reader((x.replace('\0', '') for x in fp), delimiter=',')
return list(csv_reader)
def avg_time_util(metrics_fn):
'''
read in a metrics file
calculate average: epoch time, gpu utilization
'''
metrics = load_csv(metrics_fn)
val_lines = [l for l in metrics if 'val' in l]
if not len(val_lines) or 'timestamp' not in val_lines[0]:
return None
    if len(val_lines) == 1:
        # metric lines are flat lists; look up the value next to 'timestamp'
        line = val_lines[0]
        return line[line.index('timestamp') + 1]
for metric_line in metrics:
phase = metric_line[0]
metric_line = metric_line[1:]
if phase == 'val':
keys = metric_line[0::2] # evens
vals = metric_line[1::2] # odds
metric_dict = {k: v for k, v in zip(keys, vals)}
return metric_dict
def extract_nontime_metrics(m):
"""
Read latest metrics out of metrics file.
if args.sortwith is defined, also capture the best value for the
args.sortwith metric and add that into the dict returned
"""
metrics = copy.deepcopy(m)
metrics.reverse()
skip_metrics = ('timestamp', 'gpu util')
epochs = 0
metric_dict = {}
saw_final_metrics = False
best_sortwith = None
for metric_line in metrics:
phase = metric_line[0]
metric_line = metric_line[1:]
if phase == 'val':
keys = metric_line[0::2] # evens
vals = metric_line[1::2] # odds
this_line_metrics = dict(zip(keys, vals))
# Capture the final validation metrics
if not saw_final_metrics:
saw_final_metrics = True
for k, v in this_line_metrics.items():
if k not in skip_metrics:
metric_dict[k] = v
# make the assumption that validation step == epoch
if k == 'step' or k == 'epoch':
epochs = int(v)
# Update the best value for sortwith
if args.sortwith:
assert args.sortwith in this_line_metrics
if best_sortwith is None or \
best_sortwith < this_line_metrics[args.sortwith]:
best_sortwith = this_line_metrics[args.sortwith]
metric_dict[args.sortwith + '-best'] = best_sortwith
return metric_dict, epochs
def get_epoch_time(metrics, epochs):
first_time = 0
last_time = 0
# first line should always contain the beginning timestamp
start_metric = metrics[0]
val_metrics = [m for m in metrics if 'val' in m]
# last val line should be time at last epoch
last_metric = val_metrics[-1]
assert 'start' in start_metric, \
'expected start timestamp in first line of metrics file'
if 'timestamp' not in start_metric or 'timestamp' not in last_metric:
return ''
timestamp_idx = start_metric.index('timestamp') + 1
first_time = float(start_metric[timestamp_idx])
timestamp_idx = last_metric.index('timestamp') + 1
last_time = float(last_metric[timestamp_idx])
elapsed_time = last_time - first_time
if epochs == 0:
epochs = 1
epoch_time = time.strftime("%H:%M:%S", time.gmtime(elapsed_time / epochs))
return epoch_time
def has_val(metrics):
counts = [v[0] == 'val' for v in metrics]
return sum(counts)
def get_final_metrics(metrics_fn):
'''
read in a metrics file
return a dict of the final metrics for test/val
also include epoch #
and average minutes/epoch
'''
# Extract reported metrics
metrics = load_csv(metrics_fn)
if has_val(metrics):
metric_dict, epochs = extract_nontime_metrics(metrics)
metric_dict.update({'epoch time': get_epoch_time(metrics, epochs)})
return metric_dict
else:
return None
def get_metrics(runs):
'''
Given the set of runs, pull out metrics
input: run list
output: metrics dict and metrics names
'''
metrics = {}
for run in runs:
metrics_fn = os.path.join(run, 'metrics.csv')
if not os.path.isfile(metrics_fn):
continue
metrics_run = get_final_metrics(metrics_fn)
if metrics_run is not None:
metrics[run] = metrics_run
return metrics
def any_different(alist):
if len(alist) < 2:
return False
first = alist[0]
total = sum([x != first for x in alist[1:]])
if total:
return True
else:
return False
def get_uncommon_hparam_names(all_runs):
'''
returns a list of uncommon hparam names
input:
- dict of hparams for each run
'''
# if 1 or fewer runs
if len(all_runs) <= 1:
return []
# assemble all keys
all_hparams = {}
for run in all_runs.values():
for p in run.keys():
all_hparams[p] = 1
# find all items that ever have different values
uncommon_list = []
for k in all_hparams:
all_values = []
for hparams in all_runs.values():
if k in hparams:
all_values.append(hparams[k])
else:
all_values.append(None)
if any_different(all_values) and k not in args.ignore:
uncommon_list.append(k)
return uncommon_list
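# Example (sketch with invented runs): given
#   {'run1': {'lr': 0.1, 'arch': 'r50'}, 'run2': {'lr': 0.01, 'arch': 'r50'}}
# only 'lr' ever differs across runs, so the function returns ['lr'].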
def summarize_experiment(parent_dir):
'''
Summarize an experiment, which can consist of many runs.
'''
assert os.path.exists(parent_dir), \
'Couldn\'t find directory {}'.format(parent_dir)
# assemble full paths to list of runs
runs = get_runs(parent_dir)
# dict of dicts of hparams
hparams = get_hparams(runs)
# dict of dicts of final test/val metrics
metrics = get_metrics(runs)
if not len(runs) or not len(metrics):
print('No valid experiments found for {}'.format(parent_dir))
return
# a list of hparams to list out
uncommon_hparams_names = get_uncommon_hparam_names(hparams)
# create header for table
header = ['run']
header += uncommon_hparams_names
first_valid_run = list(metrics.keys())[0]
sorted_metric_keys = sorted(metrics[first_valid_run].keys())
header += sorted_metric_keys
# fill table values out
tablebody = []
# Only iterate through runs in the metrics dict, which is restricted to
# runs for which there are results.
for r in metrics:
# start table with run name, derived from directory
run_dir = r.replace('{}/'.format(parent_dir), '')
entry = [run_dir]
# add to table the uncommon hparams
for v in uncommon_hparams_names:
if v in hparams[r]:
val = hparams[r][v]
entry.append(val)
else:
entry.append(None)
# add key metrics
entry += [metrics[r][k] for k in sorted_metric_keys]
# add entry to the table
tablebody.append(entry)
do_sort = False
if args.sortwith is None:
idx = 0
# Find a field with 'loss' in the name, so we can sort with it.
for h in header:
if 'loss' in h:
do_sort = True
idx = header.index(h)
break
else:
do_sort = True
idx = header.index(args.sortwith + '-best')
if do_sort:
def get_key(entry):
return entry[idx]
        try:
            tablebody = sorted(tablebody, key=get_key, reverse=True)
        except TypeError:
            print('Some data in table prevented sorting')
if args.csv is not None:
unf_table = [header] + tablebody
f = open("{}.csv".format(args.csv), "w+")
for row in unf_table:
for column in row:
f.write("{}, ".format(column))
f.write("\n")
# We chop long strings into multiple lines if they contain '.' or '_'
# This helps keep the output table more compact
header = [h.replace('.', '\n') for h in header]
header = [h.replace('_', '\n') for h in header]
table = [header] + tablebody
print(tabulate(table, headers='firstrow', floatfmt='1.2e'))
def main():
read_config(args)
if args.logroot is not None:
logroot = args.logroot
elif 'ngc' in get_cfg('FARM'):
logroot = get_cfg('NGC_LOGROOT')
else:
logroot = get_cfg('LOGROOT')
for adir in args.dirs:
full_path = os.path.join(logroot, adir)
summarize_experiment(full_path)
main()
| runx-master | runx/sumx.py |
import os
import shutil
import tempfile
import torch
import unittest
from parameterized import parameterized
import logx
class LogXTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.log_x = logx.LogX()
def tearDown(self):
shutil.rmtree(self.test_dir, ignore_errors=True)
def test_initialize(self):
self.log_x.initialize(self.test_dir, coolname=True, tensorboard=True)
@parameterized.expand([
(rank, use_tensorboard)
for rank in range(8)
for use_tensorboard in [True, False]
])
def test_tensorboard(self, rank, use_tensorboard):
self.log_x.initialize(self.test_dir, tensorboard=use_tensorboard,
global_rank=rank,
eager_flush=True)
self.log_x.add_scalar('train/loss', 42, 1)
found_events = False
for f in os.listdir(self.test_dir):
if f.startswith('events.out.tfevents.'):
found_events = True
break
self.assertEqual(use_tensorboard and rank == 0, found_events)
@parameterized.expand([
[True],
[False],
])
def test_tb_flushing(self, eager_flush):
old_flush = self.log_x._flush_tensorboard
flushed = [False]
def mock_flush():
flushed[0] = True
old_flush()
self.log_x._flush_tensorboard = mock_flush
self.log_x.initialize(self.test_dir, tensorboard=True,
eager_flush=eager_flush)
# This ensures that `eager_flush` is honored, even when calling through
# to the actual tensorboard API
self.log_x.tensorboard.add_scalars('train/vals', {
'val1': 10,
'val2': 20,
}, 1)
# This will always be called, regardless of what `eager_flush` is set
# to. The actual function will check that value before actually calling
# flush.
self.assertTrue(flushed[0])
@parameterized.expand([
[True],
[False],
])
def test_tb_suspend_flush(self, flush_at_end):
flushed = [False]
def mock_flush():
if self.log_x.eager_flush:
flushed[0] = True
self.log_x._flush_tensorboard = mock_flush
self.log_x.initialize(self.test_dir, tensorboard=True,
eager_flush=True)
self.assertTrue(self.log_x.eager_flush)
with self.log_x.suspend_flush(flush_at_end):
self.assertFalse(self.log_x.eager_flush)
for i in range(10):
self.log_x.tensorboard.add_scalar('train/loss', 9 - i, i)
self.assertFalse(flushed[0])
self.assertTrue(self.log_x.eager_flush)
self.assertEqual(flushed[0], flush_at_end)
@parameterized.expand([
(phase, rank, epoch)
for phase in ['train', 'val']
for rank in range(2)
for epoch in [None, 3]
])
def test_metrics(self, phase, rank, epoch):
self.log_x.initialize(self.test_dir, tensorboard=True,
global_rank=rank)
metrics = [
{'top1': 0.85, 'top5': 0.91, 'auc': 0.89},
{'top1': 0.855, 'top5': 0.92, 'auc': 0.895},
]
epochs = [
epoch + i if epoch is not None else i
for i in range(len(metrics))
]
for e, metric in zip(epochs, metrics):
self.log_x.metric(
phase,
epoch=e if epoch is not None else None,
metrics=metric)
# Force all of the writers to flush
del self.log_x
metrics_file = os.path.join(self.test_dir, 'metrics.csv')
if rank == 0:
self.assertTrue(os.path.exists(metrics_file))
with open(metrics_file, 'r') as fd:
lines = [line.strip() for i, line in enumerate(fd.readlines())
if i > 0]
lines = [line for line in lines if len(line) > 0]
if phase == 'train':
self.assertEqual(len(lines), 0)
elif phase == 'val':
self.assertEqual(len(lines), len(metrics))
for line, expected_epoch, expected_metric in \
zip(lines, epochs, metrics):
vals = line.split(',')
counter = 0
self.assertEqual(vals[counter], 'val')
counter += 1
for k, v in expected_metric.items():
self.assertEqual(vals[counter], k)
self.assertEqual(vals[counter + 1], str(v))
counter += 2
self.assertEqual(vals[counter], 'epoch')
counter += 1
self.assertEqual(int(vals[counter]), expected_epoch)
else:
self.assertFalse(os.path.exists(metrics_file))
def test_best_checkpoint(self):
self.log_x.initialize(self.test_dir)
self.assertIsNone(self.log_x.get_best_checkpoint())
best_path = os.path.join(self.test_dir, 'best_checkpoint_ep5.pth')
with open(best_path, 'w') as fd:
fd.write('hello')
self.assertEqual(self.log_x.get_best_checkpoint(), best_path)
best_path = os.path.join(self.test_dir, 'best_checkpoint_ep10.pth')
with open(best_path, 'w') as fd:
fd.write('hello2')
self.assertEqual(self.log_x.get_best_checkpoint(), best_path)
# This shouldn't change what the best path is due to the epoch rule
with open(os.path.join(self.test_dir, 'best_checkpoint_ep1.pth'),
'w') as fd:
fd.write('hello3')
self.assertEqual(self.log_x.get_best_checkpoint(), best_path)
@parameterized.expand([
[0],
[1],
])
def test_save_model(self, rank):
self.log_x.initialize(self.test_dir, global_rank=rank)
model1 = {
'val1': 42,
'val2': 44,
'val3': torch.tensor([[1, 2], [3, 4]], dtype=torch.float32)
}
self.log_x.save_model(model1, metric=0.5, epoch=0)
self.assertEqual(
os.path.exists(
os.path.join(self.test_dir, 'best_checkpoint_ep0.pth')),
rank == 0)
self.assertEqual(
os.path.exists(
os.path.join(self.test_dir, 'last_checkpoint_ep0.pth')),
rank == 0)
def dict_test(d1, d2):
# d2 is allowed to be a superset of d1
for k, v in d1.items():
self.assertIn(k, d2)
if torch.is_tensor(v):
self.assertTrue(torch.allclose(v, d2[k]))
else:
self.assertEqual(v, d2[k])
if rank == 0:
dict_test(model1, torch.load(self.log_x.get_best_checkpoint()))
model2 = {
'val1': 47,
'val2': 50,
'val3': torch.tensor([[5, 6], [7, 8], [9, 0]], dtype=torch.float32)
}
self.log_x.save_model(model2, metric=0.7, epoch=50)
self.assertFalse(
os.path.exists(
os.path.join(self.test_dir, 'best_checkpoint_ep0.pth')))
self.assertEqual(
os.path.exists(
os.path.join(self.test_dir, 'best_checkpoint_ep50.pth')),
rank == 0)
self.assertEqual(
os.path.exists(
os.path.join(self.test_dir, 'last_checkpoint_ep50.pth')),
rank == 0)
if rank == 0:
dict_test(model2, torch.load(self.log_x.get_best_checkpoint()))
model3 = {
'val1': 2,
'val2': 3,
'val3': torch.rand(3, 3, 3, dtype=torch.float32),
}
# This metric is worse than the previous, so it shouldn't replace best
self.log_x.save_model(model3, metric=0.6, epoch=60)
self.assertEqual(
os.path.exists(
os.path.join(self.test_dir, 'last_checkpoint_ep60.pth')),
rank == 0)
if rank == 0:
dict_test(model2, torch.load(self.log_x.get_best_checkpoint()))
# Now, the hard part. Kill this log_x, and create a new one, to verify
# that resumption works
del self.log_x
self.log_x = logx.LogX()
self.log_x.initialize(self.test_dir, global_rank=rank)
# Again, the best checkpoint should still be model2 at epoch 50
self.log_x.save_model(model3, metric=0.6, epoch=60)
if rank == 0:
dict_test(model2, torch.load(self.log_x.get_best_checkpoint()))
if __name__ == '__main__':
unittest.main()
| runx-master | runx/test/logx_test.py |
import unittest
import utils
class UtilsTest(unittest.TestCase):
def test_conditional_proxy(self):
counter = [0]
class Increment:
def call(self, value):
counter[0] += value
proxy = utils.ConditionalProxy(Increment(), True)
proxy.call(10)
self.assertEqual(counter[0], 10)
proxy = utils.ConditionalProxy(Increment(), False)
# This should *not* be forwarded to the increment object
proxy.call(42)
self.assertEqual(counter[0], 10)
with self.assertRaises(AttributeError):
proxy = utils.ConditionalProxy(Increment(), True)
proxy.blah(1, 2, 3)
post_hook_called = [False]
def post_hook():
post_hook_called[0] = True
proxy = utils.ConditionalProxy(Increment(), True, post_hook=post_hook)
proxy.call(-10)
self.assertEqual(counter[0], 0)
self.assertTrue(post_hook_called[0])
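# A minimal summary of the ConditionalProxy contract exercised above (inferred
# from the assertions, not a statement of runx internals): the proxy forwards
# method calls to the wrapped object only when its condition is True, raises
# AttributeError for attributes the wrapped object lacks, and invokes the
# optional post_hook after each forwarded call.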
if __name__ == '__main__':
unittest.main()
| runx-master | runx/test/utils_test.py |
'''
This is a modified version of the pytorch example imagenet training code.
'''
import argparse
import os
import random
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from runx.logx import logx
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__") and
callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
if os.environ.get('NVIDIA_INTERNAL'):
imgnet_path = '/home/dcg-adlr-atao-data.cosmos277/data/ImageNet_s480_q95'
else:
imgnet_path = None
parser.add_argument('--data', default=imgnet_path, help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=6, type=int, metavar='N',
                    help='number of data loading workers (default: 6)')
parser.add_argument('--epochs', default=10, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
metavar='N',
help='mini-batch size (default: 64), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--logdir', type=str, help='where to write output')
parser.add_argument('--num-classes', default=1000, type=int)
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help=('Use multi-processing distributed training to launch '
                          'N processes per node, which has N GPUs. This is the '
                          'fastest way to use PyTorch for either single node '
                          'or multi node data parallel training'))
best_acc1 = 0
def main():
args = parser.parse_args()
logx.initialize(logdir=args.logdir, tensorboard=True, hparams=vars(args))
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node,
args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
logx.msg("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
args.arch = 'resnet18'
logx.msg("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](num_classes=args.num_classes)
'''
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
'''
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
logx.msg("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logx.msg("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logx.msg("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
        validate(val_loader, model, criterion, args, args.start_epoch)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args, epoch)
# remember best acc@1 and save checkpoint
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_dict = {
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict()}
logx.save_model(
save_dict,
metric=acc1,
epoch=epoch,
higher_better=True)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
metrics = {'loss': losses.avg,
'top1': float(top1.avg),
'top5': float(top5.avg)}
logx.metric('train', metrics, i + epoch * len(train_loader))
if i % args.print_freq == 0:
logx.msg('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion, args, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
logx.msg('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
logx.msg(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
metrics = {'top1': float(top1.avg), 'top5': float(top5.avg)}
logx.metric('val', metrics, epoch)
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
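# Illustrative schedule (assuming the default --lr 0.1): epochs 0-29 train at
# lr=0.1, epochs 30-59 at lr=0.01, epochs 60-89 at lr=0.001, and so on.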
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
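# Hedged worked example for accuracy() (illustrative values, not part of the
# original script): for logits [[0.1, 0.9], [0.8, 0.2]] and targets [1, 1],
# topk=(1,) gives pred=[[1, 0]], correct=[[True, False]], so the function
# returns [tensor([50.])], i.e. 50% top-1 accuracy.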
if __name__ == '__main__':
main()
| runx-master | examples/imgnet.py |
'''
This code is a modified copy from the pytorch examples codebase.
'''
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from runx.logx import logx
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
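# Shape trace for Net on 28x28 MNIST inputs (a sketch to explain the 4*4*50
# flatten size): conv1 (5x5, no padding) -> 24x24x20, max_pool 2x2 -> 12x12x20,
# conv2 (5x5) -> 8x8x50, max_pool 2x2 -> 4x4x50 = 800 features, which fc1 maps
# to 500 and fc2 to the 10 MNIST classes.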
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
logx.msg('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item()))
# capture metrics
metrics = {'loss': loss.item()}
iteration = epoch * len(train_loader) + batch_idx
logx.metric('train', metrics, iteration)
def test(args, model, device, test_loader, epoch, optimizer):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
logx.msg('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset), accuracy))
# capture metrics
metrics = {'loss': test_loss, 'accuracy': accuracy}
logx.metric('val', metrics, epoch)
# save model
save_dict = {
'epoch': epoch + 1,
'arch': 'lenet',
'state_dict': model.state_dict(),
'accuracy': accuracy,
'optimizer': optimizer.state_dict()}
logx.save_model(
save_dict,
metric=accuracy,
epoch=epoch,
higher_better=True)
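# Note (inferred from the logx API as exercised in runx/test/logx_test.py):
# save_model writes a last_checkpoint every epoch and promotes a checkpoint to
# best_checkpoint only when `metric` improves; higher_better=True treats
# accuracy as higher-is-better.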
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=2, metavar='N',
                        help='number of epochs to train (default: 2)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--logdir', type=str, default=None,
help='target log directory')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
logx.initialize(logdir=args.logdir, coolname=True, tensorboard=True,
hparams=vars(args))
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader, epoch, optimizer)
if __name__ == '__main__':
main()
| runx-master | examples/mnist.py |
# Copyright (c) 2007-2023 The scikit-learn developers. All rights reserved.
# Modifications copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from gen_data_distributed import (
BlobsDataGen,
ClassificationDataGen,
LowRankMatrixDataGen,
RegressionDataGen,
)
from pandas import DataFrame
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
from benchmark.utils import WithSparkSession
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_make_blobs(dtype: str) -> None:
input_args = [
"--num_rows",
"50",
"--num_cols",
"2",
"--dtype",
dtype,
"--output_dir",
"temp",
"--output_num_files",
"3",
"--cluster_std",
"0.7",
"--random_state",
"0",
"--include_labels",
"true",
]
data_gen = BlobsDataGen(input_args)
args = data_gen.args
assert args is not None
with WithSparkSession(args.spark_confs, shutdown=(not args.no_shutdown)) as spark:
df, _, centers = data_gen.gen_dataframe_and_meta(spark)
assert df.rdd.getNumPartitions() == 3, "Unexpected number of partitions"
pdf: DataFrame = df.toPandas()
X = pdf.iloc[:, :-1].to_numpy()
y = pdf.iloc[:, -1].to_numpy()
assert X.dtype == np.dtype(dtype), "Unexpected dtype"
assert X.shape == (50, 2), "X shape mismatch"
assert y.shape == (50,), "y shape mismatch"
assert centers.shape == (3, 2), "Centers shape mismatch"
assert np.unique(y).shape == (3,), "Unexpected number of blobs"
cluster_stds = [0.7] * 3
for i, (ctr, std) in enumerate(zip(centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("use_gpu", ["True", "False"])
def test_make_low_rank_matrix(dtype: str, use_gpu: str) -> None:
input_args = [
"--num_rows",
"50",
"--num_cols",
"20",
"--dtype",
dtype,
"--output_dir",
"temp",
"--output_num_files",
"2",
"--effective_rank",
"5",
"--tail_strength",
"0.01",
"--random_state",
"0",
"--use_gpu",
use_gpu,
]
data_gen = LowRankMatrixDataGen(input_args)
args = data_gen.args
assert args is not None
with WithSparkSession(args.spark_confs, shutdown=(not args.no_shutdown)) as spark:
df, _ = data_gen.gen_dataframe(spark)
assert df.rdd.getNumPartitions() == 2, "Unexpected number of partitions"
pdf: DataFrame = df.toPandas()
X = pdf.to_numpy()
assert X.dtype == np.dtype(dtype), "Unexpected dtype"
assert X.shape == (50, 20), "X shape mismatch"
from numpy.linalg import svd
_, s, _ = svd(X)
assert sum(s) - 5 < 0.1, "X rank is not approximately 5"
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("low_rank", [True, False])
@pytest.mark.parametrize("use_gpu", ["True", "False"])
def test_make_regression(dtype: str, low_rank: bool, use_gpu: str) -> None:
input_args = [
"--num_rows",
"100",
"--num_cols",
"10",
"--dtype",
dtype,
"--output_dir",
"temp",
"--output_num_files",
"3",
"--n_informative",
"3",
"--bias",
"0.0",
"--noise",
"1.0",
"--random_state",
"0",
"--use_gpu",
use_gpu,
]
if low_rank:
input_args.extend(("--effective_rank", "5"))
data_gen = RegressionDataGen(input_args)
args = data_gen.args
assert args is not None
with WithSparkSession(args.spark_confs, shutdown=(not args.no_shutdown)) as spark:
df, _, c = data_gen.gen_dataframe_and_meta(spark)
assert df.rdd.getNumPartitions() == 3, "Unexpected number of partitions"
pdf: DataFrame = df.toPandas()
X = pdf.iloc[:, :-1].to_numpy()
y = pdf.iloc[:, -1].to_numpy()
assert X.dtype == np.dtype(dtype), "Unexpected dtype"
assert X.shape == (100, 10), "X shape mismatch"
assert y.shape == (100,), "y shape mismatch"
assert c.shape == (10,), "coef shape mismatch"
assert sum(c != 0.0) == 3, "Unexpected number of informative features"
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("num_rows", [2000, 2001]) # test uneven samples per cluster
@pytest.mark.parametrize(
"n_informative, n_repeated, n_redundant", [(31, 0, 0), (28, 3, 0), (23, 3, 4)]
)
def test_make_classification(
dtype: str, num_rows: int, n_informative: int, n_repeated: int, n_redundant: int
) -> None:
input_args = [
"--num_rows",
str(num_rows),
"--num_cols",
"31",
"--dtype",
dtype,
"--output_dir",
"temp",
"--output_num_files",
"3",
"--n_informative",
str(n_informative),
"--n_redundant",
str(n_redundant),
"--n_repeated",
str(n_repeated),
"--hypercube",
"True",
"--scale",
"0.5",
"--flip_y",
"0",
"--random_state",
"0",
]
data_gen = ClassificationDataGen(input_args)
args = data_gen.args
assert args is not None
with WithSparkSession(args.spark_confs, shutdown=(not args.no_shutdown)) as spark:
df, _ = data_gen.gen_dataframe(spark)
assert df.rdd.getNumPartitions() == 3, "Unexpected number of partitions"
pdf: DataFrame = df.toPandas()
X = pdf.iloc[:, :-1].to_numpy()
y = pdf.iloc[:, -1].to_numpy()
assert X.dtype == np.dtype(dtype), "Unexpected dtype"
assert X.shape == (num_rows, 31), "X shape mismatch"
assert y.shape == (num_rows,), "y shape mismatch"
assert np.unique(y).shape == (2,), "Unexpected number of classes"
if num_rows == 2000:
assert sum(y == 0) == 1000, "Unexpected number of samples in class 0"
assert sum(y == 1) == 1000, "Unexpected number of samples in class 1"
else:
assert (
abs(sum(y == 0) - sum(y == 1)) == 1
), "Unexpected number of samples per class"
assert (
np.unique(X, axis=0).shape[0] == num_rows
), "Unexpected number of unique rows"
assert (
np.unique(X, axis=1).shape[1] == 31 - n_repeated
), "Unexpected number of unique columns"
assert (
np.linalg.matrix_rank(X) == 31 - n_repeated - n_redundant
), "Unexpected matrix rank"
| spark-rapids-ml-branch-23.10 | python/benchmark/test_gen_data.py |
# Copyright (c) 2007-2023 The scikit-learn developers. All rights reserved.
# Modifications copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import random
from abc import abstractmethod
from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple
import numpy as np
import pandas as pd
import pyspark
from gen_data import DataGenBase, DefaultDataGen, main
from pyspark.mllib.random import RandomRDDs
from pyspark.sql import DataFrame, SparkSession
from sklearn.datasets import (
make_blobs,
make_classification,
make_low_rank_matrix,
make_regression,
)
from sklearn.datasets._samples_generator import _generate_hypercube
from sklearn.utils import shuffle as util_shuffle
from benchmark.utils import inspect_default_params_from_func
class DataGenBaseMeta(DataGenBase):
"""Base class datagen with meta info support"""
def __init__(self) -> None:
super().__init__()
@abstractmethod
def gen_dataframe_and_meta(
self, spark: SparkSession
) -> Tuple[DataFrame, List[str], np.ndarray]:
raise NotImplementedError()
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
df, feature_cols, _ = self.gen_dataframe_and_meta(spark)
return df, feature_cols
class BlobsDataGen(DataGenBaseMeta):
"""Generate random dataset using distributed calls to sklearn.datasets.make_blobs,
which creates blobs for benchmarking unsupervised clustering algorithms (e.g. KMeans)
"""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_blobs, ["n_samples", "n_features", "return_centers"]
)
# must replace the None to the correct type
params["centers"] = int
params["random_state"] = int
params["include_labels"] = bool
return params
def gen_dataframe_and_meta(
self, spark: SparkSession
) -> Tuple[DataFrame, List[str], np.ndarray]:
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
print(f"Passing {params} to make_blobs")
include_labels = params.pop("include_labels", False)
rows = self.num_rows
cols = self.num_cols
assert self.args is not None
num_partitions = self.args.output_num_files
# Set num_partitions to Spark's default if output_num_files is not provided.
if num_partitions is None:
num_partitions = spark.sparkContext.defaultParallelism
# Produce partition seeds for reproducibility.
random.seed(params["random_state"])
seed_maxval = 100 * num_partitions
partition_seeds = random.sample(range(1, seed_maxval), num_partitions)
partition_sizes = [rows // num_partitions] * num_partitions
partition_sizes[-1] += rows % num_partitions
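        # Illustrative sizing (assumed values): rows=50 and num_partitions=3
        # yield partition_sizes=[16, 16, 18]; the last partition absorbs the
        # remainder.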
# Generate centers upfront.
_, _, centers = make_blobs(
n_samples=1, n_features=cols, **params, return_centers=True
)
# Update params for partition-specific calls.
params["centers"] = centers
del params["random_state"]
maxRecordsPerBatch = int(
spark.sparkContext.getConf().get(
"spark.sql.execution.arrow.maxRecordsPerBatch", "10000"
)
)
# UDF to distribute make_blobs() calls across partitions. Each partition
# produces an equal fraction of the total samples around the predefined centers.
def make_blobs_udf(iter: Iterable[pd.DataFrame]) -> Iterable[pd.DataFrame]:
for pdf in iter:
partition_index = pdf.iloc[0][0]
n_partition_samples = partition_sizes[partition_index]
X, y = make_blobs(
n_samples=n_partition_samples,
n_features=cols,
**params,
random_state=partition_seeds[partition_index],
)
if include_labels:
data = np.concatenate(
(
X.astype(dtype),
y.reshape(n_partition_samples, 1).astype(dtype),
),
axis=1,
)
else:
data = X.astype(dtype)
del X
del y
for i in range(0, n_partition_samples, maxRecordsPerBatch):
end_idx = min(i + maxRecordsPerBatch, n_partition_samples)
yield pd.DataFrame(data=data[i:end_idx])
if include_labels:
label_col = "label"
self.schema.append(f"{label_col} {self.pyspark_type}")
return (
(
spark.range(
0, num_partitions, numPartitions=num_partitions
).mapInPandas(make_blobs_udf, schema=",".join(self.schema))
),
self.feature_cols,
centers,
)
class LowRankMatrixDataGen(DataGenBase):
"""Generate random dataset using a distributed version of sklearn.datasets.make_low_rank_matrix,
which creates large low rank matrices for benchmarking dimensionality reduction algos like pca
"""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_low_rank_matrix, ["n_samples", "n_features"]
)
# must replace the None to the correct type
params["random_state"] = int
params["use_gpu"] = bool
return params
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
print(f"Passing {params} to make_low_rank_matrix")
rows = self.num_rows
cols = self.num_cols
assert self.args is not None
num_partitions = self.args.output_num_files
# Set num_partitions to Spark's default if output_num_files is not provided.
if num_partitions is None:
num_partitions = spark.sparkContext.defaultParallelism
n = min(rows, cols)
np.random.seed(params["random_state"])
# If params not provided, set to defaults.
effective_rank = params.get("effective_rank", 10)
tail_strength = params.get("tail_strength", 0.5)
use_gpu = params.get("use_gpu", False)
partition_sizes = [rows // num_partitions] * num_partitions
partition_sizes[-1] += rows % num_partitions
# Check sizes to ensure QR decomp produces a matrix of the correct dimension.
assert partition_sizes[0] >= cols, (
f"Num samples per partition ({partition_sizes[0]}) must be >= num_features ({cols});"
f" decrease num_partitions from {num_partitions} to <= {rows // cols}"
)
# Generate U, S, V, the SVD decomposition of the output matrix.
# Code adapted from sklearn.datasets.make_low_rank_matrix().
singular_ind = np.arange(n, dtype=dtype)
low_rank = (1 - tail_strength) * np.exp(
-1.0 * (singular_ind / effective_rank) ** 2
)
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
# S and V are generated upfront, U is generated across partitions.
s = np.identity(n) * (low_rank + tail)
v, _ = np.linalg.qr(
np.random.standard_normal(size=(cols, n)),
mode="reduced",
)
# Precompute the S*V.T multiplicland with partition-wise normalization.
sv_normed = np.dot(s, v.T) * np.sqrt(1 / num_partitions)
del s
del v
maxRecordsPerBatch = int(
spark.sparkContext.getConf().get(
"spark.sql.execution.arrow.maxRecordsPerBatch", "10000"
)
)
# UDF for distributed generation of U and the resultant product U*S*V.T
def make_matrix_udf(iter: Iterable[pd.DataFrame]) -> Iterable[pd.DataFrame]:
for pdf in iter:
use_cupy = use_gpu
if use_cupy:
try:
import cupy as cp
except ImportError:
use_cupy = False
logging.warning("cupy import failed; falling back to numpy.")
partition_index = pdf.iloc[0][0]
n_partition_rows = partition_sizes[partition_index]
# Additional batch-wise normalization.
if use_cupy:
batch_norm = cp.sqrt(-(-n_partition_rows // maxRecordsPerBatch))
sv_batch_normed = cp.asarray(sv_normed) * batch_norm
else:
batch_norm = np.sqrt(-(-n_partition_rows // maxRecordsPerBatch))
sv_batch_normed = sv_normed * batch_norm
del batch_norm
for i in range(0, n_partition_rows, maxRecordsPerBatch):
end_idx = min(i + maxRecordsPerBatch, n_partition_rows)
if use_cupy:
u, _ = cp.linalg.qr(
cp.random.standard_normal(size=(end_idx - i, n)),
mode="reduced",
)
data = cp.dot(u, sv_batch_normed).get()
else:
u, _ = np.linalg.qr(
np.random.standard_normal(size=(end_idx - i, n)),
mode="reduced",
)
data = np.dot(u, sv_batch_normed)
del u
yield pd.DataFrame(data=data)
return (
(
spark.range(
0, num_partitions, numPartitions=num_partitions
).mapInPandas(make_matrix_udf, schema=",".join(self.schema))
),
self.feature_cols,
)
class RegressionDataGen(DataGenBaseMeta):
"""Generate regression dataset using a distributed version of sklearn.datasets.regression,
including features and labels.
"""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_regression, ["n_samples", "n_features", "coef"]
)
# must replace the None to the correct type
params["effective_rank"] = int
params["random_state"] = int
params["use_gpu"] = bool
return params
def gen_dataframe_and_meta(
self, spark: SparkSession
) -> Tuple[DataFrame, List[str], np.ndarray]:
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
print(f"Passing {params} to make_regression")
rows = self.num_rows
cols = self.num_cols
assert self.args is not None
num_partitions = self.args.output_num_files
# Set num_partitions to Spark's default if output_num_files is not provided.
if num_partitions is None:
num_partitions = spark.sparkContext.defaultParallelism
# Retrieve input params or set to defaults.
seed = params["random_state"]
generator = np.random.RandomState(seed)
bias = params.get("bias", 0.0)
noise = params.get("noise", 0.0)
shuffle = params.get("shuffle", True)
effective_rank = params.get("effective_rank", None)
n_informative = params.get("n_informative", 10)
n_targets = params.get("n_targets", 1)
use_gpu = params.get("use_gpu", False)
# Description (from sklearn):
#
# Input set is either well conditioned (default) or has a low rank fat tail singular profile (see LowRankMatrixDataGen).
# Output is generated by applying a (potentially biased) random linear regression model to the input, with n_informative
# nonzero regressors and some gaussian centered noise with adjustable scale.
#
# Code adapted from sklearn.datasets.make_regression().
if effective_rank is not None:
tail_strength = params.get("tail_strength", 0.5)
lrm_input_args = [
"--num_rows",
str(rows),
"--num_cols",
str(cols),
"--dtype",
str(dtype),
"--output_dir",
"temp",
"--output_num_files",
str(num_partitions),
"--effective_rank",
str(effective_rank),
"--tail_strength",
str(tail_strength),
"--random_state",
str(seed),
"--use_gpu",
str(use_gpu),
]
# Generate a low-rank, fat tail input set.
X, _ = LowRankMatrixDataGen(lrm_input_args).gen_dataframe(spark)
assert X.rdd.getNumPartitions() == num_partitions, (
f"Unexpected num partitions received from LowRankMatrix;"
f"expected {num_partitions}, got {X.rdd.getNumPartitions()}"
)
else:
# Randomly generate a well-conditioned input set.
X = spark.createDataFrame(
RandomRDDs.normalVectorRDD(
spark.sparkContext,
rows,
cols,
numPartitions=num_partitions,
seed=seed,
).map(
lambda nparray: nparray.tolist() # type: ignore
),
schema=",".join(self.schema),
)
assert X.rdd.getNumPartitions() == num_partitions, (
f"Unexpected num partitions received from RandomRDDs;"
f"expected {num_partitions}, got {X.rdd.getNumPartitions()}"
)
# Generate ground truth upfront.
ground_truth = np.zeros((cols, n_targets))
ground_truth[:n_informative, :] = 100 * generator.uniform(
size=(n_informative, n_targets)
)
if shuffle:
# Shuffle feature indices upfront.
col_indices = np.arange(cols)
generator.shuffle(col_indices)
ground_truth = ground_truth[col_indices]
# Create different partition seeds for sample generation.
random.seed(params["random_state"])
seed_maxval = 100 * num_partitions
partition_seeds = random.sample(range(1, seed_maxval), num_partitions)
# UDF for distributed generation of X and y.
def make_regression_udf(iter: Iterable[pd.DataFrame]) -> Iterable[pd.DataFrame]:
use_cupy = use_gpu
if use_cupy:
try:
import cupy as cp
except ImportError:
use_cupy = False
logging.warning("cupy import failed; falling back to numpy.")
partition_index = pyspark.TaskContext().partitionId()
if use_cupy:
generator_p = cp.random.RandomState(partition_seeds[partition_index])
ground_truth_cp = cp.asarray(ground_truth)
                # col_indices is only defined when shuffle is True.
                col_indices_cp = cp.asarray(col_indices) if shuffle else None
else:
generator_p = np.random.RandomState(partition_seeds[partition_index])
for pdf in iter:
if use_cupy:
X_p = cp.asarray(pdf.to_numpy())
else:
X_p = pdf.to_numpy()
if shuffle:
# Column-wise shuffle (global)
if use_cupy:
X_p[:, :] = X_p[:, col_indices_cp]
else:
X_p[:, :] = X_p[:, col_indices]
if use_cupy:
y = cp.dot(X_p, ground_truth_cp) + bias
else:
y = np.dot(X_p, ground_truth) + bias
if noise > 0.0:
y += generator_p.normal(scale=noise, size=y.shape)
n_partition_rows = X_p.shape[0]
if shuffle:
# Row-wise shuffle (partition)
if use_cupy:
                        # Use the seeded per-partition generator for reproducibility.
                        row_indices = generator_p.permutation(n_partition_rows)
X_p = X_p[row_indices]
y = y[row_indices]
else:
X_p, y = util_shuffle(X_p, y, random_state=generator_p)
if use_cupy:
y = cp.squeeze(y)
data = cp.concatenate(
(
X_p.astype(dtype),
y.reshape(n_partition_rows, 1).astype(dtype),
),
axis=1,
).get()
else:
y = np.squeeze(y)
data = np.concatenate(
(
X_p.astype(dtype),
y.reshape(n_partition_rows, 1).astype(dtype),
),
axis=1,
)
del X_p
del y
yield pd.DataFrame(data=data)
label_col = "label"
self.schema.append(f"{label_col} {self.pyspark_type}")
return (
(X.mapInPandas(make_regression_udf, schema=",".join(self.schema))),
self.feature_cols,
np.squeeze(ground_truth),
)
class ClassificationDataGen(DataGenBase):
"""Generate classification dataset using a distributed version of sklearn.datasets.classification,
including features and labels."""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_classification, ["n_samples", "n_features", "weights"]
)
# must replace the None to the correct type
params["random_state"] = int
return params
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
print(f"Passing {params} to make_classification")
n_samples = self.num_rows
n_features = self.num_cols
assert self.args is not None
num_partitions = self.args.output_num_files
# Set num_partitions to Spark's default if output_num_files is not provided.
if num_partitions is None:
num_partitions = spark.sparkContext.defaultParallelism
# For detailed parameter descriptions, see below:
# https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html
# Retrieve input params or set to defaults.
n_informative = params.get("n_informative", 2)
n_redundant = params.get("n_redundant", 2)
n_repeated = params.get("n_repeated", 0)
n_classes = params.get("n_classes", 2)
n_clusters_per_class = params.get("n_clusters_per_class", 2)
flip_y = params.get("flip_y", 0.01)
class_sep = params.get("class_sep", 1.0)
hypercube = params.get("hypercube", True)
shift = params.get("shift", 0.0)
scale = params.get("scale", 1.0)
shuffle = params.get("shuffle", True)
generator = np.random.RandomState(params["random_state"])
# Generate a random n-class classification problem.
# Code adapted from sklearn.datasets.make_classification.
# Check feature and cluster counts.
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError(
"Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features"
)
if n_informative < np.log2(
n_classes * n_clusters_per_class
): # log2 to avoid overflow errors
msg = "n_classes({}) * n_clusters_per_class({}) must be"
msg += " smaller or equal 2**n_informative({})={}"
raise ValueError(
msg.format(
n_classes, n_clusters_per_class, n_informative, 2**n_informative
)
)
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
# Distribute samples among clusters.
n_samples_per_cluster = [n_samples // n_clusters] * n_clusters
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Distribute cluster samples among partitions.
def distribute_samples(
samples_per_cluster: List[int], num_partitions: int
) -> List[List[int]]:
# Generates a list of num_partitions lists, each containing the samples to generate per cluster for that partition.
num_clusters = len(samples_per_cluster)
samples_per_partition = [[0] * num_clusters for _ in range(num_partitions)]
for i, samples in enumerate(samples_per_cluster):
quotient, remainder = divmod(samples, num_partitions)
for j in range(num_partitions):
samples_per_partition[j][i] += quotient
for j in range(remainder):
samples_per_partition[j][i] += 1
return samples_per_partition
n_samples_per_cluster_partition = distribute_samples(
n_samples_per_cluster, num_partitions
)
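        # Worked example (illustrative values): with samples_per_cluster=[5, 4]
        # and num_partitions=2, distribute_samples returns [[3, 2], [2, 2]],
        # i.e. partition 0 generates 3 samples for cluster 0 and 2 for cluster 1.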
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(
float, copy=False
)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.uniform(size=(n_clusters, 1))
centroids *= generator.uniform(size=(1, n_informative))
# Precompute covariance coefficients / noise parameters
A = [
2 * generator.uniform(size=(n_informative, n_informative)) - 1
for _ in range(n_clusters)
]
if n_redundant > 0:
B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1
if n_repeated > 0:
n = n_informative + n_redundant
repeat_indices = (
(n - 1) * generator.uniform(size=n_repeated) + 0.5
).astype(np.intp)
if shift is None:
shift = (2 * generator.uniform(size=n_features) - 1) * class_sep
if scale is None:
scale = 1 + 100 * generator.uniform(size=n_features)
if shuffle:
shuffle_indices = np.arange(n_features)
generator.shuffle(shuffle_indices)
# Create different partition seeds for sample generation
random.seed(params["random_state"])
seed_maxval = 100 * num_partitions
partition_seeds = random.sample(range(1, seed_maxval), num_partitions)
maxRecordsPerBatch = int(
spark.sparkContext.getConf().get(
"spark.sql.execution.arrow.maxRecordsPerBatch", "10000"
)
)
def make_classification_udf(
iter: Iterable[pd.DataFrame],
) -> Iterable[pd.DataFrame]:
for pdf in iter:
partition_index = pdf.iloc[0][0]
n_cluster_samples = n_samples_per_cluster_partition[partition_index]
n_partition_samples = sum(n_cluster_samples)
X_p = np.zeros((n_partition_samples, n_features))
y = np.zeros(n_partition_samples, dtype=int)
generator = np.random.RandomState(partition_seeds[partition_index])
# Create informative features
X_p[:, :n_informative] = generator.standard_normal(
size=(n_partition_samples, n_informative)
)
# Generate the samples per cluster for which this partition is responsible
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_cluster_samples[k]
y[start:stop] = k % n_classes # assign labels
X_k = X_p[start:stop, :n_informative] # slice a view of the cluster
X_k[...] = np.dot(X_k, A[k]) # introduce random covariance
X_k += centroid # shift the cluster to vertex
# Create redundant features
if n_redundant > 0:
X_p[:, n_informative : n_informative + n_redundant] = np.dot(
X_p[:, :n_informative], B
)
# Repeat some features
if n_repeated > 0:
X_p[:, n : n + n_repeated] = X_p[:, repeat_indices]
# Fill useless features
if n_useless > 0:
X_p[:, -n_useless:] = generator.standard_normal(
size=(n_partition_samples, n_useless)
)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.uniform(size=n_partition_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
X_p += shift
X_p *= scale
if shuffle:
X_p, y = util_shuffle(
X_p, y, random_state=generator
) # Randomly permute samples
X_p[:, :] = X_p[:, shuffle_indices] # Randomly permute features
data = np.concatenate(
(
X_p.astype(dtype),
y.reshape(n_partition_samples, 1).astype(dtype),
),
axis=1,
)
del X_p
del y
for i in range(0, n_partition_samples, maxRecordsPerBatch):
end_idx = min(i + maxRecordsPerBatch, n_partition_samples)
yield pd.DataFrame(data=data[i:end_idx])
label_col = "label"
self.schema.append(f"{label_col} {self.pyspark_type}")
return (
spark.range(0, num_partitions, numPartitions=num_partitions).mapInPandas(
make_classification_udf, schema=",".join(self.schema)
)
), self.feature_cols
if __name__ == "__main__":
"""
See gen_data.main for more info.
"""
registered_data_gens = {
"blobs": BlobsDataGen,
"default": DefaultDataGen,
"low_rank_matrix": LowRankMatrixDataGen,
"regression": RegressionDataGen,
"classification": ClassificationDataGen,
}
main(registered_data_gens=registered_data_gens, repartition=False)
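    # Example invocation (a hedged sketch mirroring the usage documented in
    # gen_data.main; adjust paths and confs to your environment):
    #   python gen_data_distributed.py blobs --num_rows 5000 --num_cols 3000 \
    #       --dtype float64 --output_dir ./blobs.parquet \
    #       --spark_confs "spark.master=local[*]"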
| spark-rapids-ml-branch-23.10 | python/benchmark/gen_data_distributed.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
from abc import abstractmethod
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pyspark.mllib.random import RandomRDDs
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import array
from sklearn.datasets import (
make_blobs,
make_classification,
make_low_rank_matrix,
make_regression,
)
from benchmark.utils import WithSparkSession, inspect_default_params_from_func, to_bool
def dtype_to_pyspark_type(dtype: Union[np.dtype, str]) -> str:
"""Convert np.dtype to the corresponding pyspark type"""
dtype = np.dtype(dtype)
if dtype == np.float32:
return "float"
elif dtype == np.float64:
return "double"
else:
raise RuntimeError("Unsupported dtype, found ", dtype)
class DataGen(object):
"""DataGen interface"""
@abstractmethod
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
raise NotImplementedError()
class DataGenBase(DataGen):
"""Base class datagen"""
def __init__(self) -> None:
# Global parameters
self._parser = argparse.ArgumentParser()
self._parser.add_argument(
"--num_rows",
type=int,
default=100,
help="total number of rows. default to 100",
)
self._parser.add_argument(
"--num_cols",
type=int,
default=30,
help="total number of columns. default to 30",
)
self._parser.add_argument(
"--dtype",
type=str,
choices=["float64", "float32"],
default="float32",
help="the data type, default to float32",
)
self._parser.add_argument(
"--feature_type",
type=str,
choices=["array", "vector", "multi_cols"],
default="multi_cols",
help="array - 1 column with ArrayType<dtype>, vector - 1 column with VectorUDT type, multi_cols: multiple columns with dtype. Default to multiple",
)
self._parser.add_argument(
"--output_dir", type=str, required=True, help="the dataset output directory"
)
self._parser.add_argument(
"--output_num_files", type=int, help="the number of files to be generated"
)
self._parser.add_argument(
"--overwrite", action="store_true", help="if overwrite the output directory"
)
self._parser.add_argument(
"--spark_confs",
action="append",
default=[],
help="the optional spark configurations",
)
self._parser.add_argument(
"--no_shutdown",
action="store_true",
help="do not stop spark session when finished",
)
def _restrict_train_size(x: float) -> float:
# refer to https://stackoverflow.com/a/12117065/1928940
try:
x = float(x)
except ValueError:
raise argparse.ArgumentTypeError(f"{x} is not a floating-point literal")
if x < 0.0 or x > 1.0:
raise argparse.ArgumentTypeError(f"{x} is not in range [0.0, 1.0]")
return x
self._parser.add_argument(
"--train_fraction",
type=_restrict_train_size, # type: ignore
help="the value should be between 0.0 and 1.0 and represent "
"the proportion of the dataset to include in the train split",
)
self._add_extra_parameters()
self.args_: Optional[argparse.Namespace] = None
def _add_extra_parameters(self) -> None:
self.supported_extra_params = self._supported_extra_params()
for name, value in self.supported_extra_params.items():
if value is None:
raise RuntimeError("Must convert None value to the correct type")
elif type(value) is type:
# value is already type
self._parser.add_argument("--" + name, type=value)
elif type(value) is bool:
self._parser.add_argument("--" + name, type=to_bool)
else:
# get the type from the value
self._parser.add_argument("--" + name, type=type(value))
def _supported_extra_params(self) -> Dict[str, Any]:
"""Function to inspect the specific function to get the parameters and values"""
return {}
def _parse_arguments(self, argv: List[Any]) -> None:
"""Subclass must call this function in __init__"""
self.args_ = self._parser.parse_args(argv)
self.num_rows = self.args_.num_rows
self.num_cols = self.args_.num_cols
self.dtype = np.dtype(self.args_.dtype)
self.pyspark_type = dtype_to_pyspark_type(self.dtype)
self.feature_cols: List[str] = [f"c{i}" for i in range(self.num_cols)]
self.schema = [f"{c} {self.pyspark_type}" for c in self.feature_cols]
self.extra_params = {
k: v
for k, v in vars(self.args_).items()
if k in self.supported_extra_params and v is not None
}
@property
def args(self) -> Optional[argparse.Namespace]:
return self.args_
class DefaultDataGen(DataGenBase):
"""Generate default dataset only containing features"""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(RandomRDDs.uniformVectorRDD, [])
# must replace the None to the correct type
params["numPartitions"] = int
params["seed"] = int
return params
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
params = self.extra_params
if "seed" not in params:
# for reproducible dataset.
params["seed"] = 1
print(f"Passing {params} to RandomRDDs.uniformVectorRDD")
rdd = RandomRDDs.uniformVectorRDD(
spark.sparkContext, self.num_rows, self.num_cols, **params
).map(
lambda nparray: nparray.tolist() # type: ignore
)
return (
spark.createDataFrame(rdd, schema=",".join(self.schema)),
self.feature_cols,
)
class BlobsDataGen(DataGenBase):
"""Generate random dataset using sklearn.datasets.make_blobs,
which creates blobs for benchmarking unsupervised clustering algorithms (e.g. KMeans)
"""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_blobs, ["n_samples", "n_features", "return_centers"]
)
# must replace the None to the correct type
params["centers"] = int
params["random_state"] = int
return params
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
"More information about the implementation can be found in RegressionDataGen."
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
print(f"Passing {params} to make_blobs")
rows = self.num_rows
cols = self.num_cols
        def make_blobs_udf(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
data, _ = make_blobs(n_samples=rows, n_features=cols, **params)
data = data.astype(dtype)
yield pd.DataFrame(data=data)
return (
spark.range(0, self.num_rows, 1, 1).mapInPandas(
make_blobs_udf, schema=",".join(self.schema) # type: ignore
)
), self.feature_cols
class LowRankMatrixDataGen(DataGenBase):
"""Generate random dataset using sklearn.datasets.make_low_rank_matrix,
which creates large low rank matrices for benchmarking dimensionality reduction algos like pca
"""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_low_rank_matrix, ["n_samples", "n_features"]
)
# must replace the None to the correct type
params["random_state"] = int
return params
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
"More information about the implementation can be found in RegressionDataGen."
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
rows = self.num_rows
cols = self.num_cols
print(f"Passing {params} to make_low_rank_matrix")
        def make_matrix_udf(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
data = make_low_rank_matrix(n_samples=rows, n_features=cols, **params)
data = data.astype(dtype)
yield pd.DataFrame(data=data)
return (
spark.range(0, self.num_rows, 1, 1).mapInPandas(
make_matrix_udf, schema=",".join(self.schema) # type: ignore
)
), self.feature_cols
class RegressionDataGen(DataGenBase):
"""Generate regression dataset including features and label."""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_regression, ["n_samples", "n_features", "coef"]
)
# must replace the None to the correct type
params["effective_rank"] = int
params["random_state"] = int
return params
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
num_cols = self.num_cols
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
print(f"Passing {params} to make_regression")
        def make_regression_udf(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
"""Pandas udf to call make_regression of sklearn to generate regression dataset"""
total_rows = 0
for pdf in iter:
total_rows += pdf.shape[0]
            # Iterate over all batches of this single partition to get the
            # total row count before generating the dataset in one call.
            # Use 10% of num_cols as the number of informative features,
            # following the ratio of sklearn's defaults.
X, y = make_regression(n_samples=total_rows, n_features=num_cols, **params)
data = np.concatenate(
(X.astype(dtype), y.reshape(total_rows, 1).astype(dtype)), axis=1
)
del X
del y
yield pd.DataFrame(data=data)
label_col = "label"
self.schema.append(f"{label_col} {self.pyspark_type}")
        # Each call to make_regression returns a dataset with different
        # coefficients, so we force a single task to generate the whole
        # dataset. This can easily cause OOM and performance issues; in
        # testing, a 100,000,000 x 30 matrix was generated without problems
        # using 60g of executor memory, which should be enough for perf tests.
return (
spark.range(0, self.num_rows, 1, 1).mapInPandas(
make_regression_udf, schema=",".join(self.schema) # type: ignore
)
), self.feature_cols
class ClassificationDataGen(DataGenBase):
"""Generate classification dataset including features and label."""
def __init__(self, argv: List[Any]) -> None:
super().__init__()
self._parse_arguments(argv)
def _supported_extra_params(self) -> Dict[str, Any]:
params = inspect_default_params_from_func(
make_classification, ["n_samples", "n_features", "weights"]
)
# must replace None with the correct type
params["random_state"] = int
return params
def gen_dataframe(self, spark: SparkSession) -> Tuple[DataFrame, List[str]]:
num_cols = self.num_cols
dtype = self.dtype
params = self.extra_params
if "random_state" not in params:
# for reproducible dataset.
params["random_state"] = 1
print(f"Passing {params} to make_classification")
def make_classification_udf(iter: Iterator[pd.Series]) -> pd.DataFrame:
"""Pandas udf to call make_classification of sklearn to generate classification dataset"""
total_rows = 0
for pdf in iter:
total_rows += pdf.shape[0]
# iterate over all batches of a single partition to get the total row count.
X, y = make_classification(
n_samples=total_rows, n_features=num_cols, **params
)
data = np.concatenate(
(X.astype(dtype), y.reshape(total_rows, 1).astype(dtype)), axis=1
)
del X
del y
yield pd.DataFrame(data=data)
label_col = "label"
self.schema.append(f"{label_col} {self.pyspark_type}")
# Each call to make_classification returns a classification dataset with different
# characteristics, so force a single task to generate the whole dataset, which can
# easily cause OOM and perf issues. In testing, this script generated a
# 100,000,000 x 30 matrix without issues with 60g of executor memory, which should
# be enough for the perf test.
return (
spark.range(0, self.num_rows, 1, 1).mapInPandas(
make_classification_udf, schema=",".join(self.schema) # type: ignore
)
), self.feature_cols
def main(registered_data_gens: Dict[str, Any], repartition: bool) -> None:
"""
python gen_data.py [regression|blobs|low_rank_matrix|default|classification] \
--num_rows 5000 \
--num_cols 3000 \
--dtype "float64" \
--output_dir "./5k_2k_float64.parquet" \
--spark_confs "spark.master=local[*]" \
--spark_confs "spark.driver.memory=128g"
"""
parser = argparse.ArgumentParser(
description="Generate random dataset.",
usage="""gen_data.py <type> [<args>]
Supported types are:
blobs Generate random blobs datasets using sklearn's make_blobs
regression Generate random regression datasets using sklearn's make_regression
classification Generate random classification datasets using sklearn's make_classification
low_rank_matrix Generate random dataset using sklearn's make_low_rank_matrix
default Generate default dataset using pyspark RandomRDDs.uniformVectorRDD
""",
)
parser.add_argument("type", help="Generate random dataset")
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if args.type not in registered_data_gens:
print("Unrecognized type: ", args.type)
parser.print_help()
exit(1)
data_gen = registered_data_gens[args.type](sys.argv[2:]) # type: ignore
# Must repartition for default.
if args.type == "default":
repartition = True
assert data_gen.args is not None
args = data_gen.args
with WithSparkSession(args.spark_confs, shutdown=(not args.no_shutdown)) as spark:
df, feature_cols = data_gen.gen_dataframe(spark)
if args.feature_type == "array":
df = df.withColumn("feature_array", array(*feature_cols)).drop(
*feature_cols
)
elif args.feature_type == "vector":
from pyspark.ml.feature import VectorAssembler
df = (
VectorAssembler()
.setInputCols(feature_cols)
.setOutputCol("feature_array")
.transform(df)
.drop(*feature_cols)
)
def write_files(dataframe: DataFrame, path: str) -> None:
if args.output_num_files is not None and repartition:
dataframe = dataframe.repartition(args.output_num_files)
writer = dataframe.write
if args.overwrite:
writer = writer.mode("overwrite")
writer.parquet(path)
if args.train_fraction is not None:
train_df, eval_df = df.randomSplit(
[args.train_fraction, 1 - args.train_fraction], seed=1
)
write_files(train_df, f"{args.output_dir}/train")
write_files(eval_df, f"{args.output_dir}/eval")
else:
write_files(df, args.output_dir)
df.printSchema()
print("gen_data finished")
if __name__ == "__main__":
registered_data_gens = {
"blobs": BlobsDataGen,
"regression": RegressionDataGen,
"classification": ClassificationDataGen,
"low_rank_matrix": LowRankMatrixDataGen,
"default": DefaultDataGen,
}
main(registered_data_gens=registered_data_gens, repartition=True)
| spark-rapids-ml-branch-23.10 | python/benchmark/gen_data.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
from benchmark.bench_kmeans import BenchmarkKMeans
from benchmark.bench_linear_regression import BenchmarkLinearRegression
from benchmark.bench_logistic_regression import BenchmarkLogisticRegression
from benchmark.bench_nearest_neighbors import BenchmarkNearestNeighbors
from benchmark.bench_pca import BenchmarkPCA
from benchmark.bench_random_forest import (
BenchmarkRandomForestClassifier,
BenchmarkRandomForestRegressor,
)
from benchmark.bench_umap import BenchmarkUMAP
class BenchmarkRunner:
def __init__(self) -> None:
registered_algorithms = {
"kmeans": BenchmarkKMeans,
"knn": BenchmarkNearestNeighbors,
"linear_regression": BenchmarkLinearRegression,
"pca": BenchmarkPCA,
"random_forest_classifier": BenchmarkRandomForestClassifier,
"random_forest_regressor": BenchmarkRandomForestRegressor,
"logistic_regression": BenchmarkLogisticRegression,
"umap": BenchmarkUMAP,
}
algorithms = "\n ".join(registered_algorithms.keys())
parser = argparse.ArgumentParser(
description="Benchmark Spark Rapids ML algorithms",
usage=f"""benchmark_runner.py <algorithm> [<args>]
Supported algorithms are:
{algorithms}
""",
)
parser.add_argument("algorithm", help="benchmark the ML algorithms")
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if args.algorithm not in registered_algorithms:
print("Unrecognized algorithm: ", args.algorithm)
parser.print_help()
exit(1)
self._runner: BenchmarkBase = registered_algorithms[args.algorithm]( # type: ignore
sys.argv[2:]
)
def run(self) -> None:
self._runner.run()
if __name__ == "__main__":
"""
There're two ways to do the benchmark.
1.
python benchmark_runner.py [linear_regression] \
--num_gpus=2 \
--train_path=xxx \
--spark_confs="spark.master=local[12]" \
2.
spark-submit --master local[12] benchmark_runner.py --num_gpus=2 --train_path=xxx
"""
BenchmarkRunner().run()
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark_runner.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pprint
import time
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from pandas import DataFrame as PandasDataFrame
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.functions import array_to_vector, vector_to_array
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col, sum
from benchmark.base import BenchmarkBase
from benchmark.utils import inspect_default_params_from_func, with_benchmark
class BenchmarkUMAP(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
"""Note: only needed for Spark PCA on CPU."""
from pyspark.ml.feature import PCA
params = inspect_default_params_from_func(
PCA.__init__,
[
"featuresCol",
"labelCol",
"predictionCol",
"probabilityCol",
"rawPredictionCol",
"weightCol",
"leafCol",
],
)
params["k"] = int
return params
def _parse_arguments(self, argv: List[Any]) -> None:
"""Override to set class params based on cpu or gpu run (umap or pca)"""
pp = pprint.PrettyPrinter()
self._args = self._parser.parse_args(argv)
print("command line arguments:")
pp.pprint(vars(self._args))
if self._args.num_cpus > 0:
supported_class_params = self._supported_class_params()
else:
supported_class_params = {}
self._class_params = {
k: v
for k, v in vars(self._args).items()
if k in supported_class_params and v is not None
}
print("\nclass params:")
pp.pprint(self._class_params)
print()
def _add_extra_arguments(self) -> None:
self._parser.add_argument(
"--no_cache",
action="store_true",
default=False,
help="whether to enable dataframe repartition, cache and cout outside fit function",
)
def score(
self, transformed_df: DataFrame, data_col: str, transformed_col: str
) -> float:
"""Computes the trustworthiness score, a measure of the extent to which the local structure
of the dataset is retained in the embedding of the UMAP model (or the projection in the case of PCA).
Score is in the range of [0, 1].
Parameters
----------
transformed_df
Model transformed data.
data_col
Name of column with the input data.
Note: This column is expected to be of pyspark sql 'array' type.
transformed_col
Name of column with the transformed data.
Note: This column is expected to be of pyspark sql 'array' type.
Returns
-------
float
The trustworthiness score of the transformed data.
"""
from cuml.metrics import trustworthiness
pdf: PandasDataFrame = transformed_df.toPandas()
embedding = np.array(pdf[transformed_col].to_list())
input = np.array(pdf[data_col].to_list())
score = trustworthiness(input, embedding, n_neighbors=15)
return score
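# A minimal standalone sketch of the trustworthiness computation above (assumes
# cuML is installed; the arrays here are hypothetical stand-ins):
#   from cuml.metrics import trustworthiness
#   X = np.random.rand(100, 16)            # original high-dimensional features
#   E = X[:, :2]                           # stand-in for a 2-d embedding
#   trustworthiness(X, E, n_neighbors=15)  # float in [0, 1]; higher is better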
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_name: Optional[str],
) -> Dict[str, Any]:
"""
This function evaluates the runtimes of Spark Rapids ML UMAP and Spark PCA and returns the
trustworthiness score of the model projections. The primary purpose is to help understand GPU behavior
and performance.
"""
num_gpus = self.args.num_gpus
num_cpus = self.args.num_cpus
no_cache = self.args.no_cache
func_start_time = time.time()
first_col = train_df.dtypes[0][0]
first_col_type = train_df.dtypes[0][1]
is_array_col = True if "array" in first_col_type else False
is_vector_col = True if "vector" in first_col_type else False
is_single_col = is_array_col or is_vector_col
if not is_single_col:
input_cols = [c for c in train_df.schema.names]
if num_gpus > 0:
from spark_rapids_ml.umap import UMAP, UMAPModel
assert num_cpus <= 0
if not no_cache:
def gpu_cache_df(df: DataFrame) -> DataFrame:
df = df.repartition(num_gpus).cache()
df.count()
return df
train_df, prepare_time = with_benchmark(
"prepare dataset", lambda: gpu_cache_df(train_df)
)
gpu_estimator = UMAP(
num_workers=num_gpus,
verbose=self.args.verbose,
)
if is_single_col:
gpu_estimator = gpu_estimator.setFeaturesCol(first_col)
else:
gpu_estimator = gpu_estimator.setFeaturesCols(input_cols)
output_col = "embedding"
gpu_estimator = gpu_estimator.setOutputCol(output_col)
gpu_model, fit_time = with_benchmark(
"gpu fit", lambda: gpu_estimator.fit(train_df)
)
def transform(model: UMAPModel, df: DataFrame) -> DataFrame:
transformed_df = model.transform(df)
transformed_df.count()
return transformed_df
transformed_df, transform_time = with_benchmark(
"gpu transform", lambda: transform(gpu_model, train_df)
)
total_time = round(time.time() - func_start_time, 2)
print(f"gpu total took: {total_time} sec")
data_col = "features"
if num_cpus > 0:
from pyspark.ml.feature import PCA as SparkPCA
assert num_gpus <= 0
if is_array_col:
vector_df = train_df.select(
array_to_vector(train_df[first_col]).alias(first_col)
)
elif not is_vector_col:
vector_assembler = VectorAssembler(outputCol="features").setInputCols(
input_cols
)
vector_df = vector_assembler.transform(train_df).drop(*input_cols)
first_col = "features"
else:
vector_df = train_df
if not no_cache:
def cpu_cache_df(df: DataFrame) -> DataFrame:
df = df.cache()
df.count()
return df
vector_df, prepare_time = with_benchmark(
"prepare dataset", lambda: cpu_cache_df(vector_df)
)
output_col = "pca_features"
params = self.class_params
print(f"Passing {params} to SparkPCA")
cpu_pca = SparkPCA(**params).setInputCol(first_col).setOutputCol(output_col)
cpu_model, fit_time = with_benchmark(
"cpu fit", lambda: cpu_pca.fit(vector_df)
)
def cpu_transform(df: DataFrame) -> DataFrame:
transformed_df = cpu_model.transform(df)
transformed_df.select(
(vector_to_array(col(output_col))[0]).alias("zero")
).agg(sum("zero")).collect()
return transformed_df
transformed_df, transform_time = with_benchmark(
"cpu transform", lambda: cpu_transform(vector_df)
)
total_time = round(time.time() - func_start_time, 2)
print(f"cpu total took: {total_time} sec")
data_col = first_col
score = self.score(transformed_df, data_col, output_col)
print(f"trustworthiness score: {score}")
report_dict = {
"fit": fit_time,
"transform": transform_time,
"total_time": total_time,
"trustworthiness": score,
"num_gpus": num_gpus,
"num_cpus": num_cpus,
"no_cache": no_cache,
"train_path": self.args.train_path,
}
if not no_cache:
report_dict["prepare"] = prepare_time
return report_dict
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/bench_umap.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, List, Optional, Union
from pyspark.ml.evaluation import (
BinaryClassificationEvaluator,
MulticlassClassificationEvaluator,
)
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col, sum
from .base import BenchmarkBase
from .utils import inspect_default_params_from_func, with_benchmark
class BenchmarkRandomForestClassifier(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
from pyspark.ml.classification import RandomForestClassifier
# pyspark parameters
params = inspect_default_params_from_func(
RandomForestClassifier.__init__,
[
"featuresCol",
"labelCol",
"predictionCol",
"probabilityCol",
"rawPredictionCol",
"weightCol",
"leafCol",
],
)
# must replace None with the correct type
params["seed"] = int
# cuML parameters
params["n_streams"] = (
int,
"cuML parameter: number of parallel streams used for forest building",
)
params["max_batch_size"] = (
int,
"cuML parameter: maximum number of nodes that can be processed in a given batch",
)
return params
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_col: Optional[str],
) -> Dict[str, Any]:
assert label_col is not None
assert self.args is not None
params = self.class_params
print(f"Passing {params} to RandomForestClassifier")
if self.args.num_gpus > 0:
from spark_rapids_ml.classification import RandomForestClassifier
rfc = RandomForestClassifier(
num_workers=self.args.num_gpus, verbose=self.args.verbose, **params
)
benchmark_string = "Spark Rapids ML RandomForestClassifier"
else:
from pyspark.ml.classification import (
RandomForestClassifier as SparkRandomForestClassifier,
)
rfc = SparkRandomForestClassifier(**params) # type: ignore[assignment]
benchmark_string = "Spark ML RandomForestClassifier"
rfc.setFeaturesCol(features_col)
rfc.setLabelCol(label_col)
model, training_time = with_benchmark(
f"{benchmark_string} training:", lambda: rfc.fit(train_df)
)
eval_df = train_df if transform_df is None else transform_df
df_with_preds = model.transform(eval_df)
# model does not yet have col getters setters and uses default value for prediction col
prediction_col = model.getOrDefault(model.predictionCol)
probability_col = model.getOrDefault(model.probabilityCol)
# run a simple dummy computation to trigger transform. count is short
# circuited due to pandas_udf used internally
_, transform_time = with_benchmark(
f"{benchmark_string} transform:",
lambda: df_with_preds.agg(sum(prediction_col)).collect(),
)
if model.numClasses == 2:
# binary classification
evaluator: Union[
BinaryClassificationEvaluator, MulticlassClassificationEvaluator
] = (
BinaryClassificationEvaluator()
.setRawPredictionCol(probability_col)
.setLabelCol(label_col)
)
else:
evaluator = (
MulticlassClassificationEvaluator()
.setPredictionCol(prediction_col)
.setLabelCol(label_col)
)
accuracy = evaluator.evaluate(df_with_preds)
print(f"{benchmark_string} accuracy: {accuracy}")
results = {
"training_time": training_time,
"transform_time": transform_time,
"accuracy": accuracy,
}
return results
class BenchmarkRandomForestRegressor(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
from pyspark.ml.regression import RandomForestRegressor
params = inspect_default_params_from_func(
RandomForestRegressor,
["featuresCol", "labelCol", "predictionCol", "weightCol", "leafCol"],
)
# must replace None with the correct type
params["seed"] = int
# cuML parameters
params["n_streams"] = (
int,
"cuML parameter: number of parallel streams used for forest building",
)
params["max_batch_size"] = (
int,
"cuML parameter: maximum number of nodes that can be processed in a given batch",
)
return params
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_col: Optional[str],
) -> Dict[str, Any]:
assert label_col is not None
params = self.class_params
print(f"Passing {params} to RandomForestRegressor")
if self.args.num_gpus > 0:
from spark_rapids_ml.regression import RandomForestRegressor
rf = RandomForestRegressor(
num_workers=self.args.num_gpus, verbose=self.args.verbose, **params
)
benchmark_string = "Spark Rapids ML RandomForestRegressor"
else:
from pyspark.ml.regression import (
RandomForestRegressor as SparkRandomForestRegressor,
)
rf = SparkRandomForestRegressor(**params) # type: ignore[assignment]
benchmark_string = "Spark ML RandomForestRegressor"
rf.setFeaturesCol(features_col)
rf.setLabelCol(label_col)
model, training_time = with_benchmark(
f"{benchmark_string} training:", lambda: rf.fit(train_df)
)
eval_df = train_df if transform_df is None else transform_df
df_with_preds = model.transform(eval_df)
# model does not yet have col getters setters and uses default value for prediction col
prediction_col = model.getOrDefault(model.predictionCol)
# run a simple dummy computation to trigger transform. count is short
# circuited due to pandas_udf used internally
_, transform_time = with_benchmark(
f"{benchmark_string} transform:",
lambda: df_with_preds.agg(sum(prediction_col)).collect(),
)
# compute prediction mse on training data
from pyspark.ml.evaluation import RegressionEvaluator
evaluator = (
RegressionEvaluator()
.setPredictionCol(prediction_col)
.setLabelCol(label_col)
)
rmse = evaluator.evaluate(df_with_preds)
print(f"{benchmark_string} RMSE: {rmse}")
results = {
"training_time": training_time,
"transform_time": transform_time,
"rmse": rmse,
}
return results
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/bench_random_forest.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, List, Optional, Union
import numpy as np
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import sum
from .base import BenchmarkBase
from .utils import inspect_default_params_from_func, with_benchmark
class BenchmarkLinearRegression(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
from pyspark.ml.regression import LinearRegression
params = inspect_default_params_from_func(
LinearRegression.__init__,
["featuresCol", "labelCol", "predictionCol", "weightCol"],
)
return params
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_name: Optional[str],
) -> Dict[str, Any]:
assert label_name is not None
params = self.class_params
print(f"Passing {params} to LinearRegression")
if self.args.num_gpus > 0:
from spark_rapids_ml.regression import LinearRegression
lr = LinearRegression(
num_workers=self.args.num_gpus, verbose=self.args.verbose, **params
)
benchmark_string = "Spark Rapids ML LinearRegression training"
else:
from pyspark.ml.regression import LinearRegression as SparkLinearRegression
lr = SparkLinearRegression(**params) # type: ignore[assignment]
benchmark_string = "Spark ML LinearRegression training"
lr.setFeaturesCol(features_col)
lr.setLabelCol(label_name)
model, fit_time = with_benchmark(benchmark_string, lambda: lr.fit(train_df))
# placeholder try block until hasSummary is supported in the gpu model
try:
if model.hasSummary:
print(f"total iterations: {model.summary.totalIterations}")
print(f"objective history: {model.summary.objectiveHistory}")
except AttributeError:
print("model does not have hasSummary attribute")
eval_df = train_df if transform_df is None else transform_df
df_with_preds = model.transform(eval_df)
# model does not yet have col getters setters and uses default value for prediction col
prediction_col = model.getOrDefault(model.predictionCol)
# run a simple dummy computation to trigger transform. count is short
# circuited due to pandas_udf used internally
_, transform_time = with_benchmark(
"Spark ML LinearRegression transform",
lambda: df_with_preds.agg(sum(prediction_col)).collect(),
)
# compute prediction mse on training data
from pyspark.ml.evaluation import RegressionEvaluator
evaluator = (
RegressionEvaluator()
.setPredictionCol(prediction_col)
.setLabelCol(label_name)
)
rmse = evaluator.evaluate(df_with_preds)
coefficients = np.array(model.coefficients)
coefs_l1 = np.sum(np.abs(coefficients))
coefs_l2 = np.sum(coefficients**2)
l2_penalty_factor = 0.5 * lr.getRegParam() * (1.0 - lr.getElasticNetParam())
l1_penalty_factor = lr.getRegParam() * lr.getElasticNetParam()
full_objective = (
0.5 * (rmse**2)
+ coefs_l2 * l2_penalty_factor
+ coefs_l1 * l1_penalty_factor
)
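# i.e. full_objective = 0.5 * mse + regParam * (0.5 * (1 - elasticNetParam) * ||w||_2^2
#                                               + elasticNetParam * ||w||_1),
# the elastic-net objective reconstructed from the evaluated rmse (mse = rmse**2)
# and the fitted coefficients w.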
# note: results for spark ML and spark rapids ml will currently match in all regularization
# cases only if features and labels were standardized in the original dataset. Otherwise,
# they will match only if regParam = 0 or elasticNetParam = 1.0 (aka Lasso)
print(
f"RMSE: {rmse}, coefs l1: {coefs_l1}, coefs l2^2: {coefs_l2}, "
f"full_objective: {full_objective}, intercept: {model.intercept}"
)
results = {
"fit_time": fit_time,
"transform_time": transform_time,
"RMSE": rmse,
"coefs_l1": coefs_l1,
"coefs_l2": coefs_l2,
"full_objective": full_objective,
"intercept": model.intercept,
}
return results
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/bench_linear_regression.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from pyspark.ml.feature import StandardScaler, VectorAssembler
from pyspark.ml.functions import array_to_vector, vector_to_array
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col, sum
from pyspark.sql.types import DoubleType, StructField, StructType
from .base import BenchmarkBase
from .utils import inspect_default_params_from_func, with_benchmark
class BenchmarkPCA(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
from pyspark.ml.feature import PCA
params = inspect_default_params_from_func(
PCA.__init__,
[
"featuresCol",
"labelCol",
"predictionCol",
"probabilityCol",
"rawPredictionCol",
"weightCol",
"leafCol",
],
)
params["k"] = int
return params
def _add_extra_arguments(self) -> None:
self._parser.add_argument(
"--no_cache",
action="store_true",
default=False,
help="whether to enable dataframe repartition, cache and cout outside fit function",
)
def score(
self, pc_vectors: np.ndarray, transformed_df: DataFrame, transformed_col: str
) -> Tuple[float, float]:
"""Computes a measure of orthonormality of the pc_vectors: maximum absolute deviation from 1 from all norms
and absolute deviation from 0 of all dot products between the pc_vectors. This should be very small.
Also computes the sum of squares of the transformed_col vectors. The larger the better.
PCA projection should have been performed on mean removed input for the second metric to be relevant.
Parameters
----------
pc_vectors
principal component vectors.
pc_vectors.shape assumed to be (dim, k).
transformed_df
PCAModel transformed data.
transformed_col
Name of column with the PCA transformed data.
Note: This column is expected to be of pyspark sql 'array' type.
Returns
-------
Tuple[float, float]
The components of the returned tuple are respectively the orthonormality score and
the sum of squares of the transformed vectors.
"""
pc_vectors = np.array(pc_vectors, dtype=np.float64)
k = pc_vectors.shape[1]
pc_vectors_self_prod = np.matmul(np.transpose(pc_vectors), pc_vectors)
orthonormality_score = np.max(np.abs(np.eye(k) - pc_vectors_self_prod))
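# i.e. orthonormality_score = max_ij |(I_k - P^T P)_ij| for P = pc_vectors;
# this is ~0 when the columns of P are orthonormal.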
def partition_score_udf(pdf_iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
partition_score = 0.0
for pdf in pdf_iter:
transformed_vecs = np.array(
list(pdf[transformed_col]), dtype=np.float64
)
partition_score += np.sum(transformed_vecs**2)
yield pd.DataFrame({"partition_score": [partition_score]})
total_score = (
transformed_df.mapInPandas(
partition_score_udf, # type: ignore
StructType([StructField("partition_score", DoubleType(), True)]),
)
.agg(sum("partition_score").alias("total_score"))
.toPandas()
)
total_score = total_score["total_score"][0] # type: ignore
return orthonormality_score, total_score
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_name: Optional[str],
) -> Dict[str, Any]:
n_components = self.args.k
num_gpus = self.args.num_gpus
num_cpus = self.args.num_cpus
no_cache = self.args.no_cache
func_start_time = time.time()
first_col = train_df.dtypes[0][0]
first_col_type = train_df.dtypes[0][1]
is_array_col = True if "array" in first_col_type else False
is_vector_col = True if "vector" in first_col_type else False
is_single_col = is_array_col or is_vector_col
if not is_single_col:
input_cols = [c for c in train_df.schema.names]
if num_gpus > 0:
from spark_rapids_ml.feature import PCA
assert num_cpus <= 0
if not no_cache:
def gpu_cache_df(df: DataFrame) -> DataFrame:
df = df.repartition(num_gpus).cache()
df.count()
return df
train_df, prepare_time = with_benchmark(
"prepare session and dataset", lambda: gpu_cache_df(train_df)
)
params = self.class_params
print(f"Passing {params} to PCA")
output_col = "pca_features"
gpu_pca = (
PCA(num_workers=num_gpus, verbose=self.args.verbose, **params)
.setInputCol(features_col)
.setOutputCol(output_col)
)
gpu_model, fit_time = with_benchmark(
"gpu fit", lambda: gpu_pca.fit(train_df)
)
def gpu_transform(df: DataFrame) -> DataFrame:
transformed_df = gpu_model.transform(df)
transformed_df.select((col(output_col)[0]).alias("zero")).agg(
sum("zero")
).collect()
return transformed_df
transformed_df, transform_time = with_benchmark(
"gpu transform", lambda: gpu_transform(train_df)
)
total_time = round(time.time() - func_start_time, 2)
print(f"gpu total took: {total_time} sec")
# spark ml does not remove the mean in the transformed features, so do that here
# needed for scoring
feature_col = output_col
df_for_scoring = transformed_df.select(
array_to_vector(col(output_col)).alias(output_col + "_vec")
)
standard_scaler = (
StandardScaler()
.setWithStd(False)
.setWithMean(True)
.setInputCol(output_col + "_vec")
.setOutputCol(output_col + "_mean_removed")
)
scaler_model = standard_scaler.fit(df_for_scoring)
df_for_scoring = (
scaler_model.transform(df_for_scoring)
.drop(output_col + "_vec")
.select(
vector_to_array(col(output_col + "_mean_removed")).alias(
feature_col
)
)
)
pc_for_scoring = gpu_model.pc.toArray()
if num_cpus > 0:
from pyspark.ml.feature import PCA as SparkPCA
assert num_gpus <= 0
if is_array_col:
vector_df = train_df.select(
array_to_vector(train_df[first_col]).alias(first_col)
)
elif not is_vector_col:
vector_assembler = VectorAssembler(outputCol="features").setInputCols(
input_cols
)
vector_df = vector_assembler.transform(train_df).drop(*input_cols)
first_col = "features"
else:
vector_df = train_df
if not no_cache:
def cpu_cache_df(df: DataFrame) -> DataFrame:
df = df.cache()
df.count()
return df
vector_df, prepare_time = with_benchmark(
"prepare dataset", lambda: cpu_cache_df(vector_df)
)
output_col = "pca_features"
params = self.class_params
print(f"Passing {params} to SparkPCA")
cpu_pca = SparkPCA(**params).setInputCol(first_col).setOutputCol(output_col)
cpu_model, fit_time = with_benchmark(
"cpu fit", lambda: cpu_pca.fit(vector_df)
)
def cpu_transform(df: DataFrame) -> DataFrame:
transformed_df = cpu_model.transform(df)
transformed_df.select(
(vector_to_array(col(output_col))[0]).alias("zero")
).agg(sum("zero")).collect()
return transformed_df
transformed_df, transform_time = with_benchmark(
"cpu transform", lambda: cpu_transform(vector_df)
)
total_time = round(time.time() - func_start_time, 2)
print(f"cpu total took: {total_time} sec")
# spark ml does not remove the mean in the transformed features, so do that here
# needed for scoring
standard_scaler = (
StandardScaler()
.setWithStd(False)
.setWithMean(True)
.setInputCol(output_col)
.setOutputCol(output_col + "_mean_removed")
)
scaler_model = standard_scaler.fit(transformed_df)
transformed_df = scaler_model.transform(transformed_df).drop(output_col)
feature_col = output_col + "_mean_removed"
pc_for_scoring = cpu_model.pc.toArray()
df_for_scoring = transformed_df.select(
vector_to_array(col(feature_col)).alias(feature_col)
)
orthonormality, variance = self.score(
pc_for_scoring, df_for_scoring, feature_col
)
print(f"orthonormality score: {orthonormality}, variance score {variance}")
report_dict = {
"fit": fit_time,
"transform": transform_time,
"total": total_time,
"orthonormality": orthonormality,
"variance": variance,
"k": self.args.k,
"num_gpus": self.args.num_gpus,
"num_cpus": self.args.num_cpus,
"no_cache": self.args.no_cache,
"train_path": self.args.train_path,
}
if not no_cache:
report_dict["prepare"] = prepare_time
return report_dict
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/bench_pca.py |
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/__init__.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, List, Optional, Union
import numpy as np
from pyspark.ml.feature import StandardScaler
from pyspark.ml.functions import array_to_vector, vector_to_array
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col, sum
from .base import BenchmarkBase
from .utils import inspect_default_params_from_func, with_benchmark
class BenchmarkLogisticRegression(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
from pyspark.ml.classification import LogisticRegression
params = inspect_default_params_from_func(
LogisticRegression.__init__,
[
"featuresCol",
"labelCol",
"predictionCol",
"weightCol",
"elasticNetParam",
"threshold",
"thresholds",
"aggregationDepth",
"maxBlockSizeInMB",
"lowerBoundsOnCoefficients",
"upperBoundsOnCoefficients",
"lowerBoundsOnIntercepts",
"upperBoundsOnIntercepts",
],
)
return params
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_name: Optional[str],
) -> Dict[str, Any]:
assert label_name is not None
params = self.class_params
print(f"Passing {params} to LogisticRegression")
if self.args.num_gpus > 0:
from spark_rapids_ml.classification import LogisticRegression
lr = LogisticRegression(num_workers=self.args.num_gpus, **params)
benchmark_string = "Spark Rapids ML LogisticRegression training"
else:
from pyspark.ml.classification import (
LogisticRegression as SparkLogisticRegression,
)
lr = SparkLogisticRegression(**params) # type: ignore[assignment]
benchmark_string = "Spark ML LogisticRegression training"
lr.setFeaturesCol(features_col)
lr.setLabelCol(label_name)
model, fit_time = with_benchmark(benchmark_string, lambda: lr.fit(train_df))
# placeholder try block until hasSummary is supported in the gpu model
try:
if model.hasSummary:
print(f"total iterations: {model.summary.totalIterations}")
print(f"objective history: {model.summary.objectiveHistory}")
except AttributeError:
print("model does not have hasSummary attribute")
eval_df = train_df if transform_df is None else transform_df
eval_df_with_preds = model.transform(eval_df)
train_df_with_preds = model.transform(train_df)
# model does not yet have col getters setters and uses default value for prediction col
prediction_col = model.getPredictionCol()
probability_col = model.getProbabilityCol()
# run a simple dummy computation to trigger transform. count is short
# circuited due to pandas_udf used internally
_, transform_time = with_benchmark(
"Spark ML LogisticRegression transform",
lambda: eval_df_with_preds.agg(sum(prediction_col)).collect(),
)
from pyspark.ml.evaluation import (
BinaryClassificationEvaluator,
MulticlassClassificationEvaluator,
)
# TODO: support multiple classes
# binary classification
evaluator_train: Union[
BinaryClassificationEvaluator, MulticlassClassificationEvaluator
] = (
MulticlassClassificationEvaluator()
.setMetricName("logLoss") # type:ignore
.setPredictionCol(prediction_col)
.setProbabilityCol(probability_col)
.setLabelCol(label_name)
)
evaluator_test: Union[
BinaryClassificationEvaluator, MulticlassClassificationEvaluator
] = (
BinaryClassificationEvaluator()
.setRawPredictionCol(probability_col)
.setLabelCol(label_name)
)
log_loss = evaluator_train.evaluate(train_df_with_preds)
coefficients = np.array(model.coefficients)
coefs_l1 = np.sum(np.abs(coefficients))
coefs_l2 = np.sum(coefficients**2)
# TODO: add the l1 regularization penalty term to the full objective when we support it
train_full_objective = log_loss + 0.5 * lr.getRegParam() * coefs_l2
eval_auc = evaluator_test.evaluate(eval_df_with_preds)
print(f"{benchmark_string} train_full_objective: {train_full_objective}")
print(f"{benchmark_string} eval_auc: {eval_auc}")
results = {
"fit_time": fit_time,
"transform_time": transform_time,
"train_full_objective": train_full_objective,
"eval_auc": eval_auc,
"num_gpus": self.args.num_gpus,
"num_cpus": self.args.num_cpus,
"train_path": self.args.train_path,
"maxIter": params["maxIter"],
"tol": params["tol"],
"regParam": params["regParam"],
"standardization": params["standardization"],
}
return results
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/bench_logistic_regression.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from typing import Any, Dict, Iterator, List, Optional, Union
import numpy as np
import pandas as pd
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.functions import array_to_vector, vector_to_array
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import array, col, sum
from pyspark.sql.types import DoubleType, StructField, StructType
from .base import BenchmarkBase
from .utils import inspect_default_params_from_func, with_benchmark
class BenchmarkKMeans(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
from pyspark.ml.clustering import KMeans
params = inspect_default_params_from_func(
KMeans.__init__,
[
"distanceMeasure",
"featuresCol",
"labelCol",
"predictionCol",
"probabilityCol",
"rawPredictionCol",
"weightCol",
"leafCol",
],
)
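# seed defaults to 1, presumably for reproducible benchmark runs
# (cf. random_state=1 in gen_data.py)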
params["seed"] = 1
return params
def _add_extra_arguments(self) -> None:
self._parser.add_argument(
"--no_cache",
action="store_true",
default=False,
help="whether to enable dataframe repartition, cache and cout outside fit function",
)
def score(
self,
centers: np.ndarray,
transformed_df: DataFrame,
features_col: str,
prediction_col: str,
) -> float:
"""Computes the sum of squared euclidean distances between vectors in the features_col
of transformed_df and the vector in centers having the corresponding index value in prediction_col.
This is the objective function being optimized by the kmeans algorithm. It is also referred to as inertia.
Parameters
----------
centers
KMeans computed center/centroid vectors.
transformed_df
KMeansModel transformed data.
features_col
Name of features column.
Note: this column is assumed to be of pyspark sql 'array' type.
prediction_col
Name of prediction column (index of nearest centroid, as computed by KMeansModel.transform)
Returns
-------
float
The computed inertia score, per description above.
"""
sc = transformed_df.rdd.context
centers_bc = sc.broadcast(centers)
def partition_score_udf(
pdf_iter: Iterator[pd.DataFrame],
) -> Iterator[pd.DataFrame]:
local_centers = centers_bc.value.astype(np.float64)
partition_score = 0.0
for pdf in pdf_iter:
input_vecs = np.array(list(pdf[features_col]), dtype=np.float64)
predictions = list(pdf[prediction_col])
center_vecs = local_centers[predictions, :]
partition_score += np.sum((input_vecs - center_vecs) ** 2)
yield pd.DataFrame({"partition_score": [partition_score]})
total_score = (
transformed_df.mapInPandas(
partition_score_udf, # type: ignore
StructType([StructField("partition_score", DoubleType(), True)]),
)
.agg(sum("partition_score").alias("total_score"))
.toPandas()
) # type: ignore
total_score = total_score["total_score"][0] # type: ignore
return total_score
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_name: Optional[str],
) -> Dict[str, Any]:
num_gpus = self.args.num_gpus
num_cpus = self.args.num_cpus
no_cache = self.args.no_cache
train_path = self.args.train_path
func_start_time = time.time()
first_col = train_df.dtypes[0][0]
first_col_type = train_df.dtypes[0][1]
is_array_col = True if "array" in first_col_type else False
is_vector_col = True if "vector" in first_col_type else False
is_single_col = is_array_col or is_vector_col
if not is_single_col:
input_cols = [c for c in train_df.schema.names]
output_col = "cluster_idx"
if num_gpus > 0:
from spark_rapids_ml.clustering import KMeans
assert num_cpus <= 0
if not no_cache:
def gpu_cache_df(df: DataFrame) -> DataFrame:
df = df.repartition(num_gpus).cache()
df.count()
return df
train_df, prepare_time = with_benchmark(
"prepare dataset", lambda: gpu_cache_df(train_df)
)
params = self.class_params
print(f"Passing {params} to KMeans")
gpu_estimator = KMeans(
num_workers=num_gpus, verbose=self.args.verbose, **params
).setPredictionCol(output_col)
if is_single_col:
gpu_estimator = gpu_estimator.setFeaturesCol(first_col)
else:
gpu_estimator = gpu_estimator.setFeaturesCols(input_cols)
gpu_model, fit_time = with_benchmark(
"gpu fit", lambda: gpu_estimator.fit(train_df)
)
transformed_df = gpu_model.setPredictionCol(output_col).transform(train_df)
# count doesn't trigger compute so do something not too compute intensive
_, transform_time = with_benchmark(
"gpu transform", lambda: transformed_df.agg(sum(output_col)).collect()
)
total_time = round(time.time() - func_start_time, 2)
print(f"gpu total time: {total_time} sec")
df_for_scoring = transformed_df
feature_col = first_col
if not is_single_col:
feature_col = "features_array"
df_for_scoring = transformed_df.select(
array(*input_cols).alias("features_array"), output_col
)
elif is_vector_col:
df_for_scoring = transformed_df.select(
vector_to_array(col(feature_col)).alias(feature_col), output_col
)
cluster_centers = gpu_model.cluster_centers_
if num_cpus > 0:
from pyspark.ml.clustering import KMeans as SparkKMeans
assert num_gpus <= 0
if is_array_col:
vector_df = train_df.select(
array_to_vector(train_df[first_col]).alias(first_col)
)
elif not is_vector_col:
vector_assembler = VectorAssembler(outputCol="features").setInputCols(
input_cols
)
vector_df = vector_assembler.transform(train_df).drop(*input_cols)
first_col = "features"
else:
vector_df = train_df
if not no_cache:
def cpu_cache_df(df: DataFrame) -> DataFrame:
df = df.cache()
df.count()
return df
vector_df, prepare_time = with_benchmark(
"prepare dataset", lambda: cpu_cache_df(vector_df)
)
params = self.class_params
print(f"Passing {params} to KMeans")
cpu_estimator = (
SparkKMeans(**params)
.setFeaturesCol(first_col)
.setPredictionCol(output_col)
)
cpu_model, fit_time = with_benchmark(
"cpu fit", lambda: cpu_estimator.fit(vector_df)
)
print(
f"spark ML: iterations: {cpu_model.summary.numIter}, inertia: {cpu_model.summary.trainingCost}"
)
def cpu_transform(df: DataFrame) -> DataFrame:
transformed_df = cpu_model.transform(df)
transformed_df.agg(sum(output_col)).collect()
return transformed_df
transformed_df, transform_time = with_benchmark(
"cpu transform", lambda: cpu_transform(vector_df)
)
total_time = round(time.time() - func_start_time, 2)
print(f"cpu total took: {total_time} sec")
feature_col = first_col
df_for_scoring = transformed_df.select(
vector_to_array(col(feature_col)).alias(feature_col), output_col
)
cluster_centers = cpu_model.clusterCenters()
# either cpu or gpu mode is run, not both in same run
score = self.score(
np.array(cluster_centers), df_for_scoring, feature_col, output_col
)
# note: seems that inertia matches score at iterations-1
print(f"score: {score}")
result = {
"fit_time": fit_time,
"transform_time": transform_time,
"total_time": total_time,
"score": score,
"k": self.args.k,
"maxIter": self.args.maxIter,
"tol": self.args.tol,
"num_gpus": num_gpus,
"num_cpus": num_cpus,
"no_cache": no_cache,
"train_path": train_path,
}
return result
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/bench_kmeans.py |
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from distutils.util import strtobool
from time import time
from typing import Any, Callable, Dict, List
from pyspark.sql import SparkSession
class WithSparkSession(object):
def __init__(self, confs: List[str], shutdown: bool = True) -> None:
builder = SparkSession.builder
for conf in confs:
key, value = (conf.split("=")[0], "=".join(conf.split("=")[1:]))
builder = builder.config(key, value)
self.spark = builder.getOrCreate()
self.shutdown = shutdown
def __enter__(self) -> SparkSession:
return self.spark
def __exit__(self, *args: Any) -> None:
if self.shutdown:
print("stopping spark session")
self.spark.stop()
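# Example usage (a minimal sketch; confs are "key=value" strings, as passed via
# --spark_confs on the benchmark command line):
#   with WithSparkSession(["spark.master=local[*]"], shutdown=True) as spark:
#       spark.range(10).count()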
def with_benchmark(phrase: str, action: Callable) -> Any:
start = time()
result = action()
end = time()
print("-" * 100)
duration = round(end - start, 2)
print("{}: {} seconds".format(phrase, duration))
print("-" * 100)
return result, duration
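# Example usage (a minimal sketch): callers unpack the (result, duration) tuple, e.g.
#   model, fit_time = with_benchmark("gpu fit", lambda: estimator.fit(train_df))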
def inspect_default_params_from_func(
func: Callable, unsupported_set: List[str] = []
) -> Dict[str, Any]:
"""
Returns a dictionary of parameters and their default value of function fn.
Only the parameters with a default value will be included.
"""
sig = inspect.signature(func)
filtered_params_dict = {}
for parameter in sig.parameters.values():
# Remove parameters without a default value and those in the unsupported_set
if (
parameter.default is not parameter.empty
and parameter.default is not None
and parameter.name not in unsupported_set
):
filtered_params_dict[parameter.name] = parameter.default
return filtered_params_dict
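# Illustrative sketch (dummy function, not part of this module):
#   def f(a=1, b=None, c=3): ...
#   inspect_default_params_from_func(f, unsupported_set=["c"])  # -> {"a": 1}
# b is dropped because its default is None; c is excluded by unsupported_set.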
def to_bool(literal: str) -> bool:
return bool(strtobool(literal))
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/utils.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from typing import Any, Dict, List, Optional, Union
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.functions import array_to_vector
from pyspark.sql import DataFrame, SparkSession
from benchmark.base import BenchmarkBase
from benchmark.utils import with_benchmark
class BenchmarkNearestNeighbors(BenchmarkBase):
def _supported_class_params(self) -> Dict[str, Any]:
params = {"n_neighbors": 200}
return params
def _add_extra_arguments(self) -> None:
self._parser.add_argument(
"--no_cache",
action="store_true",
default=False,
help="whether to enable dataframe repartition, cache and cout outside fit function",
)
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_name: Optional[str],
) -> Dict[str, Any]:
"""
This function evaluates the runtimes of Spark Rapids ML NearestNeighbors and Spark LSH, but
should not be used to compare the two. The purpose is to help understand GPU behavior
and performance.
"""
num_gpus = self.args.num_gpus
num_cpus = self.args.num_cpus
no_cache = self.args.no_cache
n_neighbors = self.args.n_neighbors
func_start_time = time.time()
first_col = train_df.dtypes[0][0]
first_col_type = train_df.dtypes[0][1]
is_array_col = True if "array" in first_col_type else False
is_vector_col = True if "vector" in first_col_type else False
is_single_col = is_array_col or is_vector_col
if not is_single_col:
input_cols = [c for c in train_df.schema.names]
if num_gpus > 0:
from spark_rapids_ml.knn import NearestNeighbors, NearestNeighborsModel
assert num_cpus <= 0
if not no_cache:
def gpu_cache_df(df: DataFrame) -> DataFrame:
df = df.repartition(num_gpus).cache()
df.count()
return df
train_df, prepare_time = with_benchmark(
"prepare dataset", lambda: gpu_cache_df(train_df)
)
params = self.class_params
gpu_estimator = NearestNeighbors(
num_workers=num_gpus, verbose=self.args.verbose, **params
)
if is_single_col:
gpu_estimator = gpu_estimator.setInputCol(first_col)
else:
gpu_estimator = gpu_estimator.setInputCols(input_cols)
gpu_model, fit_time = with_benchmark(
"gpu fit", lambda: gpu_estimator.fit(train_df)
)
def transform(model: NearestNeighborsModel, df: DataFrame) -> DataFrame:
(item_df_withid, query_df_withid, knn_df) = model.kneighbors(df)
knn_df.count()
return knn_df
knn_df, transform_time = with_benchmark(
"gpu transform", lambda: transform(gpu_model, train_df)
)
total_time = round(time.time() - func_start_time, 2)
print(f"gpu total took: {total_time} sec")
if num_cpus > 0:
assert num_gpus <= 0
if is_array_col:
vector_df = train_df.select(
array_to_vector(train_df[first_col]).alias(first_col)
)
elif not is_vector_col:
vector_assembler = VectorAssembler(outputCol="features").setInputCols(
input_cols
)
vector_df = vector_assembler.transform(train_df).drop(*input_cols)
first_col = "features"
else:
vector_df = train_df
if not no_cache:
def cpu_cache_df(df: DataFrame) -> DataFrame:
df = df.cache()
df.count()
return df
vector_df, prepare_time = with_benchmark(
"prepare dataset", lambda: cpu_cache_df(vector_df)
)
from pyspark.ml.feature import (
BucketedRandomProjectionLSH,
BucketedRandomProjectionLSHModel,
)
cpu_estimator = BucketedRandomProjectionLSH(
inputCol=first_col,
outputCol="hashes",
bucketLength=2.0,
numHashTables=3,
)
cpu_model, fit_time = with_benchmark(
"cpu fit time", lambda: cpu_estimator.fit(vector_df)
)
def cpu_transform(
model: BucketedRandomProjectionLSHModel, df: DataFrame, n_neighbors: int
) -> None:
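# Collect all rows to the driver and run approxNearestNeighbors once per
# query vector (one lookup per row). This is intentionally simple and slow;
# per the docstring above, it is not meant as a head-to-head comparison.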
queries = df.collect()
for row in queries:
query = row[first_col]
knn_df = model.approxNearestNeighbors(
dataset=df, key=query, numNearestNeighbors=n_neighbors
)
knn_df.count()
_, transform_time = with_benchmark(
"cpu transform",
lambda: cpu_transform(cpu_model, vector_df, n_neighbors),
)
total_time = round(time.time() - func_start_time, 2)
print(f"cpu total took: {total_time} sec")
report_dict = {
"fit": fit_time,
"transform": transform_time,
"total_time": total_time,
"n_neighbors": n_neighbors,
"num_gpus": num_gpus,
"num_cpus": num_cpus,
"no_cache": no_cache,
"train_path": self.args.train_path,
}
if not no_cache:
report_dict["prepare"] = prepare_time
return report_dict
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/bench_nearest_neighbors.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pprint
import subprocess
from abc import abstractmethod
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
import pandas as pd
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.functions import array_to_vector
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import col
from .utils import WithSparkSession, to_bool, with_benchmark
class BenchmarkBase:
"""Based class for benchmarking.
This class handles command line argument parsing and execution of the benchmark.
"""
_parser: argparse.ArgumentParser
_args: argparse.Namespace
_class_params: Dict[str, Any]
def __init__(self, argv: List[Any]) -> None:
"""Parses command line arguments for the class under test."""
print("=" * 100)
print(self.__class__.__name__)
print("=" * 100)
# common params for all benchmark classes
self._parser = argparse.ArgumentParser()
self._parser.add_argument(
"--num_gpus",
type=int,
default=1,
help="number of GPUs to use. If num_gpus > 0, will run with the number of dataset partitions equal to num_gpus.",
)
self._parser.add_argument(
"--num_cpus",
type=int,
default=6,
help="number of CPUs to use",
)
self._parser.add_argument(
"--num_runs",
type=int,
default=1,
help="number of benchmark iterations (for cold/warm runs)",
)
self._parser.add_argument(
"--report_path", type=str, default="", help="path to save benchmark results"
)
self._parser.add_argument(
"--train_path",
action="append",
default=[],
required=True,
help="path to parquet data for training",
)
self._parser.add_argument(
"--transform_path",
action="append",
default=[],
help="path to parquet data for transform",
)
self._parser.add_argument("--spark_confs", action="append", default=[])
self._parser.add_argument(
"--no_shutdown",
action="store_true",
help="do not stop spark session when finished",
)
self._parser.add_argument(
"--verbose",
action="store_const",
const=7,
default=0,
help="set cuml logging to max verbose level",
)
self._add_class_arguments()
self._add_extra_arguments()
self._parse_arguments(argv)
def _add_extra_arguments(self) -> None:
"""Add command line arguments for the benchmarking environment."""
pass
def _add_class_arguments(self) -> None:
"""
Add command line arguments for the parameters to be supplied to the class under test.
The default implementation automatically constructs arguments from the dictionary returned
from the :py:func:`_supported_class_params()` method.
"""
for name, value in self._supported_class_params().items():
(value, help) = value if isinstance(value, tuple) else (value, None)
help = "PySpark parameter" if help is None else help
if value is None:
raise RuntimeError("Must convert None value to the correct type")
elif type(value) is type:
# value is already type
self._parser.add_argument("--" + name, type=value, help=help)
elif type(value) is bool:
self._parser.add_argument(
"--" + name, type=to_bool, default=value, help=help
)
else:
# get the type from the value
self._parser.add_argument(
"--" + name, type=type(value), default=value, help=help
)
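# For example (sketch): a subclass whose _supported_class_params() returns
# {"k": int, "maxIter": 100} gets a "--k" argument (typed int, no default) and a
# "--maxIter" argument (typed int, default 100) added to its parser.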
def _supported_class_params(self) -> Dict[str, Any]:
"""
Return a dictionary of parameter names to values/types for the class under test.
These parameters will be exposed as command line arguments.
"""
return {}
def _parse_arguments(self, argv: List[Any]) -> None:
"""Parse all command line arguments, separating out the parameters for the class under test."""
pp = pprint.PrettyPrinter()
self._args = self._parser.parse_args(argv)
print("command line arguments:")
pp.pprint(vars(self._args))
supported_class_params = self._supported_class_params()
self._class_params = {
k: v
for k, v in vars(self._args).items()
if k in supported_class_params and v is not None
}
print("\nclass params:")
pp.pprint(self._class_params)
print()
@property
def args(self) -> argparse.Namespace:
"""Return all parsed command line arguments."""
return self._args
@property
def class_params(self) -> Dict[str, Any]:
return self._class_params
def git_revision(self) -> str:
rev = "unknown"
try:
rev = (
subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
.decode("ascii")
.strip()
)
except Exception:
pass
return rev
def input_dataframe(
self, spark: SparkSession, *paths: str
) -> Tuple[DataFrame, Union[str, List[str]], str]:
"""Return a Spark DataFrame for benchmarking, along with the input and label column names."""
assert self._args is not None
df = spark.read.parquet(*paths)
# The label column is named "label", which is hardcoded by gen_data.py
label_col = "label"
features_col = [c for c in df.schema.names if c != label_col]
features_col = features_col[0] if len(features_col) == 1 else features_col # type: ignore
selected_cols = []
if self._args.num_gpus == 0:
# convert to vector for CPU Spark, since it only supports vector feature types
if label_col in df.schema.names:
selected_cols.append(col(label_col))
if any(["array" in t[1] for t in df.dtypes]):
# Array Type
selected_cols.append(
array_to_vector(col(features_col)).alias("features") # type: ignore
)
features_col = "features" # type: ignore
elif not any(["vector" in t[1] for t in df.dtypes]):
# multi-cols
selected_cols.append(col("features"))
df = (
VectorAssembler()
.setInputCols(features_col)
.setOutputCol("features")
.transform(df)
.drop(*features_col)
)
features_col = "features" # type: ignore
else:
# Vector Type
selected_cols = [] # just use original df
train_df = df.select(*selected_cols) if len(selected_cols) > 0 else df
return train_df, features_col, label_col
def run(self) -> None:
"""Runs benchmarks for the class under test and"""
assert self._args is not None
run_results = []
git_revision = self.git_revision()
with WithSparkSession(
self._args.spark_confs, shutdown=(not self._args.no_shutdown)
) as spark:
for _ in range(self._args.num_runs):
train_df, features_col, label_col = self.input_dataframe(
spark, *self._args.train_path
)
transform_df: Optional[DataFrame] = None
if len(self._args.transform_path) > 0:
transform_df, _, _ = self.input_dataframe(
spark, *self._args.transform_path
)
benchmark_results, benchmark_time = with_benchmark(
"benchmark time: ",
lambda: self.run_once(
spark, train_df, features_col, transform_df, label_col
),
)
results = {
"datetime": datetime.now().isoformat(),
"git_hash": git_revision,
"benchmark_time": benchmark_time,
}
results.update(benchmark_results)
run_results.append(results)
# dictionary results
print("-" * 100)
print("Results (python dictionary):")
for i, results in enumerate(run_results):
print(f"{i}: {results}")
# tabular results
print("-" * 100)
print("Results (pandas DataFrame):")
report_pdf = pd.DataFrame(run_results)
print(report_pdf)
print("-" * 100)
# save results to disk
if self._args.report_path != "":
report_pdf.to_csv(self._args.report_path, mode="a")
@abstractmethod
def run_once(
self,
spark: SparkSession,
train_df: DataFrame,
features_col: Union[str, List[str]],
transform_df: Optional[DataFrame],
label_col: Optional[str],
) -> Dict[str, Any]:
"""Run a single iteration of benchmarks for the class under test, returning a summary of
timing and/or scoring results in dictionary form."""
raise NotImplementedError
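# --- Illustrative sketch (not part of the original file) ---
# A minimal concrete benchmark built on the abstract base above; the base class
# name, the estimator choice, and the "fit_time" result key are all assumptions
# for illustration only.
#
# class ExampleLinearRegressionBenchmark(BenchmarkBase):  # hypothetical name
#     def _supported_class_params(self) -> Dict[str, Any]:
#         # each value doubles as the CLI default; a tuple adds custom help text
#         return {"maxIter": 100, "regParam": (0.0, "regularization strength")}
#
#     def run_once(self, spark, train_df, features_col, transform_df, label_col):
#         from spark_rapids_ml.regression import LinearRegression
#
#         lr = LinearRegression(num_workers=self.args.num_gpus, **self.class_params)
#         lr.setFeaturesCol(features_col)
#         assert label_col is not None
#         lr.setLabelCol(label_col)
#         _, fit_time = with_benchmark("fit time: ", lambda: lr.fit(train_df))
#         return {"fit_time": fit_time}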
| spark-rapids-ml-branch-23.10 | python/benchmark/benchmark/base.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict, Optional
import numpy as np
from spark_rapids_ml.utils import (
_concat_and_free,
_get_default_params_from_func,
_unsupported_methods_attributes,
)
def test_get_default_params_from_func() -> None:
def dummy_func(a=1, b=2, c=3, d=4) -> None: # type: ignore
pass
params = _get_default_params_from_func(dummy_func, ["c"])
assert "c" not in params
assert len(params) == 3
assert params["a"] == 1
assert params["d"] == 4
def test_concat_and_free() -> None:
a = np.array([[0.0, 1.0], [2.0, 3.0]], order="F")
arr_list = [a, a]
concat = _concat_and_free(arr_list, order="C")
assert len(arr_list) == 0
assert concat.flags["C_CONTIGUOUS"]
assert not concat.flags["F_CONTIGUOUS"]
a = np.array([[0.0, 1.0], [2.0, 3.0]], order="C")
arr_list = [a, a]
concat = _concat_and_free(arr_list)
assert len(arr_list) == 0
assert not concat.flags["C_CONTIGUOUS"]
assert concat.flags["F_CONTIGUOUS"]
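# A minimal sketch of the memory-saving idea behind _concat_and_free
# (illustrative assumption; the real helper in spark_rapids_ml.utils may
# differ): copy each chunk into a preallocated output and drop the source
# reference as soon as it is copied, so peak memory stays near one extra copy.
def _concat_and_free_sketch(arr_list, order="F"):
    rows = sum(a.shape[0] for a in arr_list)
    cols = arr_list[0].shape[1]
    out = np.empty((rows, cols), dtype=arr_list[0].dtype, order=order)
    offset = 0
    while arr_list:
        a = arr_list.pop(0)  # release the reference so it can be reclaimed
        out[offset : offset + a.shape[0], :] = a
        offset += a.shape[0]
    return out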
def test_unsupported_methods_attributes() -> None:
a = 1
assert _unsupported_methods_attributes(a) == set()
class A:
@classmethod
def _param_mapping(cls) -> Dict[str, Optional[str]]:
return {"param1": "param2", "param3": None, "param4": ""}
assert _unsupported_methods_attributes(A) == set(
["param3", "getParam3", "setParam3", "param4", "getParam4", "setParam4"]
)
def test_clean_sparksession() -> None:
from .sparksession import CleanSparkSession
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(1)}
# Clean SparkSession with extra conf
with CleanSparkSession(conf) as spark:
assert spark.conf.get("spark.sql.execution.arrow.maxRecordsPerBatch") == "1"
# Clean SparkSession
with CleanSparkSession() as spark:
assert spark.conf.get("spark.sql.execution.arrow.maxRecordsPerBatch") == "10000"
# Test Nested SparkSession
with CleanSparkSession(conf) as spark:
assert spark.conf.get("spark.sql.execution.arrow.maxRecordsPerBatch") == "1"
# Nested SparkSession will reset the conf
with CleanSparkSession() as spark:
assert (
spark.conf.get("spark.sql.execution.arrow.maxRecordsPerBatch")
== "10000"
)
# The conf has been reset.
assert spark.conf.get("spark.sql.execution.arrow.maxRecordsPerBatch") == "10000"
| spark-rapids-ml-branch-23.10 | python/tests/test_utils.py |
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import shutil
import subprocess
import tempfile
from typing import Generator, List
import cupy
import pytest
from pyspark.sql import SparkSession
dir_path = os.path.dirname(os.path.realpath(__file__))
gpu_discovery_script_path = f"{dir_path}/discover_gpu.sh"
def _get_devices() -> List[str]:
"""This works only if driver is the same machine of worker."""
completed = subprocess.run(gpu_discovery_script_path, stdout=subprocess.PIPE)
assert completed.returncode == 0, "Failed to execute discovery script."
msg = completed.stdout.decode("utf-8")
result = json.loads(msg)
addresses = result["addresses"]
return addresses
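# The discovery script is expected to print Spark's standard GPU resource
# discovery JSON, for example (illustrative):
#   {"name": "gpu", "addresses": ["0", "1"]}
# Only the "addresses" field is consumed here.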
_gpu_number = min(len(_get_devices()), cupy.cuda.runtime.getDeviceCount())
# Restrict the maximum number of GPUs to use
_gpu_number = min(_gpu_number, 4)
@pytest.fixture
def gpu_number() -> int:
return _gpu_number
@pytest.fixture
def tmp_path() -> Generator[str, None, None]:
path = tempfile.mkdtemp(prefix="spark_rapids_ml_tests_")
yield path
shutil.rmtree(path)
_default_conf = {
"spark.master": f"local[{_gpu_number}]",
"spark.python.worker.reuse": "false",
"spark.driver.host": "127.0.0.1",
"spark.task.maxFailures": "1",
"spark.driver.memory": "5g",
"spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled": "false",
"spark.sql.pyspark.jvmStacktrace.enabled": "true",
}
def _get_spark() -> SparkSession:
builder = SparkSession.builder.appName(name="spark-rapids-ml python tests")
for k, v in _default_conf.items():
builder.config(k, v)
spark = builder.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
logging.getLogger("pyspark").setLevel(logging.WARN)
return spark
_spark = _get_spark()
def get_spark_i_know_what_i_am_doing() -> SparkSession:
"""
Get the current SparkSession.
    This should almost never be called directly; instead, call
    with_spark_session to obtain a session.
    This guarantees that the session and its config are set up in a repeatable way.
"""
return _spark
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
def pytest_configure(config: pytest.Config) -> None:
config.addinivalue_line("markers", "slow: mark as slow to run")
config.addinivalue_line("markers", "compat: mark as compatibility test")
def pytest_collection_modifyitems(
config: pytest.Config, items: List[pytest.Item]
) -> None:
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
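# Usage note (illustrative): run `pytest --runslow` to include tests marked with
# @pytest.mark.slow; without the flag, the hook above marks them as skipped.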
| spark-rapids-ml-branch-23.10 | python/tests/conftest.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import numpy as np
import pandas as pd
import pytest
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, RegressionEvaluator
from spark_rapids_ml.metrics.MulticlassMetrics import MulticlassMetrics
from spark_rapids_ml.metrics.RegressionMetrics import RegressionMetrics
from .sparksession import CleanSparkSession
def get_multi_class_metrics(pdf: pd.DataFrame, num_classes: int) -> MulticlassMetrics:
confusion = pdf.groupby(["label", "prediction"]).size().reset_index(name="total")
tp_by_class = {}
fp_by_class = {}
label_count_by_class = {}
label_count = 0
for i in range(num_classes):
tp_by_class[float(i)] = 0.0
label_count_by_class[float(i)] = 0.0
fp_by_class[float(i)] = 0.0
for index, row in confusion.iterrows():
label_count += row.total
label_count_by_class[row.label] += row.total
if row.label == row.prediction:
tp_by_class[row.label] += row.total
else:
fp_by_class[row.prediction] += row.total
return MulticlassMetrics(
tp=tp_by_class,
fp=fp_by_class,
label=label_count_by_class,
label_count=label_count,
)
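# Note: the counts above follow the usual confusion-matrix conventions:
# tp[c] counts rows with label == prediction == c, fp[c] counts rows predicted
# as c whose true label differs, and label[c] counts rows whose true label is c.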
@pytest.mark.parametrize("num_classes", [4])
@pytest.mark.parametrize(
"metric_name",
MulticlassMetrics.SUPPORTED_MULTI_CLASS_METRIC_NAMES,
)
def test_multi_class_metrics(
num_classes: int,
metric_name: str,
) -> None:
columns = ["prediction", "label"]
np.random.seed(10)
pdf = pd.DataFrame(
np.random.randint(0, num_classes, size=(1000, 2)), columns=columns
).astype(np.float64)
metrics = get_multi_class_metrics(pdf, num_classes)
with CleanSparkSession() as spark:
sdf = spark.createDataFrame(
pdf.to_numpy().tolist(), ", ".join([f"{n} double" for n in columns])
)
evaluator = MulticlassClassificationEvaluator(
predictionCol="prediction",
labelCol="label",
)
evaluator.setMetricName(metric_name) # type: ignore
        assert math.fabs(evaluator.evaluate(sdf) - metrics.evaluate(evaluator)) < 1e-6
def get_regression_metrics(
pdf: pd.DataFrame, label_name: str, prediction_name: str
) -> RegressionMetrics:
pdf = pdf.copy(True)
pdf.insert(1, "gap", pdf[label_name] - pdf[prediction_name])
mean = pdf.mean()
m2 = pdf.pow(2).sum()
l1 = pdf.abs().sum()
total_cnt = pdf.shape[0]
m2n = pdf.var(ddof=0) * pdf.shape[0]
return RegressionMetrics.create(mean, m2n, m2, l1, total_cnt)
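# Worked example of the summary statistics above: with label = [1, 2, 3] and
# prediction = [1, 1, 2], gap = label - prediction = [0, 1, 1], so per column
#   mean -> e.g. mean(gap) = 2/3
#   m2   -> sum of squares, e.g. sum(gap**2) = 2
#   l1   -> sum of absolute values, e.g. sum(|gap|) = 2
#   m2n  -> centered sum of squares, var(ddof=0) * n = (2/9) * 3 = 2/3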
@pytest.mark.parametrize("metric_name", ["rmse", "mse", "r2", "mae", "var"])
@pytest.mark.parametrize("max_record_batch", [100, 10000])
def test_regression_metrics(metric_name: str, max_record_batch: int) -> None:
columns = ["label", "prediction"]
np.random.seed(10)
pdf1 = pd.DataFrame(
np.random.uniform(low=-20, high=20, size=(1010, 2)), columns=columns
).astype(np.float64)
np.random.seed(100)
pdf2 = pd.DataFrame(
np.random.uniform(low=-20, high=20, size=(1000, 2)), columns=columns
).astype(np.float64)
metrics1 = get_regression_metrics(pdf1, columns[0], columns[1])
metrics2 = get_regression_metrics(pdf2, columns[0], columns[1])
metrics = metrics1.merge(metrics2)
pdf = pd.concat([pdf1, pdf2])
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
sdf = spark.createDataFrame(
pdf.to_numpy().tolist(), ", ".join([f"{n} double" for n in columns])
)
evaluator = RegressionEvaluator(
predictionCol="prediction",
labelCol="label",
)
evaluator.setMetricName(metric_name) # type: ignore
        assert math.fabs(evaluator.evaluate(sdf) - metrics.evaluate(evaluator)) < 1e-6
| spark-rapids-ml-branch-23.10 | python/tests/test_metrics.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Tuple, Union
import numpy as np
import pytest
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.tuning import CrossValidatorModel, ParamGridBuilder
from spark_rapids_ml.regression import RandomForestRegressor
from spark_rapids_ml.tuning import CrossValidator
from .sparksession import CleanSparkSession
from .utils import (
create_pyspark_dataframe,
feature_types,
idfn,
make_regression_dataset,
)
@pytest.mark.parametrize("feature_type", [feature_types.vector])
@pytest.mark.parametrize("data_type", [np.float32])
@pytest.mark.parametrize("data_shape", [(100, 8)], ids=idfn)
def test_crossvalidator(
tmp_path: str,
feature_type: str,
data_type: np.dtype,
data_shape: Tuple[int, int],
) -> None:
X, _, y, _ = make_regression_dataset(
datatype=data_type,
nrows=data_shape[0],
ncols=data_shape[1],
)
with CleanSparkSession() as spark:
df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X, y
)
assert label_col is not None
rfc = RandomForestRegressor()
rfc.setFeaturesCol(features_col)
rfc.setLabelCol(label_col)
evaluator = RegressionEvaluator()
evaluator.setLabelCol(label_col)
grid = ParamGridBuilder().addGrid(rfc.maxBins, [3, 5]).build()
cv = CrossValidator(
estimator=rfc,
estimatorParamMaps=grid,
evaluator=evaluator,
numFolds=2,
seed=101,
)
def check_cv(cv_est: Union[CrossValidator, CrossValidatorModel]) -> None:
assert isinstance(cv_est, (CrossValidator, CrossValidatorModel))
assert isinstance(cv_est.getEstimator(), RandomForestRegressor)
assert isinstance(cv_est.getEvaluator(), RegressionEvaluator)
assert cv_est.getNumFolds() == 2
assert cv_est.getSeed() == 101
assert cv_est.getEstimatorParamMaps() == grid
check_cv(cv)
path = tmp_path + "/cv"
cv_path = f"{path}/cv"
cv.write().overwrite().save(cv_path)
cv_loaded = CrossValidator.load(cv_path)
check_cv(cv_loaded)
cv_model = cv.fit(df)
check_cv(cv_model)
cv_model_path = f"{path}/cv-model"
cv_model.write().overwrite().save(cv_model_path)
cv_model_loaded = CrossValidatorModel.load(cv_model_path)
check_cv(cv_model_loaded)
assert evaluator.evaluate(cv_model.transform(df)) == evaluator.evaluate(
cv_model_loaded.transform(df)
)
| spark-rapids-ml-branch-23.10 | python/tests/test_tuning.py |
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| spark-rapids-ml-branch-23.10 | python/tests/__init__.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import json
from typing import Iterator, List, Tuple
import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import make_blobs
from spark_rapids_ml.common.cuml_context import CumlContext
from spark_rapids_ml.core import _CumlCommon
from spark_rapids_ml.utils import _get_spark_session, _is_local
from .conftest import _gpu_number
from .sparksession import CleanSparkSession
from .utils import create_pyspark_dataframe, feature_types
@pytest.mark.parametrize("gpu_number_used", range(1, _gpu_number + 1))
def test_ucx_over_nccl(
gpu_number_used: int, data_shape: Tuple[int, int] = (1000, 20)
) -> None:
"""
    If this test fails, run "export UCXPY_LOG_LEVEL=DEBUG" in the terminal
    to enable UCX logging, then retry.
"""
gpu_number = gpu_number_used
X, _ = make_blobs(n_samples=data_shape[0], n_features=data_shape[1], random_state=0)
with CleanSparkSession() as spark:
train_df, features_col, _ = create_pyspark_dataframe(
spark,
feature_type=feature_types.array,
dtype=np.float32, # type: ignore
data=X,
label=None,
)
dataset = train_df.repartition(gpu_number)
is_local = _is_local(_get_spark_session().sparkContext)
def _train_udf(pdf_iter: Iterator[pd.DataFrame]) -> pd.DataFrame:
from pyspark import BarrierTaskContext
context = BarrierTaskContext.get()
rank = context.partitionId()
# ucx requires nccl, and nccl initialization requires gpu assignment
_CumlCommon.set_gpu_device(context, is_local)
with CumlContext(
rank=rank,
nranks=gpu_number,
context=context,
enable=True,
require_ucx=True,
) as cc:
async def do_allGather() -> List[str]:
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(
None, context.allGather, json.dumps("hello")
)
return result
assert cc._ucx is not None
assert cc._ucx_port is not None
assert cc._ucx_eps is not None
assert cc._loop is not None
assert len(cc._ucx_eps) == gpu_number
assert len(cc._ucx._server_endpoints) == gpu_number
cc._loop.run_until_complete(asyncio.ensure_future(do_allGather()))
for pdf in pdf_iter:
yield pdf
rdd = (
dataset.mapInPandas(_train_udf, schema=dataset.schema) # type: ignore
.rdd.barrier()
.mapPartitions(lambda x: x)
)
rdd.count()
| spark-rapids-ml-branch-23.10 | python/tests/test_ucx.py |
#
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
from functools import lru_cache
from typing import Any, Dict, Iterator, List, Optional, Tuple, TypeVar, Union
import numpy as np
import pyspark
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.sql.functions import array
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
from spark_rapids_ml.params import _CumlParams
from spark_rapids_ml.utils import _get_default_params_from_func, dtype_to_pyspark_type
FeatureTypes = namedtuple("FeatureTypes", ("vector", "array", "multi_cols"))
feature_types = FeatureTypes("vector", "array", "multi_cols")
pyspark_supported_feature_types = feature_types._fields
cuml_supported_data_types = [np.float32, np.float64]
CumlParams = TypeVar("CumlParams", bound=_CumlParams)
def idfn(val: Any) -> str:
"""Provide an API to provide display names for data type generators."""
return str(val)
def _make_regression_dataset_uncached(
nrows: int, ncols: int, **kwargs: Any
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Create regression dataset.
return X_train, X_test, y_train, y_test
"""
X, y = make_regression(**kwargs, n_samples=nrows, n_features=ncols, random_state=0)
return train_test_split(X, y, train_size=0.8, random_state=10)
@lru_cache(4)
def _make_regression_dataset_from_cache(
nrows: int, ncols: int, **kwargs: Any
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Cache the dataset"""
return _make_regression_dataset_uncached(nrows, ncols, **kwargs)
def make_regression_dataset(
datatype: np.dtype, nrows: int, ncols: int, **kwargs: Any
) -> Iterator[np.ndarray]:
"""Create regression dataset"""
if nrows * ncols < 1e8: # Keep cache under 4 GB
dataset = _make_regression_dataset_from_cache(nrows, ncols, **kwargs)
else:
dataset = _make_regression_dataset_uncached(nrows, ncols, **kwargs)
return map(lambda arr: arr.astype(datatype), dataset)
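# Example usage (illustrative): the four arrays are the 80/20 train/test split
# produced by train_test_split above.
#   X_train, X_test, y_train, y_test = make_regression_dataset(np.float32, 100, 8)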
def create_pyspark_dataframe(
spark: SparkSession,
feature_type: str,
dtype: np.dtype,
data: np.ndarray,
label: Optional[np.ndarray] = None,
) -> Tuple[pyspark.sql.DataFrame, Union[str, List[str]], Optional[str]]:
"""Construct a dataframe based on features and label data."""
assert feature_type in pyspark_supported_feature_types
m, n = data.shape
pyspark_type = dtype_to_pyspark_type(dtype)
feature_cols: Union[str, List[str]] = [f"c{i}" for i in range(n)]
schema = [f"{c} {pyspark_type}" for c in feature_cols]
label_col = None
if label is not None:
label_col = "label_col"
schema.append(f"{label_col} {pyspark_type}")
df = spark.createDataFrame(
np.concatenate((data, label.reshape(m, 1)), axis=1).tolist(),
",".join(schema),
)
else:
df = spark.createDataFrame(data.tolist(), ",".join(schema))
if feature_type == feature_types.array:
df = df.withColumn("features", array(*feature_cols)).drop(*feature_cols)
feature_cols = "features"
elif feature_type == feature_types.vector:
df = (
VectorAssembler()
.setInputCols(feature_cols) # type: ignore
.setOutputCol("features")
.transform(df)
.drop(*feature_cols)
)
feature_cols = "features"
return df, feature_cols, label_col
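# Example usage (illustrative):
#   df, features_col, label_col = create_pyspark_dataframe(
#       spark, feature_types.array, np.float32, X, y
#   )
# yields a DataFrame with an array-typed "features" column plus a "label_col"
# column; with feature_types.multi_cols, features_col is instead the list of
# column names c0..c{n-1}.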
def array_equal(
lhs: Union[np.ndarray, List[float]],
rhs: Union[np.ndarray, List[float]],
unit_tol: float = 1e-4,
total_tol: float = 0,
with_sign: bool = True,
) -> bool:
a = np.asarray(lhs)
b = np.asarray(rhs)
if len(a) == 0 and len(b) == 0:
return True
if not with_sign:
a, b = np.abs(a), np.abs(b)
res = (np.sum(np.abs(a - b) > unit_tol)) / a.size <= total_tol
return res
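# Illustrative behavior: with the defaults (unit_tol=1e-4, total_tol=0), every
# element must match within 1e-4:
#   array_equal([1.0, 2.0], [1.00005, 2.0])  -> True
#   array_equal([1.0, 2.0], [1.1, 2.0])      -> False
# A nonzero total_tol instead allows that fraction of elements to disagree.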
def assert_params(
instance: CumlParams, spark_params: Dict[str, Any], cuml_params: Dict[str, Any]
) -> None:
for key in spark_params:
if instance.hasParam(key):
if instance.isDefined(key):
actual = instance.getOrDefault(key)
expected = spark_params[key]
assert (
actual == expected
), f"Value of '{key}' Param was {actual}, expected {expected}."
            elif spark_params[key] is not None:
                assert False, f"Value of '{key}' Param is undefined."
for key in cuml_params:
if key in instance.cuml_params:
actual = instance.cuml_params[key]
expected = cuml_params[key]
assert (
actual == expected
), f"Value of '{key}' cuml_param was {actual}, expected {expected}."
@lru_cache(4)
def _make_classification_dataset_from_cache(
nrows: int, ncols: int, **kwargs: Any
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Cache the dataset"""
return _make_classification_dataset_uncached(nrows, ncols, **kwargs)
def _make_classification_dataset_uncached(
nrows: int, ncols: int, **kwargs: Any
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Create classification dataset.
return X_train, X_test, y_train, y_test
"""
X, y = make_classification(
**kwargs, n_samples=nrows, n_features=ncols, random_state=0
)
return train_test_split(X, y, train_size=0.8, random_state=10)
def make_classification_dataset(
datatype: np.dtype, nrows: int, ncols: int, **kwargs: Any
) -> Iterator[np.ndarray]:
"""Create classification dataset"""
if nrows * ncols < 1e8: # Keep cache under 4 GB
dataset = _make_classification_dataset_from_cache(nrows, ncols, **kwargs)
else:
dataset = _make_classification_dataset_uncached(nrows, ncols, **kwargs)
return map(lambda arr: arr.astype(datatype), dataset)
def get_default_cuml_parameters(
cuml_classes: List[type], excludes: List[str] = []
) -> Dict[str, Any]:
params = {}
for cuml_cls in cuml_classes:
params.update(_get_default_params_from_func(cuml_cls, excludes))
return params
| spark-rapids-ml-branch-23.10 | python/tests/utils.py |
#
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Any, Dict, List, Tuple, Type, TypeVar, cast
import numpy as np
import pytest
from _pytest.logging import LogCaptureFixture
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.functions import array_to_vector
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.ml.param import Param
from pyspark.ml.regression import LinearRegression as SparkLinearRegression
from pyspark.ml.regression import LinearRegressionModel as SparkLinearRegressionModel
from pyspark.ml.tuning import CrossValidator as SparkCrossValidator
from pyspark.ml.tuning import CrossValidatorModel, ParamGridBuilder
from pyspark.sql.functions import array, col
from pyspark.sql.types import DoubleType
from spark_rapids_ml.regression import LinearRegression, LinearRegressionModel
from spark_rapids_ml.tuning import CrossValidator
from .sparksession import CleanSparkSession
from .utils import (
array_equal,
assert_params,
create_pyspark_dataframe,
cuml_supported_data_types,
feature_types,
get_default_cuml_parameters,
idfn,
make_regression_dataset,
pyspark_supported_feature_types,
)
LinearRegressionType = TypeVar(
"LinearRegressionType", Type[LinearRegression], Type[SparkLinearRegression]
)
LinearRegressionModelType = TypeVar(
"LinearRegressionModelType",
Type[LinearRegressionModel],
Type[SparkLinearRegressionModel],
)
# @lru_cache(4)  TODO: fix - TypeError: unhashable type: 'numpy.ndarray'
def train_with_cuml_linear_regression(
X: np.ndarray,
y: np.ndarray,
alpha: float,
l1_ratio: float,
other_params: Dict[str, Any] = {},
) -> Any:
if alpha == 0:
from cuml import LinearRegression as cuLinearRegression
lr = cuLinearRegression(output_type="numpy", copy_X=False)
else:
if l1_ratio == 0.0:
from cuml import Ridge
lr = Ridge(output_type="numpy", alpha=alpha * len(y))
elif l1_ratio == 1.0:
from cuml import Lasso
lr = Lasso(output_type="numpy", alpha=alpha)
else:
from cuml import ElasticNet
lr = ElasticNet(
output_type="numpy", alpha=alpha, l1_ratio=l1_ratio, **other_params
)
lr.fit(X, y)
return lr
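# Note (assumed mapping, mirroring Spark ML semantics): alpha plays the role of
# regParam and l1_ratio of elasticNetParam, so alpha == 0 selects plain OLS,
# l1_ratio == 0 selects Ridge (pure L2), l1_ratio == 1 selects Lasso (pure L1),
# and anything in between selects ElasticNet. cuML's Ridge expects the total
# (unnormalized) L2 penalty, hence the alpha * len(y) rescaling above.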
def test_default_cuml_params() -> None:
from cuml.linear_model.linear_regression import (
LinearRegression as CumlLinearRegression,
)
from cuml.linear_model.ridge import Ridge
from cuml.solvers import CD
cuml_params = get_default_cuml_parameters(
[CumlLinearRegression, Ridge, CD], ["handle", "output_type"]
)
spark_params = LinearRegression()._get_cuml_params_default()
import cuml
from packaging import version
if version.parse(cuml.__version__) < version.parse("23.08.00"):
spark_params.pop("copy_X")
assert cuml_params == spark_params
@pytest.mark.parametrize("reg", [0.0, 0.7])
def test_linear_regression_params(
tmp_path: str, reg: float, caplog: LogCaptureFixture
) -> None:
# Default params
default_spark_params = {
"elasticNetParam": 0.0,
"fitIntercept": True,
"loss": "squaredError",
"maxIter": 100,
"regParam": 0.0,
"solver": "auto",
"standardization": True,
"tol": 1e-06,
}
default_cuml_params = {
"algorithm": "eig",
"alpha": 0.0,
"fit_intercept": True,
"l1_ratio": 0.0,
"max_iter": 100,
"normalize": True,
"solver": "eig",
}
default_lr = LinearRegression()
assert_params(default_lr, default_spark_params, default_cuml_params)
# Spark ML Params
spark_params: Dict[str, Any] = {
"fitIntercept": False,
"standardization": False,
"regParam": reg,
"solver": "normal",
}
spark_lr = LinearRegression(**spark_params)
expected_spark_params = default_spark_params.copy()
expected_spark_params.update(spark_params)
expected_cuml_params = default_cuml_params.copy()
expected_cuml_params.update(
{
"alpha": reg,
"fit_intercept": False,
"normalize": False,
"solver": "eig",
}
)
assert_params(spark_lr, expected_spark_params, expected_cuml_params)
# Estimator persistence
path = tmp_path + "/linear_regression_tests"
estimator_path = f"{path}/linear_regression"
spark_lr.write().overwrite().save(estimator_path)
loaded_lr = LinearRegression.load(estimator_path)
assert_params(loaded_lr, expected_spark_params, expected_cuml_params)
# Unsupported value
spark_params = {"solver": "l-bfgs"}
with pytest.raises(
ValueError, match="Value 'l-bfgs' for 'solver' param is unsupported"
):
unsupported_lr = LinearRegression(**spark_params)
    # make sure there is no warning when float64 inputs are enabled
    lr_float64 = LinearRegression(float32_inputs=False)
    assert "float32_inputs to False" not in caplog.text
    assert not lr_float64._float32_inputs
@pytest.mark.parametrize("data_type", ["byte", "short", "int", "long"])
def test_linear_regression_numeric_type(gpu_number: int, data_type: str) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
data = [
[1, 4, 4, 4, 0],
[2, 2, 2, 2, 1],
[3, 3, 3, 2, 2],
[3, 3, 3, 2, 3],
[5, 2, 1, 3, 4],
]
with CleanSparkSession() as spark:
feature_cols = ["c1", "c2", "c3", "c4"]
schema = (
", ".join([f"{c} {data_type}" for c in feature_cols])
+ f", label {data_type}"
)
df = spark.createDataFrame(data, schema=schema)
lr = LinearRegression(num_workers=gpu_number)
lr.setFeaturesCol(feature_cols)
lr.fit(df)
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.parametrize("data_type", cuml_supported_data_types)
@pytest.mark.parametrize("data_shape", [(10, 2)], ids=idfn)
@pytest.mark.parametrize("reg", [0.0, 0.7])
@pytest.mark.parametrize("float32_inputs", [True, False])
def test_linear_regression_basic(
gpu_number: int,
tmp_path: str,
feature_type: str,
data_type: np.dtype,
data_shape: Tuple[int, int],
reg: float,
float32_inputs: bool,
) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
# Train a toy model
X, _, y, _ = make_regression_dataset(data_type, data_shape[0], data_shape[1])
with CleanSparkSession() as spark:
df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X, y
)
lr = LinearRegression(num_workers=gpu_number, float32_inputs=float32_inputs)
lr.setRegParam(reg)
lr.setFeaturesCol(features_col)
assert lr.getFeaturesCol() == features_col
assert label_col is not None
lr.setLabelCol(label_col)
assert lr.getLabelCol() == label_col
def assert_cuml_pyspark_model(
lhs: LinearRegressionModel, rhs: SparkLinearRegressionModel
) -> None:
assert lhs.coefficients == rhs.coefficients
assert lhs.intercept == rhs.intercept
assert lhs.getRegParam() == rhs.getRegParam()
assert lhs.getRegParam() == reg
def assert_cuml_model(
lhs: LinearRegressionModel, rhs: LinearRegressionModel
) -> None:
assert lhs.coef_ == rhs.coef_
assert lhs.intercept_ == rhs.intercept_
assert lhs.coefficients == rhs.coefficients
assert lhs.intercept == rhs.intercept
# Vector type will be cast to array(double)
if float32_inputs:
assert lhs.dtype == "float32"
elif feature_type == "vector" and not float32_inputs:
assert lhs.dtype == np.dtype(np.float64).name
else:
assert lhs.dtype == np.dtype(data_type).name
assert lhs.dtype == rhs.dtype
assert lhs.n_cols == rhs.n_cols
assert lhs.n_cols == data_shape[1]
# train a model
lr_model = lr.fit(df)
assert (
lr_model.transform(df).schema[lr.getPredictionCol()].dataType
== DoubleType()
)
assert isinstance(lr_model.cpu(), SparkLinearRegressionModel)
assert_cuml_pyspark_model(lr_model, lr_model.cpu())
# Convert input to vector dataframe to fit in the Spark LinearRegressionModel
if feature_type == feature_types.array:
vector_df = df.select(array_to_vector(col(features_col)).alias("features")) # type: ignore
elif feature_type == feature_types.multi_cols:
assembler = (
VectorAssembler().setInputCols(features_col).setOutputCol("features") # type: ignore
)
vector_df = assembler.transform(df).drop(*features_col)
else:
vector_df = df
# transform without throwing exception
lr_model.cpu().transform(vector_df).collect()
# model persistence
path = tmp_path + "/linear_regression_tests"
model_path = f"{path}/linear_regression_model"
lr_model.write().overwrite().save(model_path)
lr_model_loaded = LinearRegressionModel.load(model_path)
assert isinstance(lr_model_loaded.cpu(), SparkLinearRegressionModel)
assert_cuml_pyspark_model(lr_model_loaded, lr_model_loaded.cpu())
assert_cuml_model(lr_model, lr_model_loaded)
# transform without throwing exception
lr_model_loaded.cpu().transform(vector_df).collect()
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.parametrize("data_shape", [(1000, 20)], ids=idfn)
@pytest.mark.parametrize("data_type", cuml_supported_data_types)
@pytest.mark.parametrize("max_record_batch", [100, 10000])
@pytest.mark.parametrize("alpha", [0.0, 0.7]) # equal to reg parameter
@pytest.mark.parametrize(
"l1_ratio_and_other_params",
[
(0.0, {}), # LinearRegression
(0.5, {"tol": 1e-5}), # ElasticNet
(1.0, {"tol": 1e-5}), # Lasso
],
)
@pytest.mark.slow
def test_linear_regression(
gpu_number: int,
feature_type: str,
data_shape: Tuple[int, int],
data_type: np.dtype,
max_record_batch: int,
alpha: float,
l1_ratio_and_other_params: Tuple[float, Dict[str, Any]],
) -> None:
X_train, X_test, y_train, _ = make_regression_dataset(
data_type, data_shape[0], data_shape[1]
)
l1_ratio, other_params = l1_ratio_and_other_params
cu_lr = train_with_cuml_linear_regression(
X_train, y_train, alpha, l1_ratio, other_params
)
cu_expected = cu_lr.predict(X_test)
conf = {"spark.sql.execution.arrow.maxRecordsPerBatch": str(max_record_batch)}
with CleanSparkSession(conf) as spark:
train_df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X_train, y_train
)
assert label_col is not None
slr = LinearRegression(num_workers=gpu_number, verbose=7, **other_params)
slr.setRegParam(alpha)
slr.setStandardization(
False
) # Spark default is True, but Cuml default is False
slr.setElasticNetParam(l1_ratio)
slr.setFeaturesCol(features_col)
slr.setLabelCol(label_col)
slr_model: LinearRegressionModel = slr.fit(train_df)
assert slr_model.cpu().getElasticNetParam() == l1_ratio
assert slr_model.cpu().getRegParam() == alpha
assert not slr_model.cpu().getStandardization()
assert slr_model.cpu().getLabelCol() == label_col
assert array_equal(cu_lr.coef_, cast(list, slr_model.coef_), 1e-3)
assert array_equal(cu_lr.coef_, slr_model.coefficients.toArray(), 1e-3)
test_df, _, _ = create_pyspark_dataframe(spark, feature_type, data_type, X_test)
result = slr_model.transform(test_df).collect()
pred_result = [row.prediction for row in result]
assert array_equal(cu_expected, pred_result, 1e-3)
params_exception = [
# params, if throwing exception
({"alpha": 0}, True), # LinearRegression throws exception
({"alpha": 0.5, "l1_ratio": 0}, True), # Ridge throws exception
({"alpha": 0.5, "l1_ratio": 0.5}, False), # ElasticNet and Lasso can work
]
@pytest.mark.compat
@pytest.mark.parametrize(
"lr_types",
[
(SparkLinearRegression, SparkLinearRegressionModel),
(LinearRegression, LinearRegressionModel),
],
)
def test_linear_regression_spark_compat(
lr_types: Tuple[LinearRegressionType, LinearRegressionModelType],
tmp_path: str,
) -> None:
_LinearRegression, _LinearRegressionModel = lr_types
X = np.array(
[
[-0.20515826, 1.4940791],
[0.12167501, 0.7610377],
[1.4542735, 0.14404356],
[-0.85409576, 0.3130677],
[2.2408931, 0.978738],
[-0.1513572, 0.95008844],
[-0.9772779, 1.867558],
[0.41059852, -0.10321885],
]
)
weight = np.ones([8])
y = np.array(
[
2.0374513,
22.403986,
139.4456,
-76.19584,
225.72075,
-0.6784152,
-65.54835,
37.30829,
]
)
feature_cols = ["c0", "c1"]
schema = ["c0 float, c1 float, weight float, label float"]
with CleanSparkSession() as spark:
df = spark.createDataFrame(
np.concatenate((X, weight.reshape(8, 1), y.reshape(8, 1)), axis=1).tolist(),
",".join(schema),
)
df = df.withColumn("features", array_to_vector(array(*feature_cols))).drop(
*feature_cols
)
lr = _LinearRegression(regParam=0.1, solver="normal")
assert lr.getRegParam() == 0.1
lr.setFeaturesCol("features")
lr.setMaxIter(5)
lr.setRegParam(0.0)
lr.setLabelCol("label")
if isinstance(lr, SparkLinearRegression):
lr.setWeightCol("weight")
assert lr.getFeaturesCol() == "features"
assert lr.getMaxIter() == 5
assert lr.getRegParam() == 0.0
assert lr.getLabelCol() == "label"
model = lr.fit(df)
coefficients = model.coefficients.toArray()
expected_coefficients = [94.46689350900762, 14.33532962562045]
assert array_equal(coefficients, expected_coefficients)
intercept = model.intercept
assert np.isclose(intercept, -3.3089753423400734e-07, atol=1.0e-4)
example = df.head()
if example:
model.predict(example.features)
model.setPredictionCol("prediction")
output_df = model.transform(df)
assert isinstance(output_df.schema["features"].dataType, VectorUDT)
output = output_df.head()
# Row(weight=1.0, label=2.0374512672424316, features=DenseVector([-0.2052, 1.4941]), prediction=2.037452415464224)
assert np.isclose(output.prediction, 2.037452415464224)
lr_path = tmp_path + "/lr"
lr.save(lr_path)
lr2 = _LinearRegression.load(lr_path)
assert lr2.getMaxIter() == 5
model_path = tmp_path + "/lr_model"
model.save(model_path)
model2 = _LinearRegressionModel.load(model_path)
assert model.coefficients.toArray()[0] == model2.coefficients.toArray()[0]
assert model.intercept == model2.intercept
assert model.transform(df).take(1) == model2.transform(df).take(1)
assert model.numFeatures == 2
@pytest.mark.parametrize("params_exception", params_exception)
def test_fail_run_on_1_col(
gpu_number: int, params_exception: Tuple[Dict[str, Any], bool]
) -> None:
# reduce the number of GPUs for toy dataset to avoid empty partition
gpu_number = min(gpu_number, 2)
params, exception = params_exception
with CleanSparkSession() as spark:
df = spark.createDataFrame(
[
(1.0, Vectors.dense(1.0)),
(0.0, Vectors.sparse(1, [], [])),
(1.0, Vectors.dense(1.0)),
(0.0, Vectors.sparse(1, [], [])),
],
["label", "features"],
)
lr = LinearRegression(num_workers=gpu_number, **params)
if exception:
with pytest.raises(
RuntimeError,
match="LinearRegression doesn't support training data with 1 column",
):
lr.fit(df)
else:
lr.fit(df)
@pytest.mark.parametrize("feature_type", [feature_types.vector])
@pytest.mark.parametrize("data_type", [np.float32])
def test_lr_fit_multiple_in_single_pass(
feature_type: str,
data_type: np.dtype,
) -> None:
X_train, _, y_train, _ = make_regression_dataset(
datatype=data_type,
nrows=100,
ncols=5,
)
with CleanSparkSession() as spark:
train_df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X_train, y_train
)
assert label_col is not None
lr = LinearRegression()
lr.setFeaturesCol(features_col)
lr.setLabelCol(label_col)
initial_lr = lr.copy()
param_maps: List[Dict[Param, Any]] = [
# alpha = 0, LinearRegression
{
lr.tol: 0.00001,
lr.standardization: False,
lr.loss: "squared_loss",
lr.regParam: 0,
lr.elasticNetParam: 0,
lr.fitIntercept: True,
lr.maxIter: 39,
lr.solver: "auto",
},
# Ridge
{
lr.tol: 0.00002,
lr.standardization: True,
lr.loss: "squared_loss",
lr.regParam: 0.2,
lr.elasticNetParam: 0,
lr.fitIntercept: True,
lr.maxIter: 29,
lr.solver: "auto",
},
# Lasso
{
lr.tol: 0.00003,
lr.standardization: False,
lr.loss: "squared_loss",
lr.regParam: 0.3,
lr.elasticNetParam: 1,
lr.fitIntercept: True,
lr.maxIter: 59,
lr.solver: "auto",
},
# ElasticNet
{
lr.tol: 0.00004,
lr.standardization: False,
lr.loss: "squared_loss",
lr.regParam: 0.5,
lr.elasticNetParam: 0.6,
lr.fitIntercept: False,
lr.maxIter: 69,
lr.solver: "auto",
},
]
models = lr.fit(train_df, param_maps)
for i, param_map in enumerate(param_maps):
rf = initial_lr.copy()
single_model = rf.fit(train_df, param_map)
assert single_model.coefficients == models[i].coefficients
assert single_model.intercept == models[i].intercept
for k, v in param_map.items():
assert models[i].getOrDefault(k.name) == v
assert single_model.getOrDefault(k.name) == v
@pytest.mark.parametrize("feature_type", [feature_types.vector])
@pytest.mark.parametrize("data_type", [np.float32])
@pytest.mark.parametrize("data_shape", [(100, 8)], ids=idfn)
def test_crossvalidator_linear_regression(
feature_type: str,
data_type: np.dtype,
data_shape: Tuple[int, int],
) -> None:
# Train a toy model
X, _, y, _ = make_regression_dataset(
datatype=data_type,
nrows=data_shape[0],
ncols=data_shape[1],
)
with CleanSparkSession() as spark:
df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, data_type, X, y
)
assert label_col is not None
lr = LinearRegression()
lr.setFeaturesCol(features_col)
lr.setLabelCol(label_col)
evaluator = RegressionEvaluator()
evaluator.setLabelCol(label_col)
grid = (
ParamGridBuilder()
.addGrid(lr.regParam, [0, 0.2])
.addGrid(lr.elasticNetParam, [0, 0.5, 1])
.build()
)
cv = CrossValidator(
estimator=lr,
estimatorParamMaps=grid,
evaluator=evaluator,
numFolds=2,
seed=1,
)
# without exception
model: CrossValidatorModel = cv.fit(df)
spark_cv = SparkCrossValidator(
estimator=lr,
estimatorParamMaps=grid,
evaluator=evaluator,
numFolds=2,
seed=1,
)
spark_cv_model = spark_cv.fit(df)
assert array_equal(model.avgMetrics, spark_cv_model.avgMetrics)
| spark-rapids-ml-branch-23.10 | python/tests/test_linear_model.py |
#
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from typing import List, Tuple, Union
import cupy as cp
import numpy as np
import pytest
from _pytest.logging import LogCaptureFixture
from cuml.metrics import trustworthiness
from pyspark.sql.functions import array
from sklearn.datasets import load_digits, load_iris
from spark_rapids_ml.umap import UMAP, UMAPModel
from .sparksession import CleanSparkSession
from .utils import (
assert_params,
create_pyspark_dataframe,
cuml_supported_data_types,
pyspark_supported_feature_types,
)
def _load_dataset(dataset: str, n_rows: int) -> Tuple[np.ndarray, np.ndarray]:
if dataset == "digits":
local_X, local_y = load_digits(return_X_y=True)
else: # dataset == "iris"
local_X, local_y = load_iris(return_X_y=True)
local_X = cp.asarray(local_X)
local_y = cp.asarray(local_y)
local_X = local_X.repeat(math.ceil(n_rows / len(local_X)), axis=0)
local_y = local_y.repeat(math.ceil(n_rows / len(local_y)), axis=0)
# Add some gaussian noise
local_X += cp.random.standard_normal(local_X.shape, dtype=cp.float32)
return local_X, local_y
def _local_umap_trustworthiness(
local_X: np.ndarray,
local_y: np.ndarray,
n_neighbors: int,
supervised: bool,
) -> float:
from cuml.manifold import UMAP
local_model = UMAP(n_neighbors=n_neighbors, random_state=42, init="random")
y_train = local_y if supervised else None
local_model.fit(local_X, y=y_train)
embedding = local_model.transform(local_X)
return trustworthiness(local_X, embedding, n_neighbors=n_neighbors, batch_size=5000)
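# Note: trustworthiness measures how well the embedding preserves the local
# neighborhood structure of the input; scores close to 1.0 mean the embedded
# neighbors were also neighbors in the original space.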
def _spark_umap_trustworthiness(
local_X: np.ndarray,
local_y: np.ndarray,
n_neighbors: int,
supervised: bool,
n_parts: int,
gpu_number: int,
sampling_ratio: float,
dtype: np.dtype,
feature_type: str,
) -> float:
umap_estimator = UMAP(
n_neighbors=n_neighbors,
random_state=42,
init="random",
num_workers=gpu_number,
)
with CleanSparkSession() as spark:
if supervised:
data_df, features_col, label_col = create_pyspark_dataframe(
spark, feature_type, dtype, local_X, local_y
)
assert label_col is not None
umap_estimator.setLabelCol(label_col)
else:
data_df, features_col, _ = create_pyspark_dataframe(
spark, feature_type, dtype, local_X, None
)
data_df = data_df.repartition(n_parts)
umap_estimator.setFeaturesCol(features_col).setSampleFraction(sampling_ratio)
umap_model = umap_estimator.fit(data_df)
pdf = umap_model.transform(data_df).toPandas()
embedding = cp.asarray(pdf["embedding"].to_list()).astype(cp.float32)
input = cp.asarray(pdf["features"].to_list()).astype(cp.float32)
return trustworthiness(input, embedding, n_neighbors=n_neighbors, batch_size=5000)
def _run_spark_test(
n_parts: int,
gpu_number: int,
n_rows: int,
sampling_ratio: float,
supervised: bool,
dataset: str,
n_neighbors: int,
dtype: np.dtype,
feature_type: str,
) -> bool:
local_X, local_y = _load_dataset(dataset, n_rows)
dist_umap = _spark_umap_trustworthiness(
local_X,
local_y,
n_neighbors,
supervised,
n_parts,
gpu_number,
sampling_ratio,
dtype,
feature_type,
)
loc_umap = _local_umap_trustworthiness(local_X, local_y, n_neighbors, supervised)
print("Local UMAP trustworthiness score : {:.2f}".format(loc_umap))
print("Spark UMAP trustworthiness score : {:.2f}".format(dist_umap))
trust_diff = loc_umap - dist_umap
return trust_diff <= 0.15
@pytest.mark.parametrize("n_parts", [2, 9])
@pytest.mark.parametrize("n_rows", [100, 500])
@pytest.mark.parametrize("sampling_ratio", [0.55, 0.9])
@pytest.mark.parametrize("supervised", [True, False])
@pytest.mark.parametrize("dataset", ["digits", "iris"])
@pytest.mark.parametrize("n_neighbors", [10])
@pytest.mark.parametrize("dtype", cuml_supported_data_types)
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
@pytest.mark.slow
def test_spark_umap(
n_parts: int,
gpu_number: int,
n_rows: int,
sampling_ratio: float,
supervised: bool,
dataset: str,
n_neighbors: int,
dtype: np.dtype,
feature_type: str,
) -> None:
result = _run_spark_test(
n_parts,
gpu_number,
n_rows,
sampling_ratio,
supervised,
dataset,
n_neighbors,
dtype,
feature_type,
)
if not result:
result = _run_spark_test(
n_parts,
gpu_number,
n_rows,
sampling_ratio,
supervised,
dataset,
n_neighbors,
dtype,
feature_type,
)
assert result
@pytest.mark.parametrize("n_parts", [5])
@pytest.mark.parametrize("n_rows", [500])
@pytest.mark.parametrize("sampling_ratio", [0.7])
@pytest.mark.parametrize("supervised", [True])
@pytest.mark.parametrize("dataset", ["digits"])
@pytest.mark.parametrize("n_neighbors", [10])
@pytest.mark.parametrize("dtype", [cuml_supported_data_types[0]])
@pytest.mark.parametrize("feature_type", pyspark_supported_feature_types)
def test_spark_umap_fast(
n_parts: int,
gpu_number: int,
n_rows: int,
sampling_ratio: float,
supervised: bool,
dataset: str,
n_neighbors: int,
dtype: np.dtype,
feature_type: str,
caplog: LogCaptureFixture,
) -> None:
result = _run_spark_test(
n_parts,
gpu_number,
n_rows,
sampling_ratio,
supervised,
dataset,
n_neighbors,
dtype,
feature_type,
)
if not result:
result = _run_spark_test(
n_parts,
gpu_number,
n_rows,
sampling_ratio,
supervised,
dataset,
n_neighbors,
dtype,
feature_type,
)
assert result
assert UMAP()._float32_inputs
    # setting float32_inputs=False should warn, since UMAP only accepts float32
    umap_float32 = UMAP(float32_inputs=False)
    assert "float32_inputs to False" in caplog.text
assert umap_float32._float32_inputs
def test_umap_estimator_persistence(tmp_path: str) -> None:
# Default constructor
default_cuml_params = {
"n_neighbors": 15,
"n_components": 2,
"metric": "euclidean",
"n_epochs": None,
"learning_rate": 1.0,
"init": "spectral",
"min_dist": 0.1,
"spread": 1.0,
"set_op_mix_ratio": 1.0,
"local_connectivity": 1.0,
"repulsion_strength": 1.0,
"negative_sample_rate": 5,
"transform_queue_size": 4.0,
"a": None,
"b": None,
"precomputed_knn": None,
"random_state": None,
"verbose": False,
}
default_umap = UMAP()
assert_params(default_umap, {}, default_cuml_params)
# Estimator persistence
path = tmp_path + "/umap_tests"
estimator_path = f"{path}/umap"
default_umap.write().overwrite().save(estimator_path)
loaded_umap = UMAP.load(estimator_path)
assert_params(loaded_umap, {}, default_cuml_params)
assert loaded_umap._float32_inputs
def test_umap_model_persistence(gpu_number: int, tmp_path: str) -> None:
from cuml.datasets import make_blobs
X, _ = make_blobs(
100,
20,
centers=42,
cluster_std=0.1,
dtype=np.float32,
random_state=10,
)
with CleanSparkSession() as spark:
pyspark_type = "float"
feature_cols = [f"c{i}" for i in range(X.shape[1])]
schema = [f"{c} {pyspark_type}" for c in feature_cols]
df = spark.createDataFrame(X.tolist(), ",".join(schema))
df = df.withColumn("features", array(*feature_cols)).drop(*feature_cols)
umap = UMAP(num_workers=gpu_number).setFeaturesCol("features")
def assert_umap_model(model: UMAPModel) -> None:
embedding = np.array(model.embedding)
raw_data = np.array(model.raw_data)
assert embedding.shape == (100, 2)
assert raw_data.shape == (100, 20)
assert np.array_equal(raw_data, X.get())
assert model.dtype == "float32"
assert model.n_cols == X.shape[1]
umap_model = umap.fit(df)
assert_umap_model(model=umap_model)
# Model persistence
path = tmp_path + "/umap_tests"
model_path = f"{path}/umap_model"
umap_model.write().overwrite().save(model_path)
umap_model_loaded = UMAPModel.load(model_path)
assert_umap_model(model=umap_model_loaded)
@pytest.mark.parametrize("BROADCAST_LIMIT", [8 << 20, 8 << 18])
def test_umap_broadcast_chunks(gpu_number: int, BROADCAST_LIMIT: int) -> None:
from cuml.datasets import make_blobs
X, _ = make_blobs(
5000,
3000,
centers=42,
cluster_std=0.1,
dtype=np.float32,
random_state=10,
)
with CleanSparkSession() as spark:
pyspark_type = "float"
feature_cols = [f"c{i}" for i in range(X.shape[1])]
schema = [f"{c} {pyspark_type}" for c in feature_cols]
df = spark.createDataFrame(X.tolist(), ",".join(schema))
df = df.withColumn("features", array(*feature_cols)).drop(*feature_cols)
umap = UMAP(num_workers=gpu_number).setFeaturesCol("features")
umap.BROADCAST_LIMIT = BROADCAST_LIMIT
umap_model = umap.fit(df)
def assert_umap_model(model: UMAPModel) -> None:
embedding = np.array(model.embedding)
raw_data = np.array(model.raw_data)
assert embedding.shape == (5000, 2)
assert raw_data.shape == (5000, 3000)
assert np.array_equal(raw_data, X.get())
assert model.dtype == "float32"
assert model.n_cols == X.shape[1]
assert_umap_model(model=umap_model)
pdf = umap_model.transform(df).toPandas()
embedding = cp.asarray(pdf["embedding"].to_list()).astype(cp.float32)
input = cp.asarray(pdf["features"].to_list()).astype(cp.float32)
dist_umap = trustworthiness(input, embedding, n_neighbors=15, batch_size=5000)
loc_umap = _local_umap_trustworthiness(X, np.zeros(0), 15, False)
trust_diff = loc_umap - dist_umap
assert trust_diff <= 0.15
| spark-rapids-ml-branch-23.10 | python/tests/test_umap.py |