# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
import collections
import logging
import numpy as np
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.utils import model_utils
from nvidia_tao_tf2.cv.core import argmax_matcher
from nvidia_tao_tf2.cv.core import box_list
from nvidia_tao_tf2.cv.core import faster_rcnn_box_coder
from nvidia_tao_tf2.cv.core import region_similarity_calculator
from nvidia_tao_tf2.cv.core import target_assigner
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection.
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
# The maximum number of detections per image.
MAX_DETECTIONS_PER_IMAGE = 100
# The minimal score threshold.
MIN_SCORE_THRESH = 0.4
logger = logging.getLogger(__name__)
def decode_box_outputs(pred_boxes, anchor_boxes):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
pred_boxes: predicted box regression targets.
anchor_boxes: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
anchor_boxes = tf.cast(anchor_boxes, pred_boxes.dtype)
ycenter_a = (anchor_boxes[..., 0] + anchor_boxes[..., 2]) / 2
xcenter_a = (anchor_boxes[..., 1] + anchor_boxes[..., 3]) / 2
ha = anchor_boxes[..., 2] - anchor_boxes[..., 0]
wa = anchor_boxes[..., 3] - anchor_boxes[..., 1]
ty, tx, th, tw = tf.unstack(pred_boxes, num=4, axis=-1)
w = tf.math.exp(tw) * wa
h = tf.math.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
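# A minimal sanity-check sketch for the decoder above (hypothetical values,
# not part of the original module): an all-zero regression target should
# decode back to the anchor box itself.
#
#   anchor = tf.constant([[0., 0., 32., 32.]])  # [ymin, xmin, ymax, xmax]
#   pred = tf.constant([[0., 0., 0., 0.]])      # [ty, tx, th, tw]
#   decode_box_outputs(pred, anchor)            # -> [[0., 0., 32., 32.]]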
def _generate_anchor_configs(feat_sizes, min_level, max_level, num_scales,
aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
A configuration is a tuple of (num_anchors, scale, aspect_ratio).
Args:
feat_sizes: a list of dicts giving the integer feature map sizes for each level.
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two anchor scales,
[2^0, 2^0.5], on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
((feat_sizes[0]['height'] / float(feat_sizes[level]['height']),
feat_sizes[0]['width'] / float(feat_sizes[level]['width'])),
scale_octave / float(num_scales), aspect))
return anchor_configs
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: tuple of integer numbers of input image size.
anchor_scale: float number representing the scale of the base anchor size
relative to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be the multiple of largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
base_anchor_size_x = anchor_scale * stride[1] * 2**octave_scale
base_anchor_size_y = anchor_scale * stride[0] * 2**octave_scale
anchor_size_x_2 = base_anchor_size_x * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size_y * aspect[1] / 2.0
x = np.arange(stride[1] / 2, image_size[1], stride[1])
y = np.arange(stride[0] / 2, image_size[0], stride[0])
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# print(boxes_level)
# concat anchors on the same level to the reshape NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
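# Worked example of the math above (hypothetical config): for a 512x512 input,
# level 3 has stride 8, giving a 64x64 grid of centers; with 3 octave scales
# and 3 aspect ratios that level alone contributes 64 * 64 * 9 = 36864 rows
# to the returned [N, 4] anchor array.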
class Anchors(object):
"""RetinaNet Anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios,
anchor_scale, image_size):
"""Constructs multiscale RetinaNet anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two anchor scales,
[2^0, 2^0.5], on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of the base anchor size
relative to the feature stride 2^level.
image_size: integer number or tuple of integer number of input image size.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = model_utils.parse_image_size(image_size)
self.feat_sizes = model_utils.get_feat_sizes(image_size, max_level)
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.feat_sizes, self.min_level,
self.max_level, self.num_scales,
self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,
self.config)
boxes = tf.convert_to_tensor(boxes, dtype=tf.float32)
return boxes
def get_anchors_per_location(self):
"""Get number of anchors per location."""
return self.num_scales * len(self.aspect_ratios)
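# Hedged usage sketch for the class above (values mirror common EfficientDet
# defaults but are illustrative only):
#
#   anchors = Anchors(min_level=3, max_level=7, num_scales=3,
#                     aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
#                     anchor_scale=4.0, image_size=512)
#   anchors.get_anchors_per_location()  # -> 9
#   anchors.boxes.shape                 # -> (49104, 4) for a 512x512 input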
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchors = anchors
self._match_threshold = match_threshold
self._num_classes = num_classes
def _unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = collections.OrderedDict()
anchors = self._anchors
count = 0
for level in range(anchors.min_level, anchors.max_level + 1):
feat_size = anchors.feat_sizes[level]
steps = feat_size['height'] * feat_size[
'width'] * anchors.get_anchors_per_location()
indices = tf.range(count, count + steps)
count += steps
labels_unpacked[level] = tf.reshape(
tf.gather(labels, indices),
[feat_size['height'], feat_size['width'], -1])
return labels_unpacked
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: An integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = tf.cast(cls_targets, tf.int32)
# Unpack labels.
cls_targets_dict = self._unpack_labels(cls_targets)
box_targets_dict = self._unpack_labels(box_targets)
num_positives = tf.reduce_sum(
tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))
return cls_targets_dict, box_targets_dict, num_positives
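# Hedged usage sketch (hypothetical tensors; `anchors` as constructed in the
# Anchors example above):
#
#   labeler = AnchorLabeler(anchors, num_classes=90)
#   cls_t, box_t, num_pos = labeler.label_anchors(
#       gt_boxes=tf.constant([[16., 16., 128., 128.]]),
#       gt_labels=tf.constant([[3]]))
#   # cls_t[3].shape -> (64, 64, 9) for a 512x512 input at level 3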
def generate_detections(self,
cls_outputs,
box_outputs,
indices,
classes,
image_id,
image_scale,
image_size=None,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
softnms=False,
disable_pyfun=None):
"""Generate detections based on class and box predictions."""
# disable_pyfun = False
if disable_pyfun:
return _generate_detections_tf(
cls_outputs,
box_outputs,
self._anchors.boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw)
return tf.compat.v1.py_func(_generate_detections, [
cls_outputs, box_outputs, self._anchors.boxes, indices, classes,
image_id, image_scale, self._num_classes, max_boxes_to_draw, softnms
], tf.float32)
def _generate_detections_tf(cls_outputs,
box_outputs,
anchor_boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
soft_nms_sigma=0.0,
iou_threshold=0.5,
use_native_nms=True):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a tensor with shape [N, 1], which has the highest class
scores on all feature levels. N is the number of selected top-k
anchors over all levels (k being MAX_DETECTION_POINTS).
box_outputs: a tensor with shape [N, 4], which stacks box regression
outputs on all feature levels. N is the number of selected top-k
anchors over all levels (k being MAX_DETECTION_POINTS).
anchor_boxes: a tensor with shape [N, 4], which stacks anchors on all
feature levels. N is the number of selected top-k anchors over
all levels.
indices: a tensor with shape [N], which holds the indices from top-k
selection.
classes: a tensor with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
image_size: a tuple (height, width) or an integer for image size.
min_score_thresh: A float representing the threshold for deciding when to
remove boxes based on score.
max_boxes_to_draw: Max number of boxes to draw.
soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;
See Bodla et al, https://arxiv.org/abs/1704.04503). When
`soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
NMS.
iou_threshold: A float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
use_native_nms: a bool that indicates whether to use native nms.
Returns:
detections: detection results in a tensor with each row representing
[image_id, ymin, xmin, ymax, xmax, score, class]
"""
if not image_size:
raise ValueError('tf version generate_detection needs non-empty image_size')
logger.info('Using tf version of post-processing.')
anchor_boxes = tf.gather(anchor_boxes, indices)
scores = tf.math.sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs_tf(box_outputs, anchor_boxes)
if use_native_nms:
logger.info('Using native nms.')
top_detection_idx, scores = tf.image.non_max_suppression_with_scores(
boxes,
scores,
max_boxes_to_draw,
iou_threshold=iou_threshold,
score_threshold=min_score_thresh,
soft_nms_sigma=soft_nms_sigma)
boxes = tf.gather(boxes, top_detection_idx)
else:
logger.info('Using customized nms.')
scores = tf.expand_dims(scores, axis=1)
all_detections = tf.concat([boxes, scores], axis=1)
top_detection_idx = nms_tf(all_detections, iou_threshold)
detections = tf.gather(all_detections, top_detection_idx)
scores = detections[:, 4]
boxes = detections[:, :4]
image_size = model_utils.parse_image_size(image_size)
detections = tf.stack([
tf.cast(tf.tile(image_id, tf.shape(top_detection_idx)), tf.float32),
tf.clip_by_value(boxes[:, 0], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 1], 0, image_size[1]) * image_scale,
tf.clip_by_value(boxes[:, 2], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 3], 0, image_size[1]) * image_scale,
scores,
tf.cast(tf.gather(classes, top_detection_idx) + 1, tf.float32)
], axis=1)
return detections
def _generate_detections(cls_outputs, box_outputs, anchor_boxes, indices,
classes, image_id, image_scale, num_classes,
max_boxes_to_draw, softnms):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on
all levels.
indices: a numpy array with shape [N], which is the indices from top-k
selection.
classes: a numpy array with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
num_classes: an integer that indicates the number of classes.
max_boxes_to_draw: max number of boxes to draw per image.
softnms: whether to use the Soft-NMS algorithm.
Returns:
detections: detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class]
"""
anchor_boxes = anchor_boxes[indices, :]
scores = sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs(
box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))
boxes = boxes[:, [1, 0, 3, 2]]
# run class-wise nms
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
if softnms:
top_detections_cls = soft_nms(all_detections_cls)
top_len = len(top_detections_cls)
else:
top_detection_idx = nms(all_detections_cls, 0.5)
top_detections_cls = all_detections_cls[top_detection_idx]
top_len = len(top_detection_idx)
top_detections_cls[:, 2] -= top_detections_cls[:, 0]
top_detections_cls[:, 3] -= top_detections_cls[:, 1]
top_detections_cls = np.column_stack(
(np.repeat(image_id, top_len),
top_detections_cls,
np.repeat(c + 1, top_len))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
# keep only the top `max_boxes_to_draw` detections by score
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:max_boxes_to_draw]], dtype=np.float32)
# Add dummy detections to pad the output up to `max_boxes_to_draw` rows
n = max(max_boxes_to_draw - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
else:
detections = _generate_dummy_detections(max_boxes_to_draw)
detections[:, 1:5] *= image_scale
return detections
def sigmoid(x):
"""Sigmoid function for use with Numpy for CPU evaluation."""
return 1 / (1 + np.exp(-x))
# def decode_box_outputs(rel_codes, anchors):
# """Transforms relative regression coordinates to absolute positions.
# Network predictions are normalized and relative to a given anchor; this
# reverses the transformation and outputs absolute coordinates for the input
# image.
# Args:
# rel_codes: box regression targets.
# anchors: anchors on all feature levels.
# Returns:
# outputs: bounding boxes.
# """
# ycenter_a = (anchors[0] + anchors[2]) / 2
# xcenter_a = (anchors[1] + anchors[3]) / 2
# ha = anchors[2] - anchors[0]
# wa = anchors[3] - anchors[1]
# ty, tx, th, tw = rel_codes
# w = np.exp(tw) * wa
# h = np.exp(th) * ha
# ycenter = ty * ha + ycenter_a
# xcenter = tx * wa + xcenter_a
# ymin = ycenter - h / 2.
# xmin = xcenter - w / 2.
# ymax = ycenter + h / 2.
# xmax = xcenter + w / 2.
# return np.column_stack([ymin, xmin, ymax, xmax])
def decode_box_outputs_tf(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[..., 0] + anchors[..., 2]) / 2
xcenter_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
ty, tx, th, tw = tf.unstack(rel_codes, num=4, axis=-1)
w = tf.math.exp(tw) * wa
h = tf.math.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
# @tf.autograph.to_graph
def nms_tf(dets, thresh):
"""Non-maximum suppression with tf graph mode."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = tf.argsort(scores, direction='DESCENDING')
keep = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
index = 0
while tf.shape(order)[0] > 0:
i = order[0]
keep = keep.write(index, i)
xx1 = tf.maximum(x1[i], tf.gather(x1, order[1:]))
yy1 = tf.maximum(y1[i], tf.gather(y1, order[1:]))
xx2 = tf.minimum(x2[i], tf.gather(x2, order[1:]))
yy2 = tf.minimum(y2[i], tf.gather(y2, order[1:]))
w = tf.maximum(0.0, xx2 - xx1 + 1)
h = tf.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (
areas[i] + tf.gather(areas, order[1:]) - intersection)
inds = tf.where_v2(overlap <= thresh)
order = tf.concat(tf.gather(order, inds + 1), axis=1)
order = tf.squeeze(order, axis=-1)
index += 1
return keep.stack()
def nms(dets, thresh):
"""Non-maximum suppression."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= thresh)[0]
order = order[inds + 1]
return keep
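# Quick numeric check (hypothetical boxes): the second box overlaps the first
# with IoU ~= 0.83 > 0.5 and is suppressed; the disjoint third box survives.
#
#   dets = np.array([[0., 0., 10., 10., 0.9],
#                    [1., 1., 10., 10., 0.8],
#                    [20., 20., 30., 30., 0.7]])
#   nms(dets, 0.5)  # -> [0, 2]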
def soft_nms(dets):
"""Soft non-maximum suppression.
[1] Soft-NMS -- Improving Object Detection With One Line of Code.
https://arxiv.org/abs/1704.04503
Args:
dets: detections with shape (num, 5) and format [x1, y1, x2, y2, score].
Note:
The suppression parameters are hardcoded below rather than passed in:
* method: one of `linear`, `gaussian` or `hard`; `gaussian` is used here.
* iou_thresh (float): IOU threshold, only for `linear` and `hard`.
* sigma: Gaussian parameter, only for method `gaussian`.
* score_thresh (float): Box score threshold for final boxes.
Returns:
numpy.array: Retained boxes.
"""
method = 'gaussian'
# Default sigma and iou_thresh are from the original soft-nms paper.
sigma = 0.5
iou_thresh = 0.3
score_thresh = 0.001
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# expand dets with areas, and the second dimension is
# x1, y1, x2, y2, score, area
dets = np.concatenate((dets, areas[:, None]), axis=1)
retained_box = []
while dets.size > 0:
max_idx = np.argmax(dets[:, 4], axis=0)
dets[[0, max_idx], :] = dets[[max_idx, 0], :]
retained_box.append(dets[0, :-1])
xx1 = np.maximum(dets[0, 0], dets[1:, 0])
yy1 = np.maximum(dets[0, 1], dets[1:, 1])
xx2 = np.minimum(dets[0, 2], dets[1:, 2])
yy2 = np.minimum(dets[0, 3], dets[1:, 3])
w = np.maximum(xx2 - xx1 + 1, 0.0)
h = np.maximum(yy2 - yy1 + 1, 0.0)
inter = w * h
iou = inter / (dets[0, 5] + dets[1:, 5] - inter)
if method == 'linear':
weight = np.ones_like(iou)
weight[iou > iou_thresh] -= iou[iou > iou_thresh]
elif method == 'gaussian':
weight = np.exp(-(iou * iou) / sigma)
else: # traditional nms
weight = np.ones_like(iou)
weight[iou > iou_thresh] = 0
dets[1:, 4] *= weight
retained_idx = np.where(dets[1:, 4] >= score_thresh)[0]
dets = dets[retained_idx + 1, :]
return np.vstack(retained_box)
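# With the `gaussian` method above, an overlapping box is down-weighted rather
# than removed: its score is multiplied by exp(-iou^2 / sigma). For the
# hypothetical pair used in the hard-NMS example (iou ~= 0.83, sigma = 0.5)
# that weight is exp(-0.83^2 / 0.5) ~= 0.25, so a 0.8 score decays to ~0.2
# and the box survives only if it still clears score_thresh.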
# === End of file: nvidia_tao_tf2/cv/efficientdet/model/anchors.py (repo: tao_tensorflow2_backend-main) ===
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils."""
from typing import Text
import tensorflow as tf
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
return x * tf.keras.backend.sigmoid(x)
def srelu_fn(x):
"""Smooth relu: a smooth version of relu."""
with tf.name_scope('srelu'):
beta = tf.Variable(20.0, name='srelu_beta', dtype=tf.float32)**2
beta = tf.cast(beta**2, x.dtype)
safe_log = tf.math.log(tf.where(x > 0., beta * x + 1., tf.ones_like(x)))
return tf.where((x > 0.), x - (1. / beta) * safe_log, tf.zeros_like(x))
def activation_fn(features: tf.Tensor, act_type: Text):
"""Customized non-linear activation type."""
if act_type in ('silu', 'swish'):
return tf.keras.layers.Activation(swish)(features)
if act_type == 'swish_native':
return tf.keras.layers.Activation(swish)(features)
if act_type == 'hswish':
return features * tf.nn.relu6(features + 3) / 6
if act_type == 'mish':
return features * tf.math.tanh(tf.math.softplus(features))
if act_type == 'identity':
return tf.identity(features)
if act_type == 'srelu':
return srelu_fn(features)
raise ValueError(f'Unsupported act_type {act_type}')
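# Minimal sketch of the builder above (hypothetical tensor):
#
#   x = tf.constant([-1.0, 0.0, 1.0])
#   activation_fn(x, 'swish')  # x * sigmoid(x) -> [-0.269, 0.0, 0.731]
#   activation_fn(x, 'relu')   # raises ValueError: Unsupported act_type relu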
# === End of file: nvidia_tao_tf2/cv/efficientdet/model/activation_builder.py (repo: tao_tensorflow2_backend-main) ===
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util class for creating image batches."""
import os
import sys
import numpy as np
from PIL import Image
class ImageBatcher:
"""Creates batches of pre-processed images."""
def __init__(self, input, shape, dtype, # noqa pylint: disable=W0622
max_num_images=None, exact_batches=False, preprocessor="EfficientDet"):
"""Initialize.
:param input: The input directory to read images from.
:param shape: The tensor shape of the batch to prepare, either in NCHW or NHWC format.
:param dtype: The (numpy) datatype to cast the batched data to.
:param max_num_images: The maximum number of images to read from the directory.
:param exact_batches: This defines how to handle a number of images that is not an exact
multiple of the batch size. If false, it will pad the final batch with zeros to reach
the batch size. If true, it will *remove* the last few images in excess of a batch size
multiple, to guarantee batches are exact (useful for calibration).
:param preprocessor: Set the preprocessor to use, depending on which network is being used.
"""
# Find images in the given input path
input = os.path.realpath(input)
self.images = []
extensions = [".jpg", ".jpeg", ".png", ".bmp"]
def is_image(path):
return os.path.isfile(path) and os.path.splitext(path)[1].lower() in extensions
if os.path.isdir(input):
self.images = [os.path.join(input, f) for f in os.listdir(input)
if is_image(os.path.join(input, f))]
self.images.sort()
elif os.path.isfile(input):
if is_image(input):
self.images.append(input)
self.num_images = len(self.images)
if self.num_images < 1:
exts = "/".join(extensions)
print(f"No valid {exts} images found in {input}")
sys.exit(1)
# Handle Tensor Shape
self.dtype = dtype
self.shape = shape
assert len(self.shape) == 4
self.batch_size = shape[0]
assert self.batch_size > 0
self.format = None
self.width = -1
self.height = -1
if self.shape[1] == 3:
self.format = "NCHW"
self.height = self.shape[2]
self.width = self.shape[3]
elif self.shape[3] == 3:
self.format = "NHWC"
self.height = self.shape[1]
self.width = self.shape[2]
assert all([self.format, self.width > 0, self.height > 0])
# Adapt the number of images as needed
if max_num_images and 0 < max_num_images < len(self.images):
self.num_images = max_num_images
if exact_batches:
self.num_images = self.batch_size * (self.num_images // self.batch_size)
if self.num_images < 1:
print("Not enough images to create batches")
sys.exit(1)
self.images = self.images[0:self.num_images]
# Subdivide the list of images into batches
self.num_batches = 1 + int((self.num_images - 1) / self.batch_size)
self.batches = []
for i in range(self.num_batches):
start = i * self.batch_size
end = min(start + self.batch_size, self.num_images)
self.batches.append(self.images[start:end])
# Indices
self.image_index = 0
self.batch_index = 0
self.preprocessor = preprocessor
def preprocess_image(self, image_path):
"""The image preprocessor loads an image from disk and prepares it as needed for batching.
This includes padding, resizing, normalization, data type casting, and transposing.
This Image Batcher implements one algorithm for now:
* EfficientDet: Resizes and pads the image to fit the input size.
:param image_path: The path to the image on disk to load.
:return: Two values: A numpy array holding the image sample, ready to be concatenated
into the rest of the batch, and the resize scale used, if any.
"""
def resize_pad(image, pad_color=(0, 0, 0)):
"""Resize and Pad.
A subroutine to implement padding and resizing. This will resize the image to fit
fully within the input size, and pads the remaining bottom-right portions with
the value provided.
:param image: The PIL image object
:param pad_color: The RGB values to use for the padded area. Default: Black/Zeros.
:return: Two values: The PIL image object already resized and padded,
and the resize scale used.
"""
width, height = image.size
width_scale = width / self.width
height_scale = height / self.height
scale = 1.0 / max(width_scale, height_scale)
image = image.resize(
(round(width * scale), round(height * scale)),
resample=Image.BILINEAR)
pad = Image.new("RGB", (self.width, self.height))
pad.paste(pad_color, [0, 0, self.width, self.height])
pad.paste(image)
return pad, scale
scale = None
image = Image.open(image_path)
image = image.convert(mode='RGB')
if self.preprocessor == "EfficientDet":
# For EfficientDet: resize and pad, filling the padded area with the
# ImageNet mean pixel values, and keep the [0, 255] value range
image, scale = resize_pad(image, (124, 116, 104))
image = np.asarray(image, dtype=self.dtype)
# [0-1] Normalization, Mean subtraction and Std Dev scaling are
# part of the EfficientDet graph, so no need to do it during preprocessing here
else:
print(f"Preprocessing method {self.preprocessor} not supported.")
sys.exit(1)
if self.format == "NCHW":
image = np.transpose(image, (2, 0, 1))
return image, scale
def get_batch(self):
"""Retrieve the batches.
This is a generator object, so you can use it within a loop as:
for batch, images in batcher.get_batch():
...
Or outside of a batch with the next() function.
:return: A generator yielding three items per iteration: a numpy array holding
a batch of images, the list of paths to the images loaded within this batch,
and the list of resize scales for each image in the batch.
"""
for i, batch_images in enumerate(self.batches):
batch_data = np.zeros(self.shape, dtype=self.dtype)
batch_scales = [None] * len(batch_images)
for i, image in enumerate(batch_images):
self.image_index += 1
batch_data[i], batch_scales[i] = self.preprocess_image(image)
self.batch_index += 1
yield batch_data, batch_images, batch_scales
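# Hedged usage sketch (directory, shape and dtype are placeholder values):
#
#   batcher = ImageBatcher("/data/calib_images", shape=(8, 512, 512, 3),
#                          dtype=np.float32, exact_batches=True)
#   for batch, image_paths, scales in batcher.get_batch():
#       run_inference(batch)  # hypothetical consumer of the NHWC batch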
# === End of file: nvidia_tao_tf2/cv/efficientdet/exporter/image_batcher.py (repo: tao_tensorflow2_backend-main) ===
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit EfficientDet exporter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# === End of file: nvidia_tao_tf2/cv/efficientdet/exporter/__init__.py (repo: tao_tensorflow2_backend-main) ===
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet ONNX exporter."""
import copy
import logging
import os
import tempfile
import numpy as np
import onnx
from onnx import numpy_helper
from onnx import shape_inference
import onnx_graphsurgeon as gs
import tensorflow as tf
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import tensor_util
from tf2onnx import optimizer, tf_loader, tf_utils, tfonnx
from tf2onnx.utils import make_sure
from nvidia_tao_tf2.cv.efficientdet.exporter import onnx_utils # noqa pylint: disable=W0611
logger = logging.getLogger(__name__)
def get_tf_tensor_data(tensor):
"""Get data from tensor."""
make_sure(isinstance(tensor, tensor_pb2.TensorProto), "Require TensorProto")
np_data = tensor_util.MakeNdarray(tensor)
make_sure(isinstance(np_data, np.ndarray), "%r isn't ndarray", np_data)
return np_data
def tf_to_onnx_tensor(tensor, name=""):
"""Convert tensorflow tensor to onnx tensor."""
np_data = get_tf_tensor_data(tensor)
if np_data.dtype == np.object:
# assume np_data is string, numpy_helper.from_array accepts ndarray,
# in which each item is of str while the whole dtype is of object.
try:
# Faster but fails on Unicode
# np_data = np_data.astype(np.str).astype(np.object)
if len(np_data.shape) > 0:
np_data = np_data.astype(np.str).astype(np.object)
else:
np_data = np.array(str(np_data)).astype(np.object)
except UnicodeDecodeError:
decode = np.vectorize(lambda x: x.decode('UTF-8'))
np_data = decode(np_data).astype(np.object)
except Exception as e: # noqa pylint: disable=W0611
raise RuntimeError(f"Not support type: {type(np_data.flat[0])}") from e
return numpy_helper.from_array(np_data, name=name)
tf_utils.tf_to_onnx_tensor = tf_to_onnx_tensor
@gs.Graph.register()
def replace_with_reducemean(self, inputs, outputs):
"""Replace subgraph with ReduceMean."""
# Disconnect output nodes of all input tensors
new_outputs = outputs.copy()
# Disconnet input nodes of all output tensors
for out in outputs:
out.inputs.clear()
# Insert the new node.
return self.layer(op="ReduceMean", inputs=inputs, outputs=new_outputs, attrs={'axes': [2, 3], 'keepdims': 1})
class EfficientDetGraphSurgeon:
"""EfficientDet GraphSurgeon Class."""
def __init__(self, saved_model_path, legacy_plugins=False, dynamic_batch=False, is_qat=False):
"""Constructor of the EfficientDet Graph Surgeon object.
:param saved_model_path: The path pointing to the TensorFlow saved model to load.
:param legacy_plugins: If using TensorRT version < 8.0.1,
set this to True to use older (but slower) plugins.
"""
saved_model_path = os.path.realpath(saved_model_path)
assert os.path.exists(saved_model_path)
# Let TensorRT optimize QDQ nodes instead of TF
from tf2onnx.optimizer import _optimizers # noqa pylint: disable=C0415
updated_optimizers = copy.deepcopy(_optimizers)
del updated_optimizers["q_dq_optimizer"]
del updated_optimizers["const_dequantize_optimizer"]
# Use tf2onnx to convert saved model to an initial ONNX graph.
graph_def, inputs, outputs = tf_loader.from_saved_model(
saved_model_path, None, None, "serve", ["serving_default"])
print(f"Loaded saved model from {saved_model_path}")
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name="")
with tf_loader.tf_session(graph=tf_graph):
onnx_graph = tfonnx.process_tf_graph(
tf_graph, input_names=inputs, output_names=outputs, opset=13)
onnx_model = optimizer.optimize_graph(onnx_graph, optimizers=updated_optimizers).make_model(
f"Converted from {saved_model_path}")
self.graph = gs.import_onnx(onnx_model)
assert self.graph
logger.info("TF2ONNX graph created successfully")
self.is_qat = is_qat
# Fold constants via ONNX-GS that TF2ONNX may have missed
if not self.is_qat:
self.graph.fold_constants()
self.batch_size = None
self.dynamic_batch = dynamic_batch
self.legacy_plugins = legacy_plugins
os_handle, self.tmp_onnx_path = tempfile.mkstemp(suffix='.onnx', dir=saved_model_path)
os.close(os_handle)
def infer(self):
"""Sanitize the graph by cleaning any unconnected nodes.
do a topological resort and fold constant inputs values.
When possible, run shape inference on the ONNX graph to determine tensor shapes.
"""
for _ in range(3):
count_before = len(self.graph.nodes)
self.graph.cleanup().toposort()
try:
for node in self.graph.nodes:
for o in node.outputs:
o.shape = None
model = gs.export_onnx(self.graph)
model = shape_inference.infer_shapes(model)
self.graph = gs.import_onnx(model)
except Exception as e:
raise RuntimeError("Shape inference could not be performed at this time") from e
if not self.is_qat:
try:
self.graph.fold_constants(fold_shapes=True)
except TypeError as e:
raise TypeError("This version of ONNX GraphSurgeon does not support folding shapes, "
"please upgrade your onnx_graphsurgeon module.") from e
count_after = len(self.graph.nodes)
if count_before == count_after:
# No new folding occurred in this iteration, so we can stop for now.
break
def save(self, output_path=None):
"""Save the ONNX model to the given location.
:param output_path: Path pointing to the location where to
write out the updated ONNX model.
"""
self.graph.cleanup().toposort()
model = gs.export_onnx(self.graph)
output_path = output_path or self.tmp_onnx_path
onnx.save(model, output_path)
return output_path
def update_preprocessor(self, input_format, input_size, preprocessor="imagenet"):
"""Remove all the pre-processing nodes in the ONNX graph and leave only the image normalization essentials.
:param input_format: The input data format, either "NCHW" or "NHWC".
:param input_size: The input size as a comma-separated string in H,W format, e.g. "512,512".
:param preprocessor: The preprocessor to use, either "imagenet" for imagenet mean and stdev normalization,
or "scale_range" for uniform [-1,+1] range normalization.
"""
# Update the input and output tensors shape
# input_size = input_size.split(",")
assert len(input_size) == 4
assert input_format in ["NCHW", "NHWC"]
if self.dynamic_batch:
# Enable dynamic batchsize
if input_format == "NCHW":
self.graph.inputs[0].shape = ['N', 3, input_size[2], input_size[3]]
if input_format == "NHWC":
self.graph.inputs[0].shape = ['N', input_size[1], input_size[2], 3]
else:
# Disable dynamic batchsize
self.graph.inputs[0].shape = input_size
self.graph.inputs[0].dtype = np.float32
self.graph.inputs[0].name = "input"
print(f"ONNX graph input shape: {self.graph.inputs[0].shape} [{input_format} format]")
self.infer()
# Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them
for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:
node.inputs.clear()
# Convert to NCHW format if needed
input_tensor = self.graph.inputs[0]
if input_format == "NHWC":
input_tensor = self.graph.transpose("preprocessor/transpose", input_tensor, [0, 3, 1, 2])
assert preprocessor in ["imagenet", "scale_range"]
preprocessed_tensor = None
if preprocessor == "imagenet":
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting
scale_val = 1 / np.asarray([255], dtype=np.float32)
mean_val = -1 * np.expand_dims(np.asarray([0.485, 0.456, 0.406], dtype=np.float32), axis=(0, 2, 3))
stddev_val = 1 / np.expand_dims(np.asarray([0.224, 0.224, 0.224], dtype=np.float32), axis=(0, 2, 3))
# y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev
scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val * stddev_val)
mean_out = self.graph.elt_const("Add", "preprocessor/mean", scale_out, mean_val * stddev_val)
preprocessed_tensor = mean_out[0]
if preprocessor == "scale_range":
# RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting
scale_val = 2 / np.asarray([255], dtype=np.float32)
offset_val = np.expand_dims(np.asarray([-1, -1, -1], dtype=np.float32), axis=(0, 2, 3))
# y = x * (2 / 255) - 1, mapping [0, 255] inputs to the uniform [-1, +1] range
scale_out = self.graph.elt_const("Mul", "preprocessor/scale", input_tensor, scale_val)
range_out = self.graph.elt_const("Add", "preprocessor/range", scale_out, offset_val)
preprocessed_tensor = range_out[0]
# Find the first stem conv node of the graph, and connect the normalizer directly to it
stem_name = "stem_conv"
# Remove transpose in QAT graph: stem <- transpose <- DQ <- Q
if self.is_qat:
print("Removing QAT transpose")
transpose_node = [node for node in self.graph.nodes if node.op == "Transpose" and stem_name in node.o().name][0]
dq_node = transpose_node.i()
dq_node.outputs = transpose_node.outputs
transpose_node.outputs.clear()
stem = [node for node in self.graph.nodes
if node.op == "Conv" and stem_name in node.name][0]
print(f"Found {stem.op} node '{stem.name}' as stem entry")
if self.is_qat:
# # stem <- DQ <- Q
# stem.i().i().i().outputs[0].dtype = np.uint8
stem.i().i().inputs[0] = preprocessed_tensor
else:
stem.inputs[0] = preprocessed_tensor
# Patch for QAT export (@yuw)
if 'auto_pad' not in stem.attrs:
stem.attrs['auto_pad'] = 'NOTSET'
stem.attrs['pads'] = [0, 0, 1, 1]
self.infer()
def update_shapes(self):
"""Update shapes."""
# Reshape nodes have the batch dimension as a fixed value of 1, they should use the batch size instead
# Output-Head reshapes use [1, -1, C], corrected reshape value should be [-1, V, C]
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
shape_in = node.inputs[0].shape
if shape_in is None or len(shape_in) not in [4, 5]: # TFOD graphs have 5-dim inputs on this Reshape
continue
if type(node.inputs[1]) != gs.Constant:
continue
shape_out = node.inputs[1].values
if len(shape_out) != 3 or shape_out[0] != 1 or shape_out[1] != -1:
continue
volume = shape_in[1] * shape_in[2] * shape_in[3] / shape_out[2]
if len(shape_in) == 5:
volume *= shape_in[4]
shape_corrected = np.asarray([-1, volume, shape_out[2]], dtype=np.int64)
node.inputs[1] = gs.Constant(f"{node.name}_shape", values=shape_corrected)
print(f"Updating Output-Head Reshape node {node.name} to {node.inputs[1].values}")
# Other Reshapes only need to change the first dim to -1, as long as there are no -1's already
for node in [node for node in self.graph.nodes if node.op == "Reshape"]:
if type(node.inputs[1]) != gs.Constant or node.inputs[1].values[0] != 1 or -1 in node.inputs[1].values:
continue
node.inputs[1].values[0] = -1
print(f"Updating Reshape node {node.name} to {node.inputs[1].values}")
# Resize nodes try to calculate the output shape dynamically, it's more optimal to pre-compute the shape
# Resize on a BiFPN will always be 2x, but grab it from the graph just in case
for node in [node for node in self.graph.nodes if node.op == "Resize"]:
if len(node.inputs) < 4 or node.inputs[0].shape is None:
continue
scale_h, scale_w = None, None
if type(node.inputs[3]) == gs.Constant:
# The sizes input is already folded
if len(node.inputs[3].values) != 4:
continue
scale_h = node.inputs[3].values[2] / node.inputs[0].shape[2]
scale_w = node.inputs[3].values[3] / node.inputs[0].shape[3]
if type(node.inputs[3]) == gs.Variable:
# The sizes input comes from Shape+Slice+Concat
concat = node.i(3)
if concat.op != "Concat":
continue
if type(concat.inputs[1]) != gs.Constant or len(concat.inputs[1].values) != 2:
continue
scale_h = concat.inputs[1].values[0] / node.inputs[0].shape[2]
scale_w = concat.inputs[1].values[1] / node.inputs[0].shape[3]
scales = np.asarray([1, 1, scale_h, scale_w], dtype=np.float32)
del node.inputs[3]
node.inputs[2] = gs.Constant(name=f"{node.name}_scales", values=scales)
print(f"Updating Resize node {node.name} to {scales}")
self.infer()
def update_network(self):
"""Updates the graph.
To replace certain nodes in the main EfficientDet network
"""
# EXPERIMENTAL
# for node in self.graph.nodes:
# if node.op == "GlobalAveragePool" and node.o().op == "Squeeze" and node.o().o().op == "Reshape":
# # node Mul has two output nodes: GlobalAveragePool and Mul
# # we only disconnect GlobalAveragePool
# self.graph.replace_with_reducemean(node.inputs, node.o().o().outputs)
# print("Pooling removed.")
# self.graph.cleanup().toposort()
pass
def update_nms(self, threshold=None, detections=None):
"""Updates the graph to replace the NMS op by BatchedNMS_TRT TensorRT plugin node.
:param threshold: Override the score threshold attribute. If set to None,
use the value in the graph.
:param detections: Override the max detections attribute. If set to None,
use the value in the graph.
"""
def find_head_concat(name_scope):
# This will find the concatenation node at the end of either Class Net or Box Net.
# These concatenation nodes bring together prediction data for each of 5 scales.
# The concatenated Class Net node will have shape
# [batch_size, num_anchors, num_classes],
# and the concatenated Box Net node has the shape [batch_size, num_anchors, 4].
# These concatenation nodes can be found by searching for all Concat's
# and checking if the node two steps above in the graph has a name that begins with
# either "box_net/..." or "class_net/...".
for node in [node for node in self.graph.nodes
if node.op == "Transpose" and name_scope in node.name]:
concat = self.graph.find_descendant_by_op(node, "Concat")
assert concat and len(concat.inputs) == 5
logger.info("Found {} node '{}' as the tip of {}".format( # noqa pylint: disable=C0209
concat.op, concat.name, name_scope))
return concat
def extract_anchors_tensor(split):
# This will find the anchors that have been hardcoded somewhere within the ONNX graph.
# The function will return a gs.Constant that can be directly used as
# an input to the NMS plugin.
# The anchor tensor shape will be [1, num_anchors, 4].
# Note that '1' is kept as first dim, regardless of batch size,
# as it's not necessary to replicate the anchors for all images in the batch.
# The anchors are available (one per coordinate) hardcoded as constants
# within certain box decoder nodes.
# Each of these four constants have shape [1, num_anchors], so some numpy operations
# are used to expand the dims and concatenate them as needed.
# These constants can be found by starting from the Box Net's split operation,
# and for each coordinate, walking down in the graph until either an Add or
# Mul node is found. The second input on this nodes will be the anchor data required.
def get_anchor_np(output_idx, op):
node = self.graph.find_descendant_by_op(split.o(0, output_idx), op)
assert node
val = np.squeeze(node.inputs[1].values)
return np.expand_dims(val.flatten(), axis=(0, 2))
anchors_y = get_anchor_np(0, "Add")
anchors_x = get_anchor_np(1, "Add")
anchors_h = get_anchor_np(2, "Mul")
anchors_w = get_anchor_np(3, "Mul")
anchors = np.concatenate([anchors_y, anchors_x, anchors_h, anchors_w], axis=2)
return gs.Constant(name="nms/anchors:0", values=anchors)
self.infer()
head_names = ["class-predict", "box-predict"]
# There are five nodes at the bottom of the graph that provide important connection points:
# 1. Find the concat node at the end of the class net (multi-scale class predictor)
class_net = find_head_concat(head_names[0])
class_net_tensor = class_net.outputs[0]
# 2. Find the concat node at the end of the box net (multi-scale localization predictor)
box_net = find_head_concat(head_names[1])
box_net_tensor = box_net.outputs[0]
# 3. Find the split node that separates the box net coordinates
# and feeds them into the box decoder.
box_net_split = self.graph.find_descendant_by_op(box_net, "Split")
assert box_net_split and len(box_net_split.outputs) == 4
# 4. Find the concat node at the end of the box decoder.
box_decoder = self.graph.find_descendant_by_op(box_net_split, "Concat")
assert box_decoder and len(box_decoder.inputs) == 4
# box_decoder_tensor = box_decoder.outputs[0]
# 5. Find the NMS node.
nms_node = self.graph.find_node_by_op("NonMaxSuppression")
# Extract NMS Configuration
num_detections = int(nms_node.inputs[2].values) if detections is None else detections
iou_threshold = float(nms_node.inputs[3].values)
score_threshold = float(nms_node.inputs[4].values) if threshold is None else threshold
# num_classes = class_net.i().inputs[1].values[-1]
# normalized = False
# NMS Inputs and Attributes
# NMS expects these shapes for its input tensors:
# box_net: [batch_size, number_boxes, 4]
# class_net: [batch_size, number_boxes, number_classes]
# anchors: [1, number_boxes, 4] (if used)
nms_op = None
nms_attrs = None
nms_inputs = None
# EfficientNMS TensorRT Plugin
# Fusing the decoder will always be faster, so this is
# the default NMS method supported. In this case,
# three inputs are given to the NMS TensorRT node:
# - The box predictions (from the Box Net node found above)
# - The class predictions (from the Class Net node found above)
# - The default anchor coordinates (from the extracted anchor constants)
# As the original tensors from EfficientDet will be used,
# the NMS code type is set to 1 (Center+Size),
# because this is the internal box coding format used by the network.
anchors_tensor = extract_anchors_tensor(box_net_split)
nms_inputs = [box_net_tensor, class_net_tensor, anchors_tensor]
nms_op = "EfficientNMS_TRT"
nms_attrs = {
'plugin_version': "1",
'background_class': -1,
'max_output_boxes': num_detections,
# Keep threshold to at least 0.01 for better efficiency
'score_threshold': max(0.01, score_threshold),
'iou_threshold': iou_threshold,
'score_activation': True,
'box_coding': 1,
}
nms_output_classes_dtype = np.int32
# NMS Outputs
if self.dynamic_batch:
# Enable dynamic batch
nms_output_num_detections = gs.Variable(
name="num_detections", dtype=np.int32, shape=['N', 1])
nms_output_boxes = gs.Variable(
name="detection_boxes", dtype=np.float32, shape=['N', num_detections, 4])
nms_output_scores = gs.Variable(
name="detection_scores", dtype=np.float32, shape=['N', num_detections])
nms_output_classes = gs.Variable(
name="detection_classes", dtype=nms_output_classes_dtype, shape=['N', num_detections])
else:
nms_output_num_detections = gs.Variable(
name="num_detections", dtype=np.int32, shape=[self.batch_size, 1])
nms_output_boxes = gs.Variable(
name="detection_boxes", dtype=np.float32, shape=[self.batch_size, num_detections, 4])
nms_output_scores = gs.Variable(
name="detection_scores", dtype=np.float32, shape=[self.batch_size, num_detections])
nms_output_classes = gs.Variable(
name="detection_classes", dtype=nms_output_classes_dtype, shape=[self.batch_size, num_detections])
nms_outputs = [
nms_output_num_detections,
nms_output_boxes,
nms_output_scores,
nms_output_classes]
# Create the NMS Plugin node with the selected inputs.
# The outputs of the node will also become the final outputs of the graph.
self.graph.plugin(
op=nms_op,
name="nms/non_maximum_suppression",
inputs=nms_inputs,
outputs=nms_outputs,
attrs=nms_attrs)
logger.info("Created NMS plugin '{}' with attributes: {}".format(nms_op, nms_attrs)) # noqa pylint: disable=C0209
self.graph.outputs = nms_outputs
self.infer()
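# A hedged end-to-end sketch of how the surgeon above is typically driven
# (paths and sizes are placeholders, not values from this repo):
#
#   effdet_gs = EfficientDetGraphSurgeon("/models/effdet_saved_model")
#   effdet_gs.update_preprocessor("NHWC", [1, 512, 512, 3], preprocessor="imagenet")
#   effdet_gs.update_shapes()
#   effdet_gs.update_network()
#   effdet_gs.update_nms(threshold=0.4, detections=100)
#   onnx_path = effdet_gs.save("/models/effdet_d0.onnx")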
# === End of file: nvidia_tao_tf2/cv/efficientdet/exporter/onnx_exporter.py (repo: tao_tensorflow2_backend-main) ===
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet TensorRT engine builder."""
import logging
import os
import sys
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_tf2.cv.efficientdet.exporter.image_batcher import ImageBatcher
logger = logging.getLogger(__name__)
class EngineCalibrator(trt.IInt8EntropyCalibrator2):
"""Implements the INT8 Entropy Calibrator2."""
def __init__(self, cache_file):
"""Init.
:param cache_file: The location of the cache file.
"""
super().__init__()
self.cache_file = cache_file
self.image_batcher = None
self.batch_allocation = None
self.batch_generator = None
def set_image_batcher(self, image_batcher: ImageBatcher):
"""Define the image batcher to use, if any.
If using only the cache file,
an image batcher doesn't need to be defined.
:param image_batcher: The ImageBatcher object
"""
self.image_batcher = image_batcher
size = int(np.dtype(self.image_batcher.dtype).itemsize * np.prod(self.image_batcher.shape))
self.batch_allocation = cuda.mem_alloc(size)
self.batch_generator = self.image_batcher.get_batch()
def get_batch_size(self):
"""Overrides from trt.IInt8EntropyCalibrator2.
Get the batch size to use for calibration.
:return: Batch size.
"""
if self.image_batcher:
return self.image_batcher.batch_size
return 1
def get_batch(self, names):
"""Overrides from trt.IInt8EntropyCalibrator2.
Get the next batch to use for calibration, as a list of device memory pointers.
:param names: The names of the inputs, if useful to define the order of inputs.
:return: A list of int-casted memory pointers.
"""
if not self.image_batcher:
return None
try:
batch, _, _ = next(self.batch_generator)
logger.info("Calibrating image {} / {}".format( # noqa pylint: disable=C0209
self.image_batcher.image_index, self.image_batcher.num_images))
cuda.memcpy_htod(self.batch_allocation, np.ascontiguousarray(batch))
return [int(self.batch_allocation)]
except StopIteration:
logger.info("Finished calibration batches")
return None
def read_calibration_cache(self):
"""Overrides from trt.IInt8EntropyCalibrator2.
Read the calibration cache file stored on disk, if it exists.
:return: The contents of the cache file, if any.
"""
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
logger.info("Using calibration cache file: {}".format(self.cache_file)) # noqa pylint: disable=C0209
return f.read()
return None
def write_calibration_cache(self, cache):
"""Overrides from trt.IInt8EntropyCalibrator2.
Store the calibration cache to a file on disk.
:param cache: The contents of the calibration cache to store.
"""
with open(self.cache_file, "wb") as f:
logger.info("Writing calibration cache data to: {}".format(self.cache_file)) # noqa pylint: disable=C0209
f.write(cache)
class EngineBuilder:
"""Parses an ONNX graph and builds a TensorRT engine from it."""
def __init__(self, verbose=False, workspace=8, is_qat=False):
"""Init.
:param verbose: If enabled, a higher verbosity level will be set on the TensorRT logger.
:param workspace: Max memory workspace to allow, in Gb.
"""
self.trt_logger = trt.Logger(trt.Logger.INFO)
if verbose:
self.trt_logger.min_severity = trt.Logger.Severity.VERBOSE
trt.init_libnvinfer_plugins(self.trt_logger, namespace="")
self.builder = trt.Builder(self.trt_logger)
self.config = self.builder.create_builder_config()
self.config.max_workspace_size = workspace * (2 ** 30)
# self.batch_size = None
self.network = None
self.parser = None
self.is_qat = is_qat
def create_network(self, onnx_path, batch_size, dynamic_batch_size=None):
"""Parse the ONNX graph and create the corresponding TensorRT network definition.
:param onnx_path: The path to the ONNX graph to load.
"""
network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
onnx_path = os.path.realpath(onnx_path)
with open(onnx_path, "rb") as f:
if not self.parser.parse(f.read()):
logger.error("Failed to load ONNX file: {}".format(onnx_path)) # noqa pylint: disable=C0209
for error in range(self.parser.num_errors):
logger.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
logger.info("Network Description")
profile = self.builder.create_optimization_profile()
dynamic_inputs = False
for inp in inputs:
logger.info("Input '{}' with shape {} and dtype {}".format(inp.name, inp.shape, inp.dtype)) # noqa pylint: disable=C0209
if inp.shape[0] == -1:
dynamic_inputs = True
if dynamic_batch_size:
if isinstance(dynamic_batch_size, str):
dynamic_batch_size = [int(v) for v in dynamic_batch_size.split(",")]
assert len(dynamic_batch_size) == 3
min_shape = [dynamic_batch_size[0]] + list(inp.shape[1:])
opt_shape = [dynamic_batch_size[1]] + list(inp.shape[1:])
max_shape = [dynamic_batch_size[2]] + list(inp.shape[1:])
profile.set_shape(inp.name, min_shape, opt_shape, max_shape)
logger.info("Input '{}' Optimization Profile with shape MIN {} / OPT {} / MAX {}".format( # noqa pylint: disable=C0209
inp.name, min_shape, opt_shape, max_shape))
else:
shape = [batch_size] + list(inp.shape[1:])
profile.set_shape(inp.name, shape, shape, shape)
logger.info("Input '{}' Optimization Profile with shape {}".format(inp.name, shape)) # noqa pylint: disable=C0209
if dynamic_inputs:
self.config.add_optimization_profile(profile)
def create_engine(self, engine_path, precision,
calib_input=None, calib_cache=None, calib_num_images=5000,
calib_batch_size=8):
"""Build the TensorRT engine and serialize it to disk.
:param engine_path: The path where to serialize the engine to.
:param precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
:param calib_input: The path to a directory holding the calibration images.
:param calib_cache: The path where to write the calibration cache to,
or if it already exists, load it from.
:param calib_num_images: The maximum number of images to use for calibration.
:param calib_batch_size: The batch size to use for the calibration process.
"""
engine_path = os.path.realpath(engine_path)
engine_dir = os.path.dirname(engine_path)
os.makedirs(engine_dir, exist_ok=True)
logger.debug("Building {} Engine in {}".format(precision, engine_path)) # noqa pylint: disable=C0209
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
if precision == "fp16":
if not self.builder.platform_has_fast_fp16:
logger.warning("FP16 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.FP16)
elif precision == "int8":
if not self.builder.platform_has_fast_int8:
logger.warning("INT8 is not supported natively on this platform/device")
elif self.is_qat:
print("Exporting a QAT model...")
self.config.set_flag(trt.BuilderFlag.INT8)
else:
assert calib_cache, "calib_cache must be specified when building an engine in PTQ INT8 mode."
if self.builder.platform_has_fast_fp16:
# Also enable fp16, as some layers may be even more efficient in fp16 than int8
self.config.set_flag(trt.BuilderFlag.FP16)
self.config.set_flag(trt.BuilderFlag.INT8)
self.config.int8_calibrator = EngineCalibrator(calib_cache)
if not os.path.exists(calib_cache):
calib_shape = [calib_batch_size] + list(inputs[0].shape[1:])
calib_dtype = trt.nptype(inputs[0].dtype)
self.config.int8_calibrator.set_image_batcher(
ImageBatcher(calib_input, calib_shape, calib_dtype,
max_num_images=calib_num_images,
exact_batches=True))
engine_bytes = None
try:
engine_bytes = self.builder.build_serialized_network(self.network, self.config)
except AttributeError:
engine = self.builder.build_engine(self.network, self.config)
engine_bytes = engine.serialize()
del engine
assert engine_bytes
with open(engine_path, "wb") as f:
logger.info("Serializing engine to file: {:}".format(engine_path)) # noqa pylint: disable=C0209
f.write(engine_bytes)
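# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal end-to-end flow for this builder; the ONNX path, engine path,
# and calibration directory are hypothetical:
#   builder = EngineBuilder(verbose=False, workspace=8, is_qat=False)
#   builder.create_network("model.onnx", batch_size=1)
#   builder.create_engine("model.engine", precision="int8",
#                         calib_input="calib_images/",
#                         calib_cache="calib.cache",
#                         calib_num_images=500, calib_batch_size=8)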
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/exporter/trt_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ONNX utils."""
import logging
import onnx_graphsurgeon as gs
logger = logging.getLogger(__name__)
@gs.Graph.register()
def elt_const(self, op, name, input, value): # noqa pylint: disable=W0622
"""Element-wise operation.
Add an element-wise operation to the graph which will operate
on the input tensor with the value(s) given.
:param op: The ONNX operation to perform, i.e. "Add" or "Mul".
:param input: The tensor to operate on.
:param value: The value array to operate with.
:param name: The name to use for the node.
"""
input_tensor = input if isinstance(input, gs.Variable) else input[0]
logger.debug("Created {} node '{}': {}".format(op, name, value.squeeze())) # noqa pylint: disable=C0209
const = gs.Constant(name=f"{name}_value:0", values=value)
return self.layer(
name=name, op=op,
inputs=[input_tensor, const], outputs=[name + ":0"])
@gs.Graph.register()
def unsqueeze(self, name, input, axes=None): # noqa pylint: disable=W0622
"""Adds to the graph an Unsqueeze node for the given axes and to the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be "unsqueezed".
:param axes: A list of axes on which to add the new dimension(s).
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if isinstance(input, gs.Variable) else input[0]
logger.debug("Created Unsqueeze node '{}': {}".format(name, axes)) # noqa pylint: disable=C0209
return self.layer(
name=name, op="Unsqueeze",
inputs=[input_tensor], outputs=[name + ":0"], attrs={'axes': axes})
@gs.Graph.register()
def transpose(self, name, input, perm): # noqa pylint: disable=W0622
"""Adds to the graph a Transpose node for the given axes permutation and to the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be transposed.
:param perm: A list of axes defining their order after transposing occurs.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if isinstance(input, gs.Variable) else input[0]
logger.debug("Created Transpose node '{}': {}".format(name, perm)) # noqa pylint: disable=C0209
return self.layer(
name=name, op="Transpose",
inputs=[input_tensor], outputs=[name + ":0"], attrs={'perm': perm})
@gs.Graph.register()
def sigmoid(self, name, input): # noqa pylint: disable=W0622
"""Adds to the graph a Sigmoid node for the given input.
:param self: The gs.Graph object being extended.
:param name: The name to use for the node.
:param input: The tensor to be applied to.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensor = input if isinstance(input, gs.Variable) else input[0]
logger.debug("Created Sigmoid node '{}'".format(name)) # noqa pylint: disable=C0209
return self.layer(
name=name, op="Sigmoid",
inputs=[input_tensor], outputs=[name + ":0"])
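# --- Usage sketch (illustrative, not part of the original module) ---
# Because the helpers above are registered on gs.Graph, they can be chained
# directly off a graph instance. The model path and tensor name below are
# hypothetical; rewire graph.outputs before cleanup()/export:
#   import numpy as np
#   import onnx
#   graph = gs.import_onnx(onnx.load("model.onnx"))
#   t = graph.transpose("to_nhwc", graph.tensors()["input:0"], [0, 2, 3, 1])
#   t = graph.elt_const("Mul", "rescale", t,
#                       np.asarray([1.0 / 255.0], dtype=np.float32))
#   t = graph.sigmoid("probs", t)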
@gs.Graph.register()
def plugin(self, op, name, inputs, outputs, attrs): # noqa pylint: disable=W0622
"""Adds to the graph a TensorRT plugin node with the given name, inputs and outputs.
The attrs dictionary holds attributes to be added to the plugin node.
:param self: The gs.Graph object being extended.
:param op: The registered name for the TensorRT plugin.
:param name: The name to use for the node.
:param inputs: The list of tensors to use as inputs.
:param outputs: The list of tensors to use as outputs.
:param attrs: The dictionary to use as attributes.
:return: The first output tensor, to allow chained graph construction.
"""
input_tensors = inputs if isinstance(inputs, list) else [inputs]
logger.debug("Created TRT Plugin node '{}': {}".format(name, attrs)) # noqa pylint: disable=C0209
return self.layer(
op=op, name=name,
inputs=input_tensors, outputs=outputs, attrs=attrs)
@gs.Graph.register()
def find_node_by_op(self, op):
"""Finds the first node in the graph with the given operation name.
:param self: The gs.Graph object being extended.
:param op: The operation name to search for.
:return: The first node found that performs the given op, or None if no match is found.
"""
for node in self.nodes:
if node.op == op:
return node
return None
@gs.Graph.register()
def find_descendant_by_op(self, node, op, depth=10):
"""Find lower node by matching op name.
Starting from the given node, finds a node lower in the graph
matching the given operation name. This is not an
exhaustive graph search; it follows only the first output of
each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
:param depth: Stop searching after traversing this many nodes.
:return: The first descendant node that performs the given op, or None.
"""
for _ in range(depth):
node = node.o()
if node.op == op:
return node
return None
@gs.Graph.register()
def find_ancestor_by_op(self, node, op, depth=10):
"""Find higher node by matching op name.
Starting from the given node, finds a node higher in the graph
matching the given operation name. This is not an
exhaustive graph search; it follows only the first input of
each node traversed while searching depth-first.
:param self: The gs.Graph object being extended.
:param node: The node to start searching from.
:param op: The operation name to search for.
:param depth: Stop searching after traversing this many nodes.
:return: The first ancestor node that performs the given op, or None.
"""
for _ in range(depth):
node = node.i()
if node.op == op:
return node
return None
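# --- Usage sketch (illustrative, not part of the original module) ---
# The three lookup helpers above compose naturally; the op names below are
# examples only:
#   concat = graph.find_node_by_op("Concat")
#   if concat is not None:
#       softmax = graph.find_descendant_by_op(concat, "Softmax", depth=10)
#       conv = graph.find_ancestor_by_op(concat, "Conv", depth=10)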
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/exporter/onnx_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit EfficientDet inferencer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/inferencer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_tf2.cv.efficientdet.exporter.image_batcher import ImageBatcher
from nvidia_tao_tf2.cv.efficientdet.visualize import vis_utils
class TensorRTInfer:
"""Implements inference for the EfficientDet TensorRT engine."""
def __init__(self, engine_path,
label_id_mapping=None,
min_score_thresh=0.3):
"""Init.
:param engine_path: The path to the serialized engine to load from disk.
"""
self.label_id_mapping = label_id_mapping or {}
self.min_score_thresh = min_score_thresh or 0.3
# Load TRT engine
self.logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(self.logger, namespace="")
with open(engine_path, "rb") as f, trt.Runtime(self.logger) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read())
self.context = self.engine.create_execution_context()
assert self.engine
assert self.context
# Setup I/O bindings
self.inputs = []
self.outputs = []
self.allocations = []
for i in range(self.engine.num_bindings):
is_input = False
if self.engine.binding_is_input(i):
is_input = True
name = self.engine.get_binding_name(i)
dtype = self.engine.get_binding_dtype(i)
shape = self.engine.get_binding_shape(i)
if is_input:
self.batch_size = shape[0]
size = np.dtype(trt.nptype(dtype)).itemsize
for s in shape:
size *= s
allocation = cuda.mem_alloc(size)
binding = {
'index': i,
'name': name,
'dtype': np.dtype(trt.nptype(dtype)),
'shape': list(shape),
'allocation': allocation,
}
self.allocations.append(allocation)
if self.engine.binding_is_input(i):
self.inputs.append(binding)
else:
self.outputs.append(binding)
assert self.batch_size > 0
assert len(self.inputs) > 0
assert len(self.outputs) > 0
assert len(self.allocations) > 0
def input_spec(self):
"""Get the specs for the input tensor of the network. Useful to prepare memory allocations.
:return: Two items, the shape of the input tensor and its (numpy) datatype.
"""
return self.inputs[0]['shape'], self.inputs[0]['dtype']
def output_spec(self):
"""Get the specs for the output tensors of the network. Useful to prepare memory allocations.
:return: A list with two items per element,
the shape and (numpy) datatype of each output tensor.
"""
specs = []
for o in self.outputs:
specs.append((o['shape'], o['dtype']))
return specs
def infer(self, batch, scales=None, nms_threshold=None):
"""Execute inference on a batch of images.
The images should already be batched and preprocessed, as prepared by
the ImageBatcher class. Memory copying to and from the GPU device will be performed here.
:param batch: A numpy array holding the image batch.
:param scales: The image resize scales for each image in this batch.
Default: No scale postprocessing applied.
:return: A nested list for each image in the batch and each detection in the list.
"""
# Prepare the output data
outputs = []
for shape, dtype in self.output_spec():
outputs.append(np.zeros(shape, dtype))
# Process I/O and execute the network
cuda.memcpy_htod(self.inputs[0]['allocation'], np.ascontiguousarray(batch))
self.context.execute_v2(self.allocations)
for o in range(len(outputs)):
cuda.memcpy_dtoh(outputs[o], self.outputs[o]['allocation'])
# Process the results
nums = outputs[0]
boxes = outputs[1]
scores = outputs[2]
classes = outputs[3]
detections = []
normalized = (np.max(boxes) < 2.0)
for i in range(self.batch_size):
detections.append([])
for n in range(int(nums[i])):
scale = self.inputs[0]['shape'][2] if normalized else 1.0
if scales and i < len(scales):
scale /= scales[i]
if nms_threshold and scores[i][n] < nms_threshold:
continue
detections[i].append({
'ymin': boxes[i][n][0] * scale,
'xmin': boxes[i][n][1] * scale,
'ymax': boxes[i][n][2] * scale,
'xmax': boxes[i][n][3] * scale,
'score': scores[i][n],
'class': int(classes[i][n]),
})
return detections
def __del__(self):
"""Simple function to destroy tensorrt handlers."""
if self.context:
del self.context
if self.engine:
del self.engine
def visualize_detections(self, image_dir, output_dir, dump_label=False):
"""Visualize detection."""
# TODO(@yuw): to use vis_utils function.
labels = [i[1] for i in sorted(self.label_id_mapping.items(), key=lambda x: x[0])]
batcher = ImageBatcher(image_dir, *self.input_spec())
for batch, images, scales in batcher.get_batch():
print(f"Processing Image {batcher.image_index} / {batcher.num_images}", end="\r")
detections = self.infer(batch, scales, self.min_score_thresh)
for i in range(len(images)):
basename = os.path.splitext(os.path.basename(images[i]))[0]
# Image Visualizations
output_path = os.path.join(output_dir, f"{basename}.png")
vis_utils.visualize_detections(images[i], output_path, detections[i], labels)
if dump_label:
out_label_path = os.path.join(output_dir, "labels")
os.makedirs(out_label_path, exist_ok=True)
assert self.label_id_mapping, "Label mapping must be valid to generate KIITI labels."
# Generate KITTI labels
kitti_txt = ""
for d in detections[i]:
kitti_txt += self.label_id_mapping[int(d['class']) + 1] + ' 0 0 0 ' + ' '.join(
[str(d['xmin']), str(d['ymin']), str(d['xmax']), str(d['ymax'])]) + \
' 0 0 0 0 0 0 0 ' + str(d['score']) + '\n'
with open(os.path.join(out_label_path, f"{basename}.txt"), "w", encoding='utf-8') as f:
f.write(kitti_txt)
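# --- Usage sketch (illustrative, not part of the original module) ---
# Running raw inference without the built-in visualization; the engine and
# image paths are hypothetical:
#   trt_infer = TensorRTInfer("model.engine",
#                             label_id_mapping={1: "person"},
#                             min_score_thresh=0.3)
#   batcher = ImageBatcher("images/", *trt_infer.input_spec())
#   for batch, images, scales in batcher.get_batch():
#       detections = trt_infer.infer(batch, scales)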
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/inferencer/inference_trt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference related utilities."""
import functools
import logging
import os
from typing import Text, Tuple, Union
import numpy as np
from PIL import Image
import tensorflow as tf
import yaml
from nvidia_tao_tf2.cv.efficientdet.dataloader import dataloader
from nvidia_tao_tf2.cv.efficientdet.model import anchors
from nvidia_tao_tf2.cv.efficientdet.utils import model_utils
from nvidia_tao_tf2.cv.efficientdet.visualize import vis_utils
coco_id_mapping = {
1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane',
6: 'bus', 7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light',
11: 'fire hydrant', 13: 'stop sign', 14: 'parking meter', 15: 'bench',
16: 'bird', 17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow',
22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 27: 'backpack',
28: 'umbrella', 31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee',
35: 'skis', 36: 'snowboard', 37: 'sports ball', 38: 'kite',
39: 'baseball bat', 40: 'baseball glove', 41: 'skateboard', 42: 'surfboard',
43: 'tennis racket', 44: 'bottle', 46: 'wine glass', 47: 'cup', 48: 'fork',
49: 'knife', 50: 'spoon', 51: 'bowl', 52: 'banana', 53: 'apple',
54: 'sandwich', 55: 'orange', 56: 'broccoli', 57: 'carrot', 58: 'hot dog',
59: 'pizza', 60: 'donut', 61: 'cake', 62: 'chair', 63: 'couch',
64: 'potted plant', 65: 'bed', 67: 'dining table', 70: 'toilet', 72: 'tv',
73: 'laptop', 74: 'mouse', 75: 'remote', 76: 'keyboard', 77: 'cell phone',
78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator',
84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear',
89: 'hair drier', 90: 'toothbrush',
} # pyformat: disable
def image_preprocess(image, image_size: Union[int, Tuple[int, int]]):
"""Preprocess image for inference.
Args:
image: input image, can be a tensor or a numpy array.
image_size: single integer of image size for square image or tuple of two
integers, in the format of (image_height, image_width).
Returns:
(image, scale): a tuple of processed image and its scale.
"""
input_processor = dataloader.DetectionInputProcessor(image, image_size)
input_processor.normalize_image()
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
image_scale = input_processor.image_scale_to_original
return image, image_scale
def batch_image_files_decode(image_files):
"""Decode batch of images."""
def decode(image_file):
image = tf.io.decode_image(image_file)
image.set_shape([None, None, None])
return image
raw_images = tf.map_fn(decode, image_files, dtype=tf.uint8)
return tf.stack(raw_images)
def batch_image_preprocess(raw_images,
image_size: Union[int, Tuple[int, int]],
batch_size: int = None):
"""Preprocess batched images for inference.
Args:
raw_images: a list of images, each image can be a tensor or a numpy array.
image_size: single integer of image size for square image or tuple of two
integers, in the format of (image_height, image_width).
batch_size: if None, use map_fn to deal with dynamic batch size.
Returns:
(image, scale): a tuple of processed images and scales.
"""
if not batch_size:
# map_fn is a little bit slower due to some extra overhead.
map_fn = functools.partial(image_preprocess, image_size=image_size)
images, scales = tf.map_fn(
map_fn, raw_images, dtype=(tf.float32, tf.float32), back_prop=False)
return (images, scales)
# If batch size is known, use a simple loop.
scales, images = [], []
for i in range(batch_size):
image, scale = image_preprocess(raw_images[i], image_size)
scales.append(scale)
images.append(image)
images = tf.stack(images)
scales = tf.stack(scales)
return (images, scales)
def build_inputs(image_path_pattern: Text, image_size: Union[int, Tuple[int, int]]):
"""Read and preprocess input images.
Args:
image_path_pattern: a path to indicate a single or multiple files.
image_size: single integer of image size for square image or tuple of two
integers, in the format of (image_height, image_width).
Returns:
(raw_images, images, scales): raw images, processed images, and scales.
Raises:
ValueError if image_path_pattern doesn't match any file.
"""
raw_images, fnames = [], []
for fname in tf.io.gfile.glob(image_path_pattern):
image = Image.open(fname).convert('RGB')
image = np.array(image)
raw_images.append(image)
fnames.append(fname)
if not raw_images:
raise ValueError(
f'Cannot find any images for pattern {image_path_pattern}')
return raw_images, fnames
def det_post_process_combined(params, cls_outputs, box_outputs, scales,
min_score_thresh, max_boxes_to_draw):
"""A combined version of det_post_process with dynamic batch size support."""
batch_size = tf.shape(list(cls_outputs)[0])[0]
cls_outputs_all = []
box_outputs_all = []
# Concatenates class and box of all levels into one tensor.
for level in range(0, params['max_level'] - params['min_level'] + 1):
if params['data_format'] == 'channels_first':
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']]))
box_outputs_all.append(
tf.reshape(box_outputs[level], [batch_size, -1, 4]))
cls_outputs_all = tf.concat(cls_outputs_all, 1)
box_outputs_all = tf.concat(box_outputs_all, 1)
# cast to float32
cls_outputs_all = tf.cast(cls_outputs_all, dtype=tf.float32)
box_outputs_all = tf.cast(box_outputs_all, dtype=tf.float32)
# Create anchor_label for picking top-k predictions.
eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'], params['aspect_ratios'],
params['anchor_scale'], params['image_size'])
anchor_boxes = eval_anchors.boxes
scores = tf.math.sigmoid(cls_outputs_all)
# apply bounding box regression to anchors
boxes = anchors.decode_box_outputs_tf(box_outputs_all, anchor_boxes)
boxes = tf.expand_dims(boxes, axis=2)
scales = tf.expand_dims(scales, axis=-1)
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_boxes_to_draw,
max_boxes_to_draw,
score_threshold=min_score_thresh,
clip_boxes=False))
del valid_detections # to be used in future.
image_ids = tf.cast(
tf.tile(
tf.expand_dims(tf.range(batch_size), axis=1), [1, max_boxes_to_draw]),
dtype=tf.float32)
image_size = model_utils.parse_image_size(params['image_size'])
ymin = tf.clip_by_value(nmsed_boxes[..., 0], 0, image_size[0]) * scales
xmin = tf.clip_by_value(nmsed_boxes[..., 1], 0, image_size[1]) * scales
ymax = tf.clip_by_value(nmsed_boxes[..., 2], 0, image_size[0]) * scales
xmax = tf.clip_by_value(nmsed_boxes[..., 3], 0, image_size[1]) * scales
classes = tf.cast(nmsed_classes + 1, tf.float32)
detection_list = [image_ids, ymin, xmin, ymax, xmax, nmsed_scores, classes]
detections = tf.stack(detection_list, axis=2, name='detections')
return detections
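# Note: the stacked `detections` tensor above has shape
# [batch_size, max_boxes_to_draw, 7], with each detection laid out as
# [image_id, ymin, xmin, ymax, xmax, score, class] in coordinates already
# rescaled back to the original image via `scales`.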
def visualize_image(image,
boxes,
classes,
scores,
id_mapping,
min_score_thresh=anchors.MIN_SCORE_THRESH,
max_boxes_to_draw=anchors.MAX_DETECTIONS_PER_IMAGE,
line_thickness=2,
**kwargs):
"""Visualizes a given image.
Args:
image: a image with shape [H, W, C].
boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
classes: a class prediction with shape [N].
scores: A list of float value with shape [N].
id_mapping: a dictionary from class id to name.
min_score_thresh: minimal score for showing. If class probability is below
this threshold, then the object will not show up.
max_boxes_to_draw: maximum number of bounding boxes to draw.
line_thickness: thickness of the bounding box lines.
**kwargs: extra parameters.
Returns:
output_image: an output image with annotated boxes and classes.
"""
category_index = {k: {'id': k, 'name': id_mapping[k]} for k in id_mapping}
img = np.array(image)
vis_utils.visualize_boxes_and_labels_on_image_array(
img,
boxes,
classes,
scores,
category_index,
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw,
line_thickness=line_thickness,
**kwargs)
return img
def parse_label_id_mapping(label_id_mapping):
"""Parse label id mapping from a string or a yaml file.
The label_id_mapping is a dict that maps class id to its name, such as:
{
1: "person",
2: "dog"
}
Args:
label_id_mapping:
Returns:
A dictionary with key as integer id and value as a string of name.
"""
if label_id_mapping is None:
return coco_id_mapping
if isinstance(label_id_mapping, dict):
label_id_dict = label_id_mapping
elif isinstance(label_id_mapping, str):
with tf.io.gfile.GFile(label_id_mapping) as f:
label_id_dict = yaml.load(f, Loader=yaml.FullLoader)
else:
raise TypeError('label_id_mapping must be a dict or a yaml filename, '
'containing a mapping from class ids to class names.')
return label_id_dict
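# For reference, a YAML file passed as `label_id_mapping` would simply be a
# mapping of integer ids to names (hypothetical content):
#   1: person
#   2: dog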
def visualize_image_prediction(image,
prediction,
disable_pyfun=True,
label_id_mapping=None,
**kwargs):
"""Visualize detections on a given image.
Args:
image: Image content in shape of [height, width, 3].
prediction: a list of vector, with each vector has the format of [image_id,
ymin, xmin, ymax, xmax, score, class].
disable_pyfunc: disable pyfunc for faster post processing.
label_id_mapping: a map from label id to name.
**kwargs: extra parameters for visualization, such as min_score_thresh,
max_boxes_to_draw, and line_thickness.
Returns:
a list of annotated images.
"""
boxes = prediction[:, 1:5]
classes = prediction[:, 6].astype(int)
scores = prediction[:, 5]
if not disable_pyfun:
# convert [x, y, width, height] to [y, x, height, width]
boxes[:, [0, 1, 2, 3]] = boxes[:, [1, 0, 3, 2]]
label_id_mapping = label_id_mapping or {} # coco_id_mapping
return visualize_image(image, boxes, classes, scores, label_id_mapping,
**kwargs)
class InferenceModel(tf.Module):
"""EfficientDet Inference Model."""
def __init__(self, model, input_shape, params,
batch_size=1, label_id_mapping=None,
min_score_thresh=0.001, max_boxes_to_draw=100):
"""Init."""
super().__init__()
self.model = model
self.input_shape = input_shape
self.batch_size = batch_size
self.params = params
self.disable_pyfun = True
self.label_id_mapping = label_id_mapping or {}
self.min_score_thresh = min_score_thresh
self.max_boxes_to_draw = max_boxes_to_draw
def infer(self, imgs):
"""Run inference on a batch of images."""
images, scales = batch_image_preprocess(imgs, self.input_shape, self.batch_size)
cls_outputs, box_outputs = self.model(images, training=False)
detections = det_post_process_combined(
self.params,
cls_outputs, box_outputs,
scales,
min_score_thresh=self.min_score_thresh,
max_boxes_to_draw=self.max_boxes_to_draw)
return detections
def visualize_detections(self, image_paths, output_dir, dump_label=False, **kwargs):
"""Visualize detections."""
# TODO(@yuw): to use vis_utils function.
raw_images, fnames = build_inputs(image_paths, self.input_shape)
self.batch_size = min(len(raw_images), self.batch_size)
if self.batch_size < 1:
return
# run inference and render annotations
predictions = np.array(self.infer(raw_images))
for i, prediction in enumerate(predictions):
img = visualize_image_prediction(
raw_images[i],
prediction,
disable_pyfun=self.disable_pyfun,
label_id_mapping=self.label_id_mapping,
min_score_thresh=self.min_score_thresh,
max_boxes_to_draw=self.max_boxes_to_draw,
**kwargs)
output_image_path = os.path.join(output_dir, os.path.basename(fnames[i]))
Image.fromarray(img).save(output_image_path)
logging.info('writing output image to %s', output_image_path)
if dump_label:
out_label_path = os.path.join(output_dir, 'labels')
assert self.label_id_mapping, \
"Label mapping must be valid to generate KIITI labels."
os.makedirs(out_label_path, exist_ok=True)
# Generate KITTI labels
kitti_txt = ""
for d in prediction:
if d[5] >= self.min_score_thresh:
kitti_txt += self.label_id_mapping[int(d[6])] + ' 0 0 0 ' + ' '.join(
[str(i) for i in [d[2], d[1], d[4], d[3]]]) + ' 0 0 0 0 0 0 0 ' + \
str(d[5]) + '\n'
basename = os.path.splitext(os.path.basename(fnames[i]))[0]
with open(os.path.join(out_label_path, f"{basename}.txt"), "w", encoding='utf-8') as f:
f.write(kitti_txt)
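# --- Usage sketch (illustrative, not part of the original module) ---
# `model` and `params` are assumed to come from the EfficientDet model
# builder and config utilities; the paths are hypothetical:
#   infer_model = InferenceModel(model, input_shape=(512, 512), params=params,
#                                batch_size=2,
#                                label_id_mapping=coco_id_mapping)
#   infer_model.visualize_detections("images/*.jpg", "outputs/")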
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/inferencer/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to store default experiment specs for EfficientDet tasks."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/experiment_specs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EFF EMA Checkpoint Callback."""
import os
import shutil
import tempfile
import tensorflow as tf
from tensorflow_addons.optimizers import MovingAverage
from nvidia_tao_tf2.cv.efficientdet.utils.helper import fetch_optimizer
from nvidia_tao_tf2.cv.efficientdet.utils.helper import dump_json, dump_eval_json, encode_eff
class EffEmaCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
Taken from tfa.callbacks.AverageModelCheckpoint [original class].
NOTE1: The original class has a type check decorator, which prevents passing non-string save_freq (fix: removed)
NOTE2: The original class may not properly handle layered (nested) optimizer objects (fix: use fetch_optimizer)
Attributes:
update_weights: If True, assign the moving average weights
to the model, and save them. If False, keep the old
non-averaged weights, but the saved model uses the
average weights.
See `tf.keras.callbacks.ModelCheckpoint` for the other args.
"""
def __init__(self,
eff_dir: str,
encryption_key: str,
update_weights: bool,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
is_qat: bool = False,
**kwargs):
"""Init."""
super().__init__(
eff_dir,
monitor=monitor,
verbose=verbose,
save_best_only=save_best_only,
save_weights_only=save_weights_only,
mode=mode,
save_freq=save_freq,
**kwargs)
self.update_weights = update_weights
self.ema_opt = None
self.eff_dir = eff_dir
self.encryption_key = encryption_key
self.is_qat = is_qat
def set_model(self, model):
"""Set model."""
self.ema_opt = fetch_optimizer(model, MovingAverage)
return super().set_model(model)
def _save_model(self, epoch, batch, logs):
"""Save model."""
assert isinstance(self.ema_opt, MovingAverage), "optimizer must be wrapped in MovingAverage"
if self.update_weights:
self.ema_opt.assign_average_vars(self.model.variables)
super()._save_model(epoch, batch, logs)
else:
# Note: `model.get_weights()` gives us the weights (non-ref)
# whereas `model.variables` returns references to the variables.
non_avg_weights = self.model.get_weights()
self.ema_opt.assign_average_vars(self.model.variables)
# result is currently None, since `super._save_model` doesn't
# return anything, but this may change in the future.
super()._save_model(epoch, batch, logs)
self.model.set_weights(non_avg_weights)
def _remove_tmp_files(self):
"""Remove temporary zip file and directory."""
# TODO(@yuw): try catch?
os.remove(self.temp_zip_file)
shutil.rmtree(os.path.dirname(self.filepath))
def on_epoch_end(self, epoch, logs=None):
"""Override on_epoch_end."""
self.epochs_since_last_save += 1
eff_epoch = epoch + 1 # EFF checkpoint names start at 001
checkpoint_dir = tempfile.mkdtemp()
self.filepath = os.path.join(checkpoint_dir, f'emackpt-{epoch:03d}') # override filepath
# pylint: disable=protected-access
if self.save_freq == 'epoch' and self.epochs_since_last_save >= self.period:
self._save_model(epoch=epoch, batch=None, logs=logs) # To self.filepath
# WORKAROUND to save QAT graph
if self.is_qat:
shutil.copy(os.path.join(self.eff_dir, 'train_graph.json'), checkpoint_dir)
shutil.copy(os.path.join(self.eff_dir, 'eval_graph.json'), checkpoint_dir)
else:
# save train/eval graph json to checkpoint_dir
dump_json(self.model, os.path.join(checkpoint_dir, 'train_graph.json'))
dump_eval_json(checkpoint_dir, eval_graph='eval_graph.json')
# convert content in self.filepath to EFF
eff_filename = f'{self.model.name}_{eff_epoch:03d}.tlt'
eff_model_path = os.path.join(self.eff_dir, eff_filename)
self.temp_zip_file = encode_eff(
checkpoint_dir,
eff_model_path, self.encryption_key)
self._remove_tmp_files()
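# --- Usage sketch (illustrative, not part of the original module) ---
# The callback assumes the model was compiled with an optimizer that is (or
# wraps) a MovingAverage instance, since set_model() fetches it; the key and
# path below are hypothetical:
#   ema_ckpt = EffEmaCheckpoint(eff_dir="results/", encryption_key="nvidia_tlt",
#                               update_weights=False, save_freq='epoch')
#   model.fit(train_ds, epochs=300, callbacks=[ema_ckpt])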
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/eff_ema_checkpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing callbacks for EfficientDet."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric logging callback."""
from datetime import timedelta
import numpy as np
import time
import tensorflow as tf
from nvidia_tao_tf2.common.mlops.wandb import alert
import nvidia_tao_tf2.common.logging.logging as status_logging
class MetricLogging(tf.keras.callbacks.Callback):
"""Learning Rate Tensorboard Callback."""
def __init__(self, num_epochs, steps_per_epoch, initial_epoch, **kwargs):
"""Init."""
super().__init__(**kwargs)
self.num_epochs = num_epochs
self.steps_before_epoch = steps_per_epoch * initial_epoch
self.steps_in_epoch = 0
# Initialize variables for epoch time calculation.
self.time_per_epoch = 0
# total loss
self.total_loss = 0
self.s_logger = status_logging.get_status_logger()
@property
def global_steps(self):
"""The current 1-indexed global step."""
return self.steps_before_epoch + self.steps_in_epoch
def on_batch_end(self, batch, logs=None):
"""on_batch_end."""
self.steps_in_epoch = batch + 1
if np.isnan(float(logs.get('loss'))):
alert(
title='nan loss',
text='Training loss is nan',
level=1,
duration=1800,
is_master=True
)
self.total_loss += float(logs.get('loss'))
def on_epoch_begin(self, epoch, logs=None):
"""on_epoch_begin."""
self._epoch_start_time = time.time()
def on_epoch_end(self, epoch, logs=None):
"""on_epoch_end."""
self.steps_before_epoch += self.steps_in_epoch
avg_loss = self.total_loss / self.steps_in_epoch
epoch_end_time = time.time()
self.time_per_epoch = epoch_end_time - self._epoch_start_time
# reset loss and steps
self.steps_in_epoch = 0
self.total_loss = 0
# dump log
self.write_status_json(avg_loss, epoch)
def write_status_json(self, loss, current_epoch):
"""Write out the data to the status.json file initiated by the experiment for monitoring.
Args:
loss (float): average training loss to be recorder in the monitor.
current_epoch (int): Current epoch.
"""
current_epoch += 1 # 1-based
lr = self.model.optimizer.lr(self.global_steps).numpy()
monitor_data = {
"epoch": current_epoch,
"max_epoch": self.num_epochs,
"time_per_epoch": str(timedelta(seconds=self.time_per_epoch)),
# "eta": str(timedelta(seconds=(self.num_epochs - current_epoch) * self.time_per_epoch)),
"loss": loss,
"learning_rate": float(lr)
}
# Save the json file.
try:
self.s_logger.write(
data=monitor_data,
status_level=status_logging.Status.RUNNING)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
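# --- Usage sketch (illustrative, not part of the original module) ---
# write_status_json() calls self.model.optimizer.lr(self.global_steps), so
# this callback assumes the optimizer's learning rate is a schedule object
# callable with a step value:
#   metric_cb = MetricLogging(num_epochs=300, steps_per_epoch=100,
#                             initial_epoch=0)
#   model.fit(train_ds, epochs=300, callbacks=[metric_cb])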
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/logging_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Callback related utils."""
import os
from mpi4py import MPI
import numpy as np
import tensorflow as tf
from tensorflow_addons.optimizers import MovingAverage
import nvidia_tao_tf2.common.logging.logging as status_logging
from nvidia_tao_tf2.cv.efficientdet.processor.postprocessor import EfficientDetPostprocessor
from nvidia_tao_tf2.cv.efficientdet.utils import coco_metric
from nvidia_tao_tf2.cv.efficientdet.utils import label_utils
from nvidia_tao_tf2.cv.efficientdet.utils.helper import fetch_optimizer
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import is_main_process
from nvidia_tao_tf2.cv.efficientdet.visualize import vis_utils
class COCOEvalCallback(tf.keras.callbacks.Callback):
"""COCO Evaluation Callback."""
def __init__(self, eval_dataset, eval_model, eval_freq, start_eval_epoch, hparams, **kwargs):
"""Init."""
super().__init__(**kwargs)
self.dataset = eval_dataset
self.eval_model = eval_model
self.eval_freq = eval_freq
self.start_eval_epoch = start_eval_epoch
self.hparams = hparams
self.ema_opt = None
self.postpc = EfficientDetPostprocessor(self.hparams)
log_dir = os.path.join(hparams['results_dir'], 'tb_events', 'eval')
self.file_writer = tf.summary.create_file_writer(log_dir)
label_map = label_utils.get_label_map(hparams['eval_label_map'])
self.evaluator = coco_metric.EvaluationMetric(
filename=hparams['val_json_file'], label_map=label_map)
self.pbar = tf.keras.utils.Progbar(hparams['eval_samples'])
self.s_logger = status_logging.get_status_logger()
def set_model(self, model):
"""Set model."""
if self.hparams['moving_average_decay'] > 0:
self.ema_opt = fetch_optimizer(model, MovingAverage)
return super().set_model(model)
@tf.function
def eval_model_fn(self, images, labels):
"""Evaluation model function."""
cls_outputs, box_outputs = self.eval_model(images, training=False)
detections = self.postpc.generate_detections(
cls_outputs, box_outputs,
labels['image_scales'],
labels['source_ids'])
def transform_detections(detections):
# Transforms detections from [id, x1, y1, x2, y2, score, class]
# form to [id, x, y, w, h, score, class].
return tf.stack([
detections[:, :, 0],
detections[:, :, 1],
detections[:, :, 2],
detections[:, :, 3] - detections[:, :, 1],
detections[:, :, 4] - detections[:, :, 2],
detections[:, :, 5],
detections[:, :, 6],
], axis=-1)
tf.numpy_function(
self.evaluator.update_state,
[labels['groundtruth_data'], transform_detections(detections)], [])
return detections, labels['image_scales']
def evaluate(self, epoch):
"""Run evalution at Nth epoch."""
if self.hparams['moving_average_decay'] > 0:
self.ema_opt.swap_weights() # get ema weights
self.eval_model.set_weights(self.model.get_weights())
self.evaluator.reset_states()
# evaluate all images.
for i, (images, labels) in enumerate(self.dataset):
detections, scales = self.eval_model_fn(images, labels)
# [id, x1, y1, x2, y2, score, class]
if self.hparams['image_preview'] and i == 0:
bs_index = 0
image = np.copy(images[bs_index])
if self.hparams['data_format'] == 'channels_first':
image = np.transpose(image, (1, 2, 0))
# decode image
image = vis_utils.denormalize_image(image)
predictions = np.array(detections[bs_index])
predictions[:, 1:5] /= scales[bs_index]
boxes = predictions[:, 1:5].astype(np.int32)
boxes = boxes[:, [1, 0, 3, 2]]
classes = predictions[:, -1].astype(np.int32)
scores = predictions[:, -2]
image = vis_utils.visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
{},
min_score_thresh=0.3,
max_boxes_to_draw=100,
line_thickness=2)
with self.file_writer.as_default():
tf.summary.image('Image Preview', tf.expand_dims(image, axis=0), step=epoch)
# draw detections
if is_main_process():
self.pbar.update(i)
# gather detections from all ranks
self.evaluator.gather()
# compute the final eval results.
if is_main_process():
metrics = self.evaluator.result()
metric_dict = {}
with self.file_writer.as_default(), tf.summary.record_if(True):
for i, name in enumerate(self.evaluator.metric_names):
tf.summary.scalar(name, metrics[i], step=epoch)
metric_dict[name] = metrics[i]
for k, v in metric_dict.items():
self.s_logger.kpi[k] = float(v)
try:
self.s_logger.write(
status_level=status_logging.Status.RUNNING)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
if self.hparams['moving_average_decay'] > 0:
self.ema_opt.swap_weights() # get base weights
MPI.COMM_WORLD.Barrier() # noqa pylint: disable=I1101
def on_epoch_end(self, epoch, logs=None):
"""on_epoch_end with eval_freq."""
if (epoch + 1) >= self.start_eval_epoch and (epoch + 1) % self.eval_freq == 0:
self.evaluate(epoch)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/eval_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Moving average callback implementation."""
import tensorflow as tf
from tensorflow_addons.optimizers import MovingAverage
from typing import Any, MutableMapping, Text
from nvidia_tao_tf2.cv.efficientdet.utils.helper import fetch_optimizer
class MovingAverageCallback(tf.keras.callbacks.Callback):
"""A Callback to be used with a `MovingAverage` optimizer.
Applies moving average weights to the model during validation time to test
and predict on the averaged weights rather than the current model weights.
Once training is complete, the model weights will be overwritten with the
averaged weights (by default).
Attributes:
overwrite_weights_on_train_end: Whether to overwrite the current model
weights with the averaged weights from the moving average optimizer.
**kwargs: Any additional callback arguments.
"""
def __init__(self,
overwrite_weights_on_train_end: bool = False,
**kwargs):
"""Init."""
super().__init__(**kwargs)
self.overwrite_weights_on_train_end = overwrite_weights_on_train_end
self.ema_opt = None
def set_model(self, model: tf.keras.Model):
"""Set model."""
super().set_model(model)
self.ema_opt = fetch_optimizer(model, MovingAverage)
self.ema_opt.shadow_copy(self.model.weights)
def on_test_begin(self, logs: MutableMapping[Text, Any] = None):
"""Override on_step_begin."""
self.ema_opt.swap_weights()
def on_test_end(self, logs: MutableMapping[Text, Any] = None):
"""Override on_test_end."""
self.ema_opt.swap_weights()
def on_train_end(self, logs: MutableMapping[Text, Any] = None):
"""Override on_train_end."""
if self.overwrite_weights_on_train_end:
self.ema_opt.assign_average_vars(self.model.variables)
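# --- Usage sketch (illustrative, not part of the original module) ---
# The optimizer must be wrapped in MovingAverage before compiling; the loss
# below is a placeholder:
#   opt = MovingAverage(tf.keras.optimizers.SGD(learning_rate=0.08),
#                       average_decay=0.999)
#   model.compile(optimizer=opt, loss="mse")
#   model.fit(train_ds, epochs=10, callbacks=[MovingAverageCallback()])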
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/moving_average_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorboard callback for learning rate schedules."""
import tensorflow as tf
class LRTensorBoard(tf.keras.callbacks.Callback):
"""Learning Rate Tensorboard Callback."""
def __init__(self, steps_per_epoch, initial_epoch, log_dir, **kwargs):
"""Init."""
super().__init__(**kwargs)
self.summary_writer = tf.summary.create_file_writer(log_dir)
self.steps_before_epoch = steps_per_epoch * initial_epoch
self.steps_in_epoch = 0
@property
def global_steps(self):
"""The current 1-indexed global step."""
return self.steps_before_epoch + self.steps_in_epoch
def on_batch_end(self, batch, logs=None):
"""on_batch_end."""
self.steps_in_epoch = batch + 1
lr = self.model.optimizer.lr(self.global_steps)
with self.summary_writer.as_default():
tf.summary.scalar('learning_rate', lr, self.global_steps)
def on_epoch_end(self, epoch, logs=None):
"""on_epoch_end."""
self.steps_before_epoch += self.steps_in_epoch
self.steps_in_epoch = 0
def on_train_end(self, logs=None):
"""on_train_end."""
self.summary_writer.flush()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/lr_tensorboard.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Callback related utils."""
import tensorflow as tf
from tensorflow_addons.optimizers import MovingAverage
from nvidia_tao_tf2.cv.efficientdet.utils.helper import fetch_optimizer
class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
Taken from tfa.callbacks.AverageModelCheckpoint [original class].
NOTE1: The original class has a type check decorator, which prevents passing non-string save_freq (fix: removed)
NOTE2: The original class may not properly handle layered (nested) optimizer objects (fix: use fetch_optimizer)
Attributes:
update_weights: If True, assign the moving average weights
to the model, and save them. If False, keep the old
non-averaged weights, but the saved model uses the
average weights.
See `tf.keras.callbacks.ModelCheckpoint` for the other args.
"""
def __init__(self,
update_weights: bool,
filepath: str,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
**kwargs):
"""Init."""
super().__init__(
filepath,
monitor,
verbose,
save_best_only,
save_weights_only,
mode,
save_freq,
**kwargs)
self.update_weights = update_weights
self.ema_opt = None
def set_model(self, model):
"""Set model."""
self.ema_opt = fetch_optimizer(model, MovingAverage)
return super().set_model(model)
def _save_model(self, epoch, batch, logs):
"""Save model."""
assert isinstance(self.ema_opt, MovingAverage)
if self.update_weights:
self.ema_opt.assign_average_vars(self.model.variables)
super()._save_model(epoch, batch, logs)
else:
# Note: `model.get_weights()` gives us the weights (non-ref)
# whereas `model.variables` returns references to the variables.
non_avg_weights = self.model.get_weights()
self.ema_opt.assign_average_vars(self.model.variables)
# result is currently None, since `super._save_model` doesn't
# return anything, but this may change in the future.
super()._save_model(epoch, batch, logs)
self.model.set_weights(non_avg_weights)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/average_model_checkpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EFF Checkpoint Callback."""
import os
import shutil
import tempfile
import tensorflow as tf
from nvidia_tao_tf2.cv.efficientdet.utils.helper import dump_json, dump_eval_json, encode_eff
class EffCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
Attributes:
See `tf.keras.callbacks.ModelCheckpoint` for the other args.
"""
def __init__(self,
eff_dir: str,
encryption_key: str,
graph_only: bool = False,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
is_qat: bool = False,
**kwargs):
"""Init."""
super().__init__(
eff_dir,
monitor=monitor,
verbose=verbose,
save_best_only=save_best_only,
save_weights_only=save_weights_only,
mode=mode,
save_freq=save_freq,
**kwargs)
self.eff_dir = eff_dir
self.encryption_key = encryption_key
self.graph_only = graph_only
self.is_qat = is_qat
def _remove_tmp_files(self):
"""Remove temporary zip file and directory."""
shutil.rmtree(os.path.dirname(self.filepath))
os.remove(self.temp_zip_file)
def on_epoch_end(self, epoch, logs=None):
"""Override on_epoch_end."""
self.epochs_since_last_save += 1
eff_epoch = epoch + 1 # EFF checkpoint names start at 001
checkpoint_dir = tempfile.mkdtemp()
self.filepath = os.path.join(checkpoint_dir, f'ckpt-{epoch:03d}') # override filepath
# pylint: disable=protected-access
if self.save_freq == 'epoch' and self.epochs_since_last_save >= self.period:
self._save_model(epoch=epoch, batch=None, logs=logs) # To self.filepath
if self.graph_only:
eff_filename = f"{self.model.name}.resume"
else:
eff_filename = f'{self.model.name}_{eff_epoch:03d}.tlt'
# WORKAROUND to save QAT graph
if self.is_qat:
shutil.copy(os.path.join(self.eff_dir, 'train_graph.json'), checkpoint_dir)
shutil.copy(os.path.join(self.eff_dir, 'eval_graph.json'), checkpoint_dir)
else:
# save train/eval graph json to checkpoint_dir
dump_json(self.model, os.path.join(checkpoint_dir, 'train_graph.json'))
dump_eval_json(checkpoint_dir, eval_graph='eval_graph.json')
eff_model_path = os.path.join(self.eff_dir, eff_filename)
# convert content in self.filepath to EFF
self.temp_zip_file = encode_eff(
os.path.dirname(self.filepath),
eff_model_path, self.encryption_key)
self._remove_tmp_files()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/callback/eff_checkpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet Trainer."""
import logging
from nvidia_tao_tf2.blocks.trainer import Trainer
logger = logging.getLogger(__name__)
class EfficientDetTrainer(Trainer):
"""EfficientDet Trainer."""
def __init__(self, num_epochs, qat=False, callbacks=None):
"""Init."""
self.num_epochs = num_epochs
self.callbacks = callbacks
self.qat = qat
def fit(self,
module,
train_dataset,
eval_dataset,
verbose) -> None:
"""Run model.fit with custom steps."""
if module.initial_epoch < self.num_epochs:
module.model.fit(
train_dataset,
epochs=self.num_epochs,
steps_per_epoch=module.steps_per_epoch,
initial_epoch=module.initial_epoch,
callbacks=self.callbacks,
verbose=verbose,
validation_data=eval_dataset,
validation_steps=module.num_samples)
else:
logger.info("Training (%d epochs) has finished.", self.num_epochs)
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/trainer/efficientdet_trainer.py |
"""Train module."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/trainer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit EfficientDet dataloader module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Source Class."""
class DataSource:
"""Datasource class."""
def __init__(self, tfrecord_patterns, image_dirs):
"""Init."""
assert len(tfrecord_patterns) > 0, "At least one TFRecord pattern must be provided."
if len(image_dirs) > 0:
assert len(image_dirs) == len(tfrecord_patterns), "The number of image directories does " \
"not match the number of TFRecord patterns."
else:
image_dirs = ['/'] * len(tfrecord_patterns)
self.tfrecord_patterns = list(tfrecord_patterns)
self.image_dirs = list(image_dirs)
def __len__(self):
"""Number of tfrecords."""
return len(self.tfrecord_patterns)
def __iter__(self):
"""Return iterator."""
self.n = 0
return self
def __next__(self):
"""Get next record."""
if self.n < len(self.tfrecord_patterns):
tfr = self.tfrecord_patterns[self.n]
img_dir = self.image_dirs[self.n]
self.n += 1
return tfr, img_dir
raise StopIteration
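if __name__ == '__main__':
    # Illustrative sketch only (not part of the original file): build a
    # DataSource from two hypothetical TFRecord patterns and iterate it.
    demo_source = DataSource(
        tfrecord_patterns=['/data/train-*.tfrecord', '/data/extra-*.tfrecord'],
        image_dirs=['/data/images', '/data/extra_images'])
    for pattern, img_dir in demo_source:
        print(pattern, img_dir)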
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/dataloader/datasource.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet standalone visualization tool."""
import os
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from PIL import Image
from nvidia_tao_tf2.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_tf2.cv.efficientdet.config.default_config import ExperimentConfig
from nvidia_tao_tf2.cv.efficientdet.dataloader import dataloader, datasource
from nvidia_tao_tf2.cv.efficientdet.utils import hparams_config
from nvidia_tao_tf2.cv.efficientdet.utils.config_utils import generate_params_from_cfg
from nvidia_tao_tf2.cv.efficientdet.visualize import vis_utils
def visualize(cfg):
"""Run single image visualization."""
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
MODE = 'vis'
# Parse and update hparams
config = hparams_config.get_detection_config(cfg.model.name)
config.update(generate_params_from_cfg(config, cfg, mode=MODE))
# Set up dataloader
eval_sources = datasource.DataSource(
cfg.dataset.val_tfrecords,
cfg.dataset.val_dirs)
eval_dl = dataloader.CocoDataset(
eval_sources,
is_training=True,
use_fake_data=False,
max_instances_per_image=config.max_instances_per_image)
eval_dataset = eval_dl(
config.as_dict(),
batch_size=1)
iterator = iter(eval_dataset)
counter = 1
for next_element in iterator:
# next_element = iterator.get_next()
image = next_element[0][0, ...] # h, w, c
image = image.numpy()
image2v = vis_utils.denormalize_image(image)
Image.fromarray(image2v).save(os.path.join(cfg.results_dir, f'dl_00{counter}.png'))
counter += 1
if counter > 10:
break
print("Finished visualization.")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="vis", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for EfficientDet evaluation."""
visualize(cfg)
if __name__ == '__main__':
main()
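# Illustrative invocation sketch (assumption, not from the original file):
# with hydra, config values are overridden on the command line, e.g.
#   python data_visualizer.py results_dir=/tmp/vis_out
# which writes up to 10 denormalized samples as dl_00*.png into results_dir.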
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/dataloader/data_visualizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and processing."""
import logging
import os
import multiprocessing
import tensorflow as tf
from nvidia_tao_tf2.blocks.dataloader.dataset import Dataset
from nvidia_tao_tf2.cv.efficientdet.utils import model_utils
from nvidia_tao_tf2.cv.efficientdet.model import anchors
from nvidia_tao_tf2.cv.efficientdet.utils.horovod_utils import get_rank, get_world_size, is_main_process
from nvidia_tao_tf2.cv.efficientdet.utils.keras_utils import get_mixed_precision_policy
from nvidia_tao_tf2.cv.core import preprocessor
from nvidia_tao_tf2.cv.core import tf_example_decoder
logger = logging.getLogger(__name__)
class InputProcessor:
"""Base class of Input processor."""
def __init__(self, image, output_size):
"""Initializes a new `InputProcessor`.
Args:
image: The input image before processing.
output_size: The output image size after calling resize_and_crop_image
function.
"""
self._image = image
if isinstance(output_size, int):
self._output_size = (output_size, output_size)
else:
self._output_size = output_size
# Parameters to control rescaling and shifting during preprocessing.
# Image scale defines scale from original image to scaled image.
self._image_scale = tf.constant(1.0)
# The integer height and width of scaled image.
self._scaled_height = tf.shape(image)[0]
self._scaled_width = tf.shape(image)[1]
# The x and y translation offset to crop scaled image to the output size.
self._crop_offset_y = tf.constant(0)
self._crop_offset_x = tf.constant(0)
def normalize_image(self, dtype=tf.float32):
"""Normalize the image to zero mean and unit variance."""
self._image = tf.image.convert_image_dtype(self._image, dtype=dtype)
offset = tf.constant([0.485, 0.456, 0.406])
offset = tf.expand_dims(offset, axis=0)
offset = tf.expand_dims(offset, axis=0)
self._image -= offset
scale = tf.constant([0.224, 0.224, 0.224])
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
self._image /= scale
def get_image(self):
"""Get image."""
return self._image
def set_training_random_scale_factors(self,
scale_min,
scale_max,
target_size=None):
"""Set the parameters for multiscale training.
Notably, if train and eval use different sizes, then target_size should be
set to the eval size to avoid a discrepancy between train and eval.
Args:
scale_min: minimal scale factor.
scale_max: maximum scale factor.
target_size: targeted size, usually same as eval. If None, use train size.
"""
if not target_size:
target_size = self._output_size
target_size = model_utils.parse_image_size(target_size)
if is_main_process():
logger.debug('target_size = %s, output_size = %s', target_size, self._output_size)
# Select a random scale factor.
random_scale_factor = tf.random.uniform([], scale_min, scale_max)
scaled_y = tf.cast(random_scale_factor * target_size[0], tf.int32)
scaled_x = tf.cast(random_scale_factor * target_size[1], tf.int32)
# Recompute the accurate scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(scaled_y, tf.float32) / height
image_scale_x = tf.cast(scaled_x, tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
# Select non-zero random offset (x, y) if scaled image is larger than
# self._output_size.
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
offset_y = tf.cast(scaled_height - self._output_size[0], tf.float32)
offset_x = tf.cast(scaled_width - self._output_size[1], tf.float32)
offset_y = tf.maximum(0.0, offset_y) * tf.random.uniform([], 0, 1)
offset_x = tf.maximum(0.0, offset_x) * tf.random.uniform([], 0, 1)
offset_y = tf.cast(offset_y, tf.int32)
offset_x = tf.cast(offset_x, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
self._crop_offset_x = offset_x
self._crop_offset_y = offset_y
def set_scale_factors_to_output_size(self):
"""Set the parameters to resize input image to self._output_size."""
# Compute the scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(self._output_size[0], tf.float32) / height
image_scale_x = tf.cast(self._output_size[1], tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR):
"""Resize input image and crop it to the self._output dimension."""
# dtype = self._image.dtype
scaled_image = tf.compat.v1.image.resize(
self._image, [self._scaled_height, self._scaled_width], method=method)
scaled_image = scaled_image[self._crop_offset_y:self._crop_offset_y +
self._output_size[0],
self._crop_offset_x:self._crop_offset_x +
self._output_size[1], :]
self._image = tf.image.pad_to_bounding_box(scaled_image, 0, 0,
self._output_size[0],
self._output_size[1])
# self._image = tf.cast(output_image, dtype) # TODO(@yuw): verify
# self._image = tf.transpose(self._image, [2, 0, 1])
return self._image
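# --- Illustrative call order (not part of the original file) ---
# A typical single-image preprocessing pass with the class above:
#
#   proc = InputProcessor(image, output_size=512)
#   proc.normalize_image()                    # zero mean / unit variance
#   proc.set_scale_factors_to_output_size()   # or set_training_random_scale_factors(...)
#   resized = proc.resize_and_crop_image()    # shape (512, 512, 3)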
class DetectionInputProcessor(InputProcessor):
"""Input processor for object detection."""
def __init__(self, image, output_size, boxes=None, classes=None):
"""Init."""
InputProcessor.__init__(self, image, output_size)
self._boxes = boxes
self._classes = classes
def random_horizontal_flip(self):
"""Randomly flip input image and bounding boxes."""
self._image, self._boxes = preprocessor.random_horizontal_flip(
self._image, boxes=self._boxes)
def clip_boxes(self, boxes):
"""Clip boxes to fit in an image."""
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
ymin = tf.clip_by_value(ymin, 0, self._output_size[0] - 1)
xmin = tf.clip_by_value(xmin, 0, self._output_size[1] - 1)
ymax = tf.clip_by_value(ymax, 0, self._output_size[0] - 1)
xmax = tf.clip_by_value(xmax, 0, self._output_size[1] - 1)
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1)
return boxes
def resize_and_crop_boxes(self):
"""Resize boxes and crop it to the self._output dimension."""
boxlist = preprocessor.box_list.BoxList(self._boxes)
# The boxlist is in the range [0, 1], so we pass scaled_height/width
# instead of a single scale factor.
boxes = preprocessor.box_list_scale(boxlist, self._scaled_height, self._scaled_width).get()
# Adjust box coordinates based on the offset.
box_offset = tf.stack([
self._crop_offset_y,
self._crop_offset_x,
self._crop_offset_y,
self._crop_offset_x,
])
boxes -= tf.cast(tf.reshape(box_offset, [1, 4]), tf.float32)
# Clip the boxes.
boxes = self.clip_boxes(boxes)
# Filter out ground truth boxes that are illegal.
indices = tf.where(
tf.not_equal((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]), 0))
boxes = tf.gather_nd(boxes, indices)
classes = tf.gather_nd(self._classes, indices)
return boxes, classes
@property
def image_scale(self):
"""Return image scale from original image to scaled image."""
return self._image_scale
@property
def image_scale_to_original(self):
"""Return image scale from scaled image to original image."""
return 1.0 / self._image_scale
@property
def offset_x(self):
"""Return offset of x."""
return self._crop_offset_x
@property
def offset_y(self):
"""Return offset of y."""
return self._crop_offset_y
def pad_to_fixed_size(data, pad_value, output_shape):
"""Pad data to a fixed length at the first dimension.
Args:
data: Tensor to be padded to output_shape.
pad_value: A constant value assigned to the paddings.
output_shape: The output shape of a 2D tensor.
Returns:
The Padded tensor with output_shape [max_instances_per_image, dimension].
"""
max_instances_per_image = output_shape[0]
dimension = output_shape[1]
data = tf.reshape(data, [-1, dimension])
num_instances = tf.shape(data)[0]
msg = 'ERROR: please increase config.max_instances_per_image'
with tf.control_dependencies(
[tf.assert_less(num_instances, max_instances_per_image, message=msg)]):
pad_length = max_instances_per_image - num_instances
paddings = pad_value * tf.ones([pad_length, dimension])
padded_data = tf.concat([data, paddings], axis=0) # noqa pylint: disable=E1123
padded_data = tf.reshape(padded_data, output_shape)
return padded_data
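# Worked example for pad_to_fixed_size (illustrative, not part of the file):
#   data = tf.constant([[1., 2., 3., 4.]])   # one instance of dimension 4
#   pad_to_fixed_size(data, -1, [3, 4])
#   # -> [[ 1.,  2.,  3.,  4.],
#   #     [-1., -1., -1., -1.],
#   #     [-1., -1., -1., -1.]]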
class CocoDataset(Dataset):
"""COCO Dataset."""
def __init__(self,
data_sources,
is_training,
use_fake_data=False,
max_instances_per_image=None,
sampling='uniform'):
"""Dataloader for COCO format dataset.
Args:
data_sources (DataSource): data source for train/val set.
is_training (bool): training phase.
use_fake_data (bool, optional): Whether to use fake data. Defaults to False.
max_instances_per_image (int, optional): Max number of instances to detect per image.
sampling (str, optional): sampling method. Defaults to 'uniform'.
"""
self._data_sources = data_sources
self._is_training = is_training
self._use_fake_data = use_fake_data
# COCO has a limit of 100 instances per image, but users may set different values for custom datasets.
self._max_instances_per_image = max_instances_per_image or 100
assert sampling in ['uniform', 'proportional'], \
f"Sampling method {sampling} is not supported."
self._sampling = sampling
@tf.autograph.experimental.do_not_convert
def dataset_parser(self, value, example_decoder, anchor_labeler, params):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: a single serialized tf.Example string.
example_decoder: TF example decoder.
anchor_labeler: anchor box labeler.
params: a dict of extra parameters.
Returns:
image: Image tensor that is preprocessed to have normalized value and
fixed dimension [image_height, image_width, 3]
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: Number of positive anchors in the image.
source_id: Source image id. Default value -1 if the source id is empty
in the groundtruth annotation.
image_scale: Scale of the processed image to the original image.
boxes: Groundtruth bounding box annotations. The box is represented in
[y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_instances_per_image, 4].
is_crowds: Groundtruth annotations to indicate if an annotation
represents a group of instances by value {0, 1}. The tensor is
padded with 0 to the fixed dimension [self._max_instances_per_image].
areas: Groundtruth areas annotations. The tensor is padded with -1
to the fixed dimension [self._max_instances_per_image].
classes: Groundtruth classes annotations. The tensor is padded with -1
to the fixed dimension [self._max_instances_per_image].
"""
with tf.name_scope('parser'):
data = example_decoder.decode(value)
source_id = data['source_id']
image = data['image']
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
areas = data['groundtruth_area']
is_crowds = data['groundtruth_is_crowd']
image_masks = data.get('groundtruth_instance_masks', [])
if self._is_training:
# Training time preprocessing.
if params['skip_crowd_during_training']:
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
if params.get('auto_augment', None):
from nvidia_tao_tf2.cv.efficientdet.augmentation import autoaugment # noqa pylint: disable=C0415
if params['auto_color']:
if is_main_process():
logger.info("Auto color augmentation is enabled.")
image, boxes = autoaugment.distort_image_with_autocolor(
image, boxes, num_layers=1, magnitude=15)
if params['auto_translate_xy']:
if is_main_process():
logger.info("Auto translate_xy augmentation is enabled.")
image, boxes = autoaugment.distort_image_with_autotranslate(
image, boxes, num_layers=1, magnitude=15)
input_processor = DetectionInputProcessor(image, params['image_size'], boxes, classes)
input_processor.normalize_image()
if self._is_training:
if params['input_rand_hflip']:
input_processor.random_horizontal_flip()
input_processor.set_training_random_scale_factors(
params['jitter_min'], params['jitter_max'],
params.get('target_size', None))
else:
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
boxes, classes = input_processor.resize_and_crop_boxes()
# Assign anchors.
(cls_targets, box_targets,
num_positives) = anchor_labeler.label_anchors(boxes, classes)
source_id = tf.where(
tf.equal(source_id, tf.constant('')), '-1', source_id)
source_id = tf.strings.to_number(source_id)
# Pad groundtruth data for evaluation.
image_scale = input_processor.image_scale_to_original
boxes *= image_scale
is_crowds = tf.cast(is_crowds, dtype=tf.float32)
boxes = pad_to_fixed_size(boxes, -1, [self._max_instances_per_image, 4])
is_crowds = pad_to_fixed_size(is_crowds, 0, [self._max_instances_per_image, 1])
areas = pad_to_fixed_size(areas, -1, [self._max_instances_per_image, 1])
classes = pad_to_fixed_size(classes, -1, [self._max_instances_per_image, 1])
if params['mixed_precision']:
dtype = get_mixed_precision_policy().compute_dtype
image = tf.cast(image, dtype=dtype)
box_targets = tf.nest.map_structure(
lambda box_target: tf.cast(box_target, dtype=tf.float32), box_targets)
return (image, cls_targets, box_targets, num_positives, source_id,
image_scale, boxes, is_crowds, areas, classes, image_masks)
@tf.autograph.experimental.do_not_convert
def process_example(self, params, batch_size, images, cls_targets,
box_targets, num_positives, source_ids, image_scales,
boxes, is_crowds, areas, classes, image_masks):
"""Processes one batch of data."""
labels = {}
# Count num_positives in a batch.
num_positives_batch = tf.reduce_mean(num_positives)
labels['mean_num_positives'] = tf.reshape(
tf.tile(tf.expand_dims(num_positives_batch, 0), [
batch_size,
]), [batch_size, 1])
if params['data_format'] == 'channels_first':
images = tf.transpose(images, [0, 3, 1, 2])
for level in range(params['min_level'], params['max_level'] + 1):
labels[f'cls_targets_{level}'] = cls_targets[level]
labels[f'box_targets_{level}'] = box_targets[level]
if params['data_format'] == 'channels_first':
labels[f'cls_targets_{level}'] = tf.transpose(
labels[f'cls_targets_{level}'], [0, 3, 1, 2])
labels[f'box_targets_{level}'] = tf.transpose(
labels[f'box_targets_{level}'], [0, 3, 1, 2])
# Concatenate groundtruth annotations to a tensor.
groundtruth_data = tf.concat([boxes, is_crowds, areas, classes], axis=2) # noqa pylint: disable=E1123
labels['source_ids'] = source_ids
labels['groundtruth_data'] = groundtruth_data
labels['image_scales'] = image_scales
labels['image_masks'] = image_masks
return images, labels
@property
def dataset_options(self):
"""Dataset options."""
options = tf.data.Options()
options.experimental_deterministic = not self._is_training
options.experimental_optimization.map_parallelization = True
options.experimental_optimization.parallel_batch = True
return options
def __call__(self, params, batch_size=None):
"""Call."""
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
example_decoder = tf_example_decoder.TfExampleDecoder(
include_mask='segmentation' in params['heads'],
regenerate_source_id=params['regenerate_source_id'],
include_image=True, # TODO(@yuw): forced to True, as image must be encoded in tfrecords
)
batch_size = batch_size or params['batch_size']
datasets = []
weights = []
for file_pattern, image_dir in self._data_sources:
dataset = tf.data.Dataset.list_files(
file_pattern,
shuffle=params['shuffle_file'] and self._is_training)
if self._is_training:
dataset = dataset.shard(get_world_size(), get_rank())
dataset = dataset.shuffle(buffer_size=64)  # shuffle() returns a new dataset; assign it back.
# Prefetch data from files.
def _prefetch_dataset(filename):
if params.get('dataset_type', None) == 'sstable':
pass
else:
dataset = tf.data.TFRecordDataset(filename).prefetch(1)
return dataset
dataset = dataset.interleave(
_prefetch_dataset, cycle_length=params['cycle_length'], # cycle_length=32
block_length=params['block_length'], # block_length=16
num_parallel_calls=tf.data.experimental.AUTOTUNE) # TODO(@yuw): whether to fix the number
dataset = dataset.with_options(self.dataset_options)
if self._is_training:
dataset = dataset.shuffle(
buffer_size=params['shuffle_buffer'],
reshuffle_each_iteration=True,)
# Parse the fetched records to input tensors for model function.
# pylint: disable=g-long-lambda
map_fn = lambda value: self.dataset_parser(value, example_decoder, # noqa pylint: disable=C3001
anchor_labeler, params)
# pylint: enable=g-long-lambda
# TODO(@yuw): whether to make it configurable or tf.data.experimental.AUTOTUNE
core_count = multiprocessing.cpu_count() // 2 # note: // 2 to be conservative
dataset = dataset.map(
map_fn, num_parallel_calls=max(core_count // get_world_size(), 1))
# dataset = dataset.prefetch(batch_size)
if self._sampling and len(self._data_sources) > 1:
dataset = dataset.repeat()
datasets.append(dataset)
weights.append(1 if (not image_dir or image_dir == '/') else len(os.listdir(image_dir)))
if not self._sampling or len(self._data_sources) == 1 or not self._is_training:
combined = datasets[0]
for dataset in datasets[1:]:
combined = combined.concatenate(dataset)
elif self._sampling == 'proportional':
if is_main_process():
logger.info("Use Proportional Sampling")
total = sum(weights)
weights = [w / total for w in weights]
combined = tf.data.Dataset.sample_from_datasets(
datasets, weights=weights
)
elif self._sampling == 'uniform':
if is_main_process():
logger.info("Use Uniform Sampling")
weights = [1.0 / len(weights)] * len(weights)
combined = tf.data.Dataset.sample_from_datasets(
datasets, weights=weights, stop_on_empty_dataset=True,
)
else:
raise ValueError("Unexpected error in dataloader!")
combined = combined.batch(batch_size, drop_remainder=params['drop_remainder'])
combined = combined.map(
lambda *args: self.process_example(params, batch_size, *args))
combined = combined.prefetch(params['prefetch_size'] or tf.data.experimental.AUTOTUNE)
if self._is_training:
combined = combined.repeat()
if self._use_fake_data:
# Turn this dataset into a semi-fake dataset which always loop at the
# first batch. This reduces variance in performance and is useful in
# testing.
combined = combined.take(1).cache().repeat()
return combined
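# --- Illustrative usage sketch (not part of the original file) ---
# Building a training pipeline with the class above; `params` is the dict
# produced from the experiment spec and must carry the keys accessed in
# __call__ (min_level, max_level, image_size, shuffle_file, batch_size, ...),
# and DataSource is assumed to be imported from the datasource module.
#
#   sources = datasource.DataSource(cfg.dataset.train_tfrecords, cfg.dataset.train_dirs)
#   train_dl = CocoDataset(sources, is_training=True,
#                          max_instances_per_image=200, sampling='uniform')
#   train_dataset = train_dl(params, batch_size=16)
#   images, labels = next(iter(train_dataset))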
| tao_tensorflow2_backend-main | nvidia_tao_tf2/cv/efficientdet/dataloader/dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module optimization root module."""
| tao_tensorflow2_backend-main | nvidia_tao_tf2/model_optimization/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
import math
import numpy as np
import pytest
from tensorflow import keras
from nvidia_tao_tf2.common.utils import set_random_seed
# from nvidia_tao_tf2.backbones.efficientnet_tf import EfficientNetB0, EfficientNetB1, EfficientNetB5
from nvidia_tao_tf2.backbones.resnet_tf import ResNet
from nvidia_tao_tf2.backbones import utils_tf
from nvidia_tao_tf2.model_optimization.pruning import pruning
from nvidia_tao_tf2.model_optimization.pruning.pruning import find_prunable_parent
set_random_seed(42)
class TestPruning(object):
"""Main class for pruning tests."""
def check_weights(self, pruned_model, granularity, min_num_filters, filter_counts):
for layer in pruned_model.layers:
weights = layer.get_weights()
if type(layer) in [
keras.layers.Conv2D,
keras.layers.DepthwiseConv2D,
keras.layers.Conv2DTranspose,
keras.layers.Dense,
]:
if len(weights) == 1:
kernels = weights[0]
biases = None
elif len(weights) == 2:
kernels, biases = weights
else:
raise ValueError(f"Unhandled number of weights: {len(weights)}")
if type(layer) == keras.models.Model:
self.check_weights(layer,
granularity,
min_num_filters,
filter_counts.pop(layer.name)
)
elif type(layer) == keras.layers.Conv2DTranspose:
# we're not pruning these layers
filter_count = filter_counts[layer.name]
n_kept = kernels.shape[-2]
assert n_kept == filter_count['total']
if biases is not None:
assert n_kept == biases.shape[-1]
elif type(layer) in [keras.layers.BatchNormalization]:
# this should just propagate previous pruning
filter_count = filter_counts[layer.name]
to_prune = filter_count['to_prune']
# apply granularity and min
to_prune = min(to_prune - to_prune % granularity,
filter_count['total'] - min_num_filters)
to_keep = filter_count['total'] - to_prune
if type(layer) == keras.layers.BatchNormalization:
assert all([len(w) == to_keep for w in weights]) # noqa pylint: disable=R1729
else:
assert all([type(w) == np.float32 for w in weights]) # noqa pylint: disable=R1729
elif type(layer) == keras.layers.DepthwiseConv2D:
# handle depthwiseconv2d specially.
n_kept = kernels.shape[-2]
if biases is not None:
assert n_kept == biases.shape[-1]
filter_count = filter_counts[layer.name]
if filter_count['total'] > min_num_filters:
assert n_kept >= min_num_filters
n_pruned = filter_count['total'] - n_kept
to_prune = filter_count['to_prune']
assert n_pruned == min(to_prune - to_prune % granularity,
filter_count['total'] - min_num_filters)
elif weights:
# Checking weights for a conv2d layer.
n_kept = kernels.shape[-1]
if biases is not None:
# Make sure we pruned kernels and biases identically.
assert n_kept == biases.shape[-1]
filter_count = filter_counts[layer.name]
# Make sure we kept the min amount of filters.
if filter_count['total'] > min_num_filters:
assert n_kept >= min_num_filters
n_pruned = filter_count['total'] - n_kept
to_prune = filter_count['to_prune']
# Make sure the number of pruned filters matches
# the expected granularity.
assert n_pruned <= min(to_prune - to_prune % granularity,
filter_count['total'] - min_num_filters)
def common(self,
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=None,
check_output_on_input_shape=None,
layer_config_overrides=None,
equalization_criterion='union'):
"""Common denominator for most pruning tests.
This method sets weights such that half of the neurons should be pruned
considering the specified threshold but ignoring granularity, the min number
of filters to retain and excluded layers.
This method then prunes the model and checks whether the expected
number of neurons has been pruned.
Args:
model: the model to prune.
method (str): pruning method.
normalizer (str): type of normalizer to use when pruning.
criterion (str): type of criterion to use when pruning.
granularity (int): granularity by which to prune filters.
min_num_filters (int): min number of filters to retain when pruning.
threshold (float): pruning threshold.
excluded_layers (list): list of layers to exclude when pruning.
check_output_on_input_shape (tuple): shape to use to verify inference (output shape
and activations), or ``None`` to skip inference
checks. For multiple inputs, this can also be
passed as a list of tuples.
layer_config_overrides (dict): A dictionary of key-value pairs used for overriding
layer configuration. Use cases include changing regularizers after pruning.
equalization_criterion (str): Criteria to equalize the stats of inputs to an element
wise op layer. Options are [arithmetic_mean, geometric_mean, union, intersection].
"""
if excluded_layers is None:
excluded_layers = []
assert criterion == 'L2'
# Targeted average norm of the filters to keep (actual weights will be
# randomly picked from a narrow uniform distribution).
keep_norm = threshold * 4.
if check_output_on_input_shape is not None:
# This test only works on activations for which f(0)=0, for example:
# "tanh", "relu", "linear".
for layer in model.layers:
if (layer.name not in excluded_layers and hasattr(layer, 'activation') and
layer.activation.__name__ not in ['linear', 'relu', 'tanh', 'swish']):
raise ValueError("Found unsupported activation in layer " # noqa pylint: disable=C0209
"named %s with type %s and activation type %s" %
(layer.name, type(layer), layer.activation.__name__))
if equalization_criterion in ['intersection', 'geometric_mean']:
raise ValueError("Unsupported merge layer equalization criterion for" # noqa pylint: disable=C0209
"pruning output check: %s." % equalization_criterion)
# Set the norm of neurons to prune to zero so we can match the unpruned
# model output with the pruned model output.
prune_norm = 0.
else:
# Just make neurons small enough to be pruned.
prune_norm = threshold / 4.
filter_counts = {}
filter_counts = self.set_weights(model, method, normalizer, criterion, granularity,
min_num_filters, keep_norm, prune_norm, excluded_layers,
threshold, equalization_criterion, filter_counts)
if check_output_on_input_shape is not None:
batch_size = 2
if isinstance(check_output_on_input_shape, list):
# Multiple-input case.
batch_data = []
for shape in check_output_on_input_shape:
batch_shape = (batch_size,) + shape
batch_data.append(np.random.random_sample(batch_shape))
else:
# single-input case.
batch_shape = (batch_size,) + check_output_on_input_shape
batch_data = np.random.random_sample(batch_shape)
output_before = model.predict(batch_data)
shape_before = output_before.shape
pruned_model = pruning.prune(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers,
equalization_criterion=equalization_criterion,
layer_config_overrides=layer_config_overrides)
pruned_model.summary()
self.check_weights(pruned_model, granularity, min_num_filters, filter_counts)
if check_output_on_input_shape is not None:
output_after = pruned_model.predict(batch_data)
shape_after = output_after.shape
assert shape_before == shape_after
assert np.allclose(output_before, output_after, rtol=1e-02, atol=1e-02)
return pruned_model
@staticmethod
def get_uniform(shape, mean, boundary=0.1):
"""Return a uniform distributed sample with a randomized sign.
Returns U(mean*(1-boundary), mean*(1+boundary)) with a random sign.
Args:
shape (list): shape of distribution to return.
mean (float): float of distribution to return.
boundary (float): relative multiplier to set range boundaries.
"""
x = np.random.uniform(low=mean * (1 - boundary), high=mean * (1 + boundary), size=shape)
x *= np.sign(np.random.normal(size=shape))
return x
def set_weights(self, model, method, normalizer, criterion, granularity, min_num_filters,
keep_norm, prune_norm, excluded_layers, threshold, equalization_criterion,
filter_counts):
# Pass 1 : Visit only prunable layers
for layer in model.layers:
weights = layer.get_weights()
norms = []
prune_indices = []
keep_indices = []
if type(layer) in [
keras.layers.Conv2D,
keras.layers.DepthwiseConv2D,
keras.layers.Conv2DTranspose,
keras.layers.Dense,
]:
if len(weights) == 1:
kernels = weights[0]
biases = None
elif len(weights) == 2:
kernels, biases = weights
else:
raise ValueError(f"Unhandled number of weights: {len(weights)}")
if type(layer) == keras.models.Model:
filter_counts = self.set_weights(
layer, method, normalizer, criterion, granularity, min_num_filters, keep_norm,
prune_norm, excluded_layers, threshold, equalization_criterion, filter_counts)
elif type(layer) == keras.layers.Conv2DTranspose:
# expected kernel shape is (kernel_width, kernel_height, output_fmaps, input_fmaps)
n_filters = kernels.shape[-2]
# we are not pruning these layers
filter_counts[layer.name] = {
'to_keep': n_filters,
'to_prune': 0,
'total': n_filters,
'keep_indices': range(n_filters),
'prune_indices': prune_indices,
'norms': np.asarray(norms)
}
elif type(layer) in [
keras.layers.BatchNormalization,
]:
# Account for weights in the layer, but pass through during first pass
# waiting all prunable and element wise layers to be explored.
pass
elif type(layer) == keras.layers.Conv2D:
n_prune = 0
n_keep = 0
n_params_per_kernel = kernels[:, :, :, 0].size
keep_norm_ = math.sqrt(keep_norm**2 / n_params_per_kernel)
prune_norm_ = math.sqrt(prune_norm**2 / n_params_per_kernel)
for i in range(kernels.shape[-1]):
# Randomly keep or remove filters.
if np.random.uniform() > 0.5 or layer.name in excluded_layers:
# Keep that one.
kernels[:, :, :, i] = self.get_uniform(kernels.shape[:3], keep_norm_)
if biases is not None:
biases[i] = keep_norm_
n_keep += 1
keep_indices.append(i)
norms.append(keep_norm)
else:
# Prune that one.
kernels[:, :, :, i] = self.get_uniform(kernels.shape[:3], prune_norm_)
if biases is not None:
biases[i] = prune_norm_
norms.append(prune_norm)
n_prune += 1
prune_indices.append(i)
if biases is not None:
apply_weights = (kernels, biases)
else:
apply_weights = (kernels,)
layer.set_weights(apply_weights)
filter_counts[layer.name] = {
'layer_name': layer.name,
'to_keep': n_keep,
'to_prune': n_prune,
'total': n_keep + n_prune,
'keep_indices': keep_indices,
'prune_indices': prune_indices,
'norms': np.asarray(norms)
}
elif type(layer) == keras.layers.DepthwiseConv2D:
n_prune = 0
n_keep = 0
n_params_per_kernel = kernels[:, :, 0, 0].size
keep_norm_ = math.sqrt(keep_norm ** 2 / n_params_per_kernel)
prune_norm_ = math.sqrt(prune_norm ** 2 / n_params_per_kernel)
for i in range(kernels.shape[-2]):
# Randomly keep or remove filters.
if np.random.uniform() > 0.5 or layer.name in excluded_layers:
# Keep that one.
kernels[:, :, i, 0] = self.get_uniform(kernels.shape[:2], keep_norm_)
if biases is not None:
biases[i] = keep_norm_
n_keep += 1
keep_indices.append(i)
norms.append(keep_norm)
else:
# Prune that one.
kernels[:, :, i, 0] = self.get_uniform(kernels.shape[:2], prune_norm_)
if biases is not None:
biases[i] = prune_norm_
norms.append(prune_norm)
n_prune += 1
prune_indices.append(i)
if biases is not None:
layer.set_weights((kernels, biases))
else:
layer.set_weights((kernels,))
filter_counts[layer.name] = {'layer_name': layer.name,
'to_keep': n_keep,
'to_prune': n_prune,
'total': n_keep + n_prune,
'keep_indices': keep_indices,
'prune_indices': prune_indices,
'norms': np.asarray(norms)}
elif type(layer) == keras.layers.Dense:
n_prune = 0
n_keep = 0
n_params_per_kernel = kernels.shape[0]
keep_norm_ = math.sqrt(keep_norm**2 / n_params_per_kernel)
prune_norm_ = math.sqrt(prune_norm**2 / n_params_per_kernel)
for i in range(kernels.shape[1]):
# Randomly keep or remove filters.
if np.random.uniform() > 0.5 or layer.name in excluded_layers:
# Keep that one.
kernels[:, i] = self.get_uniform(kernels.shape[:1], keep_norm_)
n_keep += 1
if biases is not None:
biases[i] = keep_norm_
keep_indices.append(i)
norms.append(keep_norm_)
else:
# Prune that one.
kernels[:, i] = self.get_uniform(kernels.shape[:1], prune_norm_)
if biases is not None:
biases[i] = prune_norm_
n_prune += 1
prune_indices.append(i)
norms.append(prune_norm_)
if biases is not None:
layer.set_weights((kernels, biases))
else:
layer.set_weights((kernels,))
filter_counts[layer.name] = {
'to_keep': n_keep,
'to_prune': n_prune,
'total': n_keep + n_prune,
'keep_indices': keep_indices,
'prune_indices': prune_indices,
'norms': np.asarray(norms)
}
elif weights:
raise RuntimeError(f"Unknown layer type={type(layer)} has weights.")
# Equalizing inputs for layers with element wise operations.
filter_counts = self._equalize_inputs(model, filter_counts, granularity, min_num_filters,
threshold, equalization_criterion, excluded_layers)
# Pass two: This time visit batchnorm layers.
for layer in model.layers:
if type(layer) in [
keras.layers.BatchNormalization,
]:
# We are just propagating the previous layer.
previous_layer = []
inbound_nodes = layer._inbound_nodes
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
for n in inbound_nodes:
_inbound_layers = n.inbound_layers
# For some reason, tf.keras does not always put things in a list.
if not isinstance(_inbound_layers, list):
_inbound_layers = [_inbound_layers]
for _in_layer in _inbound_layers:
previous_layer.append(_in_layer.name)
filter_counts[layer.name] = filter_counts[previous_layer[0]]
if type(layer) == keras.layers.DepthwiseConv2D:
dw_parents = []
dw_parents = find_prunable_parent(dw_parents, layer, True)
filter_counts = self._match_dw_indices(dw_parents[0], layer, filter_counts,
min_num_filters, granularity, threshold,
equalization_criterion, excluded_layers)
return filter_counts
def _equalize_inputs(self,
model,
filter_counts,
granularity,
min_num_filters,
threshold,
equalization_criterion,
excluded_layers=None):
layer_types = {type(_layer) for _layer in model.layers}
if keras.models.Model in layer_types:
if layer_types != set([keras.layers.InputLayer, keras.models.Model]):
raise NotImplementedError("Model encapsulation is only supported if outer model"
"only consists of input layers.")
model_layer = [_layer for _layer in model.layers if (type(_layer) == keras.models.Model)]
if len(model_layer) > 1:
raise NotImplementedError("Model encapsulation is only supported if outer model"
"only includes one inner model")
return self._equalize_inputs(model_layer[0], filter_counts, granularity,
min_num_filters, threshold, equalization_criterion,
excluded_layers)
# Iterating though model layers.
for layer in model.layers:
if type(layer) in [
keras.layers.Add, keras.layers.Subtract, keras.layers.Multiply,
keras.layers.Average, keras.layers.Maximum
]:
eltwise_prunable_inputs = []
eltwise_prunable_inputs = find_prunable_parent(eltwise_prunable_inputs, layer)
# Remove broadcast operation layers from mapping
for _layer in eltwise_prunable_inputs:
if _layer.filters == 1:
eltwise_prunable_inputs.pop(eltwise_prunable_inputs.index(_layer))
# Do not update/match filter indices for eltwise layer inputs if they included
# in exclude layers.
# if not any(i.name in excluded_layers for i in eltwise_prunable_inputs):
if len(eltwise_prunable_inputs) > 1:
filter_counts = self._match_indices(
eltwise_prunable_inputs, filter_counts, min_num_filters, granularity, layer,
threshold, equalization_criterion, excluded_layers)
return filter_counts
def _match_indices(self, eltwise_prunable_inputs, filter_counts, min_num_filters, granularity,
layer, threshold, equalization_criterion, excluded_layers):
# Compute retainable filters.
output_depth = eltwise_prunable_inputs[0].filters
# workaround for depthwise layer, as layer.filters is None
for _layer in eltwise_prunable_inputs:
if type(_layer) == keras.layers.Conv2D:
output_depth = _layer.filters
if any(_layer.name in excluded_layers for _layer in eltwise_prunable_inputs):
matched_retained_idx = range(output_depth)
else:
cumulative_stat = np.array([])
for idx, l in enumerate(eltwise_prunable_inputs, 1):
layerwise_stat = filter_counts[l.name]['norms']
if not np.asarray(cumulative_stat).size:
cumulative_stat = layerwise_stat
elif equalization_criterion == 'union':
cumulative_stat = np.maximum(layerwise_stat, cumulative_stat)
elif equalization_criterion == 'intersection':
cumulative_stat = np.minimum(layerwise_stat, cumulative_stat)
elif equalization_criterion == "arithmetic_mean":
cumulative_stat = (cumulative_stat * (idx - 1) + layerwise_stat) / float(idx)
elif equalization_criterion == "geometric_mean":
cumulative_stat = np.power(
np.multiply(np.power(cumulative_stat, idx - 1), layerwise_stat),
float(1 / idx))
else:
raise NotImplementedError(f"Unknown equalization criterion: {equalization_criterion}")
output_idx = np.where(cumulative_stat > threshold)[0]
num_retained = len(output_idx)
min_num_filters = min(min_num_filters, output_depth)
num_retained = max(min_num_filters, num_retained)
if num_retained % granularity > 0:
num_retained += granularity - (num_retained % granularity)
num_retained = min(num_retained, output_depth)
sorted_idx = np.argsort(-cumulative_stat)
matched_retained_idx = np.sort(sorted_idx[:num_retained])
# Set filter counts for updated layers
for _layer in eltwise_prunable_inputs:
filter_counts[_layer.name]['keep_indices'] = matched_retained_idx
filter_counts[_layer.name]['prune_indices'] = np.setdiff1d(range(output_depth),
matched_retained_idx)
filter_counts[_layer.name]['to_keep'] = len(matched_retained_idx)
filter_counts[_layer.name]['to_prune'] = output_depth - len(matched_retained_idx)
filter_counts[_layer.name]['total'] = output_depth
return filter_counts
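    # Worked example of the equalization criteria above (illustrative):
    # given two input layers with filter norms a = [0.9, 0.1, 0.6] and
    # b = [0.2, 0.8, 0.7], the cumulative statistic is
    #   union            -> max(a, b)  = [0.9, 0.8, 0.7]
    #   intersection     -> min(a, b)  = [0.2, 0.1, 0.6]
    #   arithmetic_mean  -> (a + b)/2  = [0.55, 0.45, 0.65]
    # With threshold 0.5, 'union' retains all three filters while
    # 'intersection' retains only index 2 (before granularity rounding).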
def _match_dw_indices(self, parent_layer, layer, filter_counts,
min_num_filters, granularity, threshold,
equalization_criterion, excluded_layers):
# Compute retainable filters for DepthwiseConv2D layer.
dw_layers = [parent_layer, layer]
output_depth = parent_layer.filters
if any(_layer.name in excluded_layers for _layer in dw_layers):
matched_retained_idx = range(output_depth)
else:
cumulative_stat = np.array([])
for idx, l in enumerate(dw_layers, 1):
layerwise_stat = filter_counts[l.name]['norms']
if not np.asarray(cumulative_stat).size:
cumulative_stat = layerwise_stat
elif equalization_criterion == 'union':
cumulative_stat = np.maximum(layerwise_stat, cumulative_stat)
elif equalization_criterion == 'intersection':
cumulative_stat = np.minimum(layerwise_stat, cumulative_stat)
elif equalization_criterion == "arithmetic_mean":
cumulative_stat = (cumulative_stat * (idx - 1) + layerwise_stat) / float(idx)
elif equalization_criterion == "geometric_mean":
cumulative_stat = np.power(np.multiply(np.power(cumulative_stat, idx - 1),
layerwise_stat), float(1 / idx))
else:
raise NotImplementedError(f"Unknown equalization criterion: {equalization_criterion}")
output_idx = np.where(cumulative_stat > threshold)[0]
num_retained = len(output_idx)
min_num_filters = min(min_num_filters, output_depth)
num_retained = max(min_num_filters, num_retained)
if num_retained % granularity > 0:
num_retained += granularity - (num_retained % granularity)
num_retained = min(num_retained, output_depth)
sorted_idx = np.argsort(-cumulative_stat)
matched_retained_idx = np.sort(sorted_idx[:num_retained])
# Set filter counts for updated layers
for _layer in dw_layers:
filter_counts[_layer.name]['keep_indices'] = matched_retained_idx
filter_counts[_layer.name]['prune_indices'] = np.setdiff1d(range(output_depth),
matched_retained_idx)
filter_counts[_layer.name]['to_keep'] = len(matched_retained_idx)
filter_counts[_layer.name]['to_prune'] = output_depth - len(matched_retained_idx)
filter_counts[_layer.name]['total'] = output_depth
return filter_counts
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"equalization_criterion, elmtwise_op, method, normalizer, criterion,"
"granularity, min_num_filters, threshold, dont_prune_elmtwise_input", [
(ResNet, 10, 'channels_first', True, (3, 128, 64), "union",
keras.layers.Add, 'min_weight', 'off', 'L2', 2, 8, 0.5, True),
(ResNet, 10, 'channels_first', False, (3, 128, 64), "union",
keras.layers.Subtract, 'min_weight', 'off', 'L2', 2, 8, 0.5, False),
])
def test_broadcast_ops(self, model, nlayers, data_format, use_batch_norm, input_shape,
equalization_criterion, elmtwise_op, normalizer, method, criterion,
granularity, min_num_filters, threshold, dont_prune_elmtwise_input):
"""Test broadcast element-wise operations."""
inputs = keras.layers.Input(shape=input_shape)
if model == ResNet: # pylint: disable=W0143
model = model(
nlayers,
inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
all_projections=True)
else:
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add conv layer.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_1')(x)
if elmtwise_op != keras.layers.Subtract:
x2 = keras.layers.Conv2D(
filters=1,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name="broadcast_input")(x)
# Add branch.
x1 = keras.layers.Conv2D(
filters=24,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
x1 = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_2')(x1)
# Add skip connection. Broadcast operations are not supported for subtract layers.
if elmtwise_op != keras.layers.Subtract:
x = elmtwise_op()([x1, x, x2])
else:
x = elmtwise_op()([x1, x])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
excluded_layers = ['conv2d_output']
if dont_prune_elmtwise_input:
excluded_layers.extend(['elmtwise_input_1'])
if equalization_criterion in ['intersection', 'geometric_mean']:
# Disable the output tests, as these criteria are not even supposed to work.
# Technically, geometric_mean might work when the merge is a multiplication,
# but since the setting is global, it is better not support it.
check_output_on_input_shape = None
else:
check_output_on_input_shape = input_shape
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'excluded_layers': excluded_layers,
'check_output_on_input_shape': check_output_on_input_shape,
'equalization_criterion': equalization_criterion
}
# Pruning and check for pruned weights.
self.common(*args, **kwargs)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"equalization_criterion, elmtwise_op, method, normalizer, criterion,"
"granularity, min_num_filters, threshold, dont_prune_elmtwise_input,", [
(ResNet, 10, 'channels_last', False, (128, 64, 3), "arithmetic_mean",
keras.layers.Average, 'min_weight', 'off', 'L2', 2, 8, 0.5, False),
(ResNet, 18, 'channels_last', True, (128, 64, 3), "union",
keras.layers.Subtract, 'min_weight', 'off', 'L2', 2, 8, 0.5, False),
])
def test_elmtwise_ops(self, model, nlayers, data_format, use_batch_norm, input_shape,
equalization_criterion, elmtwise_op, normalizer, method, criterion,
granularity, min_num_filters, threshold, dont_prune_elmtwise_input):
"""Test element-wise operations."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_1')(x)
# Add branch.
x1 = keras.layers.Conv2D(
filters=24,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
x1 = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_2')(x1)
# Add skip connection.
x = elmtwise_op()([x1, x])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
excluded_layers = ['conv2d_output']
if dont_prune_elmtwise_input:
excluded_layers.extend(['elmtwise_input_1', 'elmtwise_input_2'])
if equalization_criterion in ['intersection', 'geometric_mean']:
# Disable the output tests, as these criteria are not even supposed to work.
check_output_on_input_shape = None
else:
check_output_on_input_shape = input_shape
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'equalization_criterion': equalization_criterion,
'excluded_layers': excluded_layers,
'check_output_on_input_shape': check_output_on_input_shape
}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(ResNet, 10, 'channels_first', False,
(3, 128, 256), 'min_weight', 'max', 'L2', 8, 16, 1.),
(ResNet, 18, 'channels_last', True,
(256, 256, 3), 'min_weight', 'off', 'L2', 8, 16, 1e3),
])
def test_min_weight(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
method, criterion, granularity, min_num_filters, threshold):
"""Test that we retain min_num_filters.
This also tests the lower bound on thresholds.
"""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
pruned_model = pruning.prune(model, method, normalizer, criterion, granularity,
min_num_filters, threshold)
weights = pruned_model.get_weights()
assert all([w.shape[-1] == min_num_filters for w in weights]) # noqa pylint: disable=R1729
@pytest.mark.parametrize("data_format, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
('channels_first',
(3, 64, 96), 'min_weight', 'off', 'L2', 4, 8, 0.5),
(None, (3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
])
def test_flatten(self, data_format, input_shape, normalizer, method, criterion, granularity,
min_num_filters, threshold):
"""Test that we can prune 'flatten' layer."""
inputs = keras.layers.Input(shape=input_shape)
if data_format is not None:
x = keras.layers.Conv2D(
filters=32,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format)(inputs)
else:
# Test pruning of flatten layer with unknown format (the API will
# verify that the previous layer was unpruned).
x = inputs
x = keras.layers.Activation('relu')(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(64, activation='relu')(x)
x = keras.layers.Dense(10, activation='linear', name='dense_output')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=['dense_output'],
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(ResNet, 10, 'channels_first', False,
(3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
(ResNet, 18, 'channels_last', True,
(256, 256, 3), 'min_weight', 'max', 'L2', 8, 16, 0.5),
])
def test_granularity(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
method, criterion, granularity, min_num_filters, threshold):
"""Test that we prune n*granularity filters."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
batch_shape = (1,) + input_shape
pruned_model = self.common(model, method, normalizer, criterion, granularity,
min_num_filters, threshold)
model = keras.models.Model(inputs=inputs, outputs=pruned_model(inputs), name=model.name)
model.predict(np.zeros(batch_shape))
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape_1,"
"input_shape_2, method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
(ResNet, 10, 'channels_first', False, (3, 128, 64),
(3, 64, 32), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_mimo(self, model, nlayers, data_format, use_batch_norm, input_shape_1, input_shape_2,
normalizer, method, criterion, granularity, min_num_filters, threshold):
"""Test the pruning of models with multiple inputs and multiple outputs."""
input_1 = keras.layers.Input(shape=input_shape_1)
model = model(nlayers, input_1, use_batch_norm=use_batch_norm, data_format=data_format)
x_1 = model.outputs[0]
input_2 = keras.layers.Input(shape=input_shape_2)
x_2 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(input_2)
inputs = [input_1, input_2]
# Merge.
x = keras.layers.Concatenate(axis=1, name='output')([x_1, x_2])
# Add two branches on top.
out_1 = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='output_1')(x)
out_2 = keras.layers.Conv2D(
filters=24,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='output_2')(x)
# Provide outputs in reverse creation order to verify fix in output ordering.
outputs = [out_2, out_1]
# Create model.
model = keras.models.Model(inputs=inputs, outputs=outputs)
batch_shape_1 = (8,) + input_shape_1
batch_shape_2 = (8,) + input_shape_2
batch = [np.zeros(batch_shape_1), np.zeros(batch_shape_2)]
shapes_before = [out.shape for out in model.predict(batch)]
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['output_1', 'output_2']}
pruned_model = self.common(*args, **kwargs)
pruned_model = keras.models.Model(
inputs=inputs, outputs=pruned_model(inputs), name=model.name)
shapes_after = [out.shape for out in pruned_model.predict(batch)]
assert shapes_before == shapes_after
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape_1,"
"input_shape_2, method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
(ResNet, 10, 'channels_first', False, (3, 128, 64),
(3, 64, 32), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_multiple_inputs(self, model, nlayers, data_format, use_batch_norm, input_shape_1,
input_shape_2, normalizer, method, criterion, granularity,
min_num_filters, threshold):
"""Test the pruning of models with multiple inputs."""
input_1 = keras.layers.Input(shape=input_shape_1)
model = model(nlayers, input_1, use_batch_norm=use_batch_norm, data_format=data_format)
out_1 = model.outputs[0]
input_2 = keras.layers.Input(shape=input_shape_2)
out_2 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(input_2)
# Feed inputs in reverse creation order to verify fix in input order.
inputs = [input_2, input_1]
input_shapes = [input_shape_2, input_shape_1]
# Merge.
x = keras.layers.Concatenate(axis=1, name='output')([out_1, out_2])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='tanh',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['conv2d_output'], 'check_output_on_input_shape': input_shapes}
self.common(*args, **kwargs)
@pytest.mark.parametrize(
"data_format, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
('channels_first', (3, 128, 256), 'min_weight', 'max', 'L2', 8, 16, 0.5),
('channels_last', (256, 256, 3), 'min_weight', 'off', 'L2', 8, 16, 0.5),
])
def test_no_bias_in_conv_layer(self, data_format, input_shape, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test that we can prune conv layers with no bias terms."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(
filters=64,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
use_bias=False,
kernel_initializer='glorot_uniform',
name='conv2d_1')(inputs)
x = keras.layers.Conv2D(
filters=64,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
use_bias=True,
kernel_initializer='glorot_uniform',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=['conv2d_output'],
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("data_format, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
('channels_first',
(3, 128, 256), 'min_weight', 'max', 'L2', 8, 16, 0.5),
])
def test_no_bias_in_conv_transpose_layer(self, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold):
"""Test that we can prune conv layers with no bias terms."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(
filters=64,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
name='conv2d')(inputs)
x = keras.layers.Conv2DTranspose(
filters=8,
kernel_size=(2, 2),
strides=(2, 2),
padding='same',
data_format=data_format,
use_bias=False,
name='conv2d_transpose')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize(
"input_shape, data_format, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
((3, 16, 16), 'channels_first', 'min_weight', 'max', 'L2', 2, 2, 0.5),
])
def test_no_bias_in_dense_layer(self, input_shape, data_format, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test that we can prune dense layers with no bias terms."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(2, 2),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
use_bias=True,
kernel_initializer='glorot_uniform',
name='conv2d_1')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(32, activation='relu', use_bias=False)(x)
x = keras.layers.Dense(16, activation='tanh', use_bias=True)(x)
x = keras.layers.Dense(10, activation='linear', name='dense_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=['dense_output'],
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"method, normalizer, criterion, granularity, min_num_filters,"
"threshold, equalization_criterion, all_projections",
[(ResNet, 18, 'channels_first', False,
(3, 512, 512), 'min_weight', 'off', 'L2', 2, 8, 0.5, "union", False),
(ResNet, 18, 'channels_first', False,
(3, 544, 960), 'min_weight', 'off', 'L2', 2, 8, 0.5, "union", True),
(ResNet, 10, 'channels_first', True,
(3, 960, 544), 'min_weight', 'off', 'L2', 2, 8, 0.5, "arithmetic_mean", True),
(ResNet, 10, 'channels_first', True,
(3, 128, 256), 'min_weight', 'off', 'L2', 2, 8, 0.5, "union", False)])
def test_resnets(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
method, criterion, granularity, min_num_filters, threshold,
equalization_criterion, all_projections):
"""Test partial pruning for MSRA resnet templates."""
# Set up Resnet model.
inputs = keras.layers.Input(shape=input_shape)
model = model(
nlayers,
inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
all_projections=all_projections)
x = model.outputs[0]
# Hooking up to fully connected layer for 10 classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(256, name='inner_fc', activation='relu')(x)
x = keras.layers.Dense(10, name='output_fc', activation='relu')(x)
# Setting up a model.
model = keras.models.Model(inputs=inputs, outputs=x)
# Exclude the final fc layer from pruning.
excluded_layers = ['output_fc']
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'equalization_criterion': equalization_criterion,
'excluded_layers': excluded_layers,
'check_output_on_input_shape': input_shape
}
self.common(*args, **kwargs)
# @pytest.mark.parametrize(
# "model, data_format, input_shape,"
# "method, normalizer, criterion, granularity, min_num_filters,"
# "threshold, equalization_criterion",
# [(EfficientNetB0, 'channels_last',
# (512, 512, 3), 'min_weight', 'off', 'L2', 8, 8, 0.5, "union"),
# (EfficientNetB1, 'channels_last',
# (544, 960, 3), 'min_weight', 'off', 'L2', 2, 8, 0.5, "arithmetic_mean")
# ])
# def test_efficientnets(self, model, data_format, input_shape, normalizer,
# method, criterion, granularity, min_num_filters, threshold,
# equalization_criterion):
# """Test partial pruning for EfficientNet templates."""
# # Set up EfficientNet model.
# inputs = keras.layers.Input(shape=input_shape)
# model = model(
# input_tensor=inputs,
# input_shape=input_shape,
# data_format=data_format,
# classes=10)
# # Prune model and check weights.
# args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
# kwargs = {
# 'equalization_criterion': equalization_criterion,
# 'excluded_layers': ['predictions']
# }
# self.common(*args, **kwargs)
@pytest.mark.parametrize("data_format, input_shape,"
"method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
('channels_first',
(3, 128, 64), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_shared_layer(self, data_format, input_shape, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test the pruning of models with shared layers."""
input_1 = keras.layers.Input(shape=input_shape)
input_2 = keras.layers.Input(shape=input_shape)
input_3 = keras.layers.Input(shape=input_shape)
inputs = [input_1, input_2, input_3]
# This layer will be applied to three different inputs.
conv_layer = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(2, 2), padding='same', data_format=data_format)
conv_layer_output_1 = conv_layer(input_1)
conv_layer_output_2 = conv_layer(input_2)
conv_layer_output_3 = conv_layer(input_3)
# Merge.
x = keras.layers.Concatenate(
axis=1, name='concat')([conv_layer_output_1, conv_layer_output_2, conv_layer_output_3])
x = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(x)
# Add named output layer.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
input_shapes = [input_shape, input_shape, input_shape]
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['conv2d_output'], 'check_output_on_input_shape': input_shapes}
self.common(*args, **kwargs)
@pytest.mark.parametrize("data_format, input_shape,"
"method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
('channels_first',
(3, 128, 64), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_shared_layer2(self, data_format, input_shape, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test the pruning of models with shared layers."""
input_1 = keras.layers.Input(shape=input_shape)
input_2 = keras.layers.Input(shape=input_shape)
input_3 = keras.layers.Input(shape=input_shape)
inputs = [input_1, input_2, input_3]
# This layer will be applied to three different inputs.
c1 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(1, 1), padding='same', data_format=data_format)
# This layer will be applied to three different inputs.
c2 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(1, 1), padding='same', data_format=data_format)
# This layer will be applied to three different inputs.
c3 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(1, 1), padding='same', data_format=data_format)
# This layer will be applied to three different inputs.
conv_layer = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(2, 2), padding='same', data_format=data_format)
conv_layer_output_1 = conv_layer(c1(input_1))
conv_layer_output_2 = conv_layer(c2(input_2))
conv_layer_output_3 = conv_layer(c3(input_3))
# Merge.
x = keras.layers.Concatenate(
axis=1, name='concat')([conv_layer_output_1, conv_layer_output_2, conv_layer_output_3])
x = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(x)
# Add named output layer.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['conv2d_output']}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(ResNet, 10, 'channels_first', False,
(3, 256, 256), 'toto', 'off', 'L2', 4, 8, 0.5),
(ResNet, 10, 'channels_first', True,
(3, 256, 256), 'min_weight', 'toto', 'L2', 4, 8, 0.5),
(ResNet, 10, 'channels_first', False,
(3, 256, 256), 'min_weight', 'max', 'toto', 4, 8, 0.5),
])
def test_unknown_params(self, model, nlayers, data_format, use_batch_norm, input_shape,
normalizer, method, criterion, granularity, min_num_filters, threshold):
"""Test that we prune n*granularity filters."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
with pytest.raises(NotImplementedError):
pruning.prune(model, method, normalizer, criterion, granularity,
min_num_filters, threshold)
def test_unsupported_layer(self):
"""Test that we drop an error on an unsupported layer."""
inputs = keras.layers.Input(shape=(3, 8, 4, 2))
# 3D layers are not currently supported.
x = keras.layers.ZeroPadding3D(padding=(1, 1, 1), data_format="channels_first")(
inputs
)
model = keras.models.Model(inputs, x)
with pytest.raises(NotImplementedError):
pruning.prune(model, "min_weight", "off", "L2", 8, 1, 0.01)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold",
[(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5),
(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5)])
def test_with_averagepooling2d(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, method, normalizer, criterion, granularity,
min_num_filters, threshold):
"""Test with AveragePooling2D."""
inputs = keras.layers.Input(shape=input_shape)
# Build the model specified by the test case.
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Adding AveragePooling2D node.
x = keras.layers.AveragePooling2D(
pool_size=(2, 2), data_format=data_format, padding='same')(x)
# Adding a dense head of num classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(num_classes, name='output_fc', activation='relu')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
# Exclude final fc layer from pruning.
excluded_layers = ['output_fc']
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': excluded_layers, 'check_output_on_input_shape': input_shape}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, exclude_permute_inputs",
[(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5, False),
(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5, True)])
def test_with_permute_layer(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, method, normalizer, criterion, granularity,
min_num_filters, threshold, exclude_permute_inputs):
"""Test with Permute layer."""
inputs = keras.layers.Input(shape=input_shape)
# Build the model specified by the test case.
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Adding Permute Node.
x = keras.layers.Permute((1, 3, 2))(x)
# Adding a dense head of num classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(num_classes, name='output_fc', activation='relu')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
model.summary()
# Exclude final fc layer from pruning.
excluded_layers = ['output_fc']
if exclude_permute_inputs:
excluded_layers.append("block_4a_conv_2")
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': excluded_layers, 'check_output_on_input_shape': input_shape}
# Catch error if permute inputs are not excluded.
if not exclude_permute_inputs:
with pytest.raises(NotImplementedError):
self.common(*args, **kwargs)
else:
self.common(*args, **kwargs)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, zeropadding_dims",
[(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5, 3),
(ResNet, 10, 'channels_first', False, (3, 128, 64), 4, 'min_weight', 'off',
'L2', 4, 8, 0.5, (3, 2)),
(ResNet, 10, 'channels_last', False, (128, 64, 3), 4, 'min_weight', 'off',
'L2', 4, 8, 0.5, ((3, 2), (3, 2)))])
def test_with_zeropadding2D_layer(self, model, nlayers, data_format, use_batch_norm,
input_shape, num_classes, method, normalizer, criterion,
granularity, min_num_filters, threshold, zeropadding_dims):
"""Test with ZeroPadding2D."""
inputs = keras.layers.Input(shape=input_shape)
# Build the model specified by the test case.
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Adding ZeroPadding2D Node.
x = keras.layers.ZeroPadding2D(
padding=zeropadding_dims, data_format=data_format)(x)
# Adding a dense head of num classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(num_classes, name='output_fc', activation='relu')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
# Exclude final fc layer from pruning.
excluded_layers = ['output_fc']
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': excluded_layers, 'check_output_on_input_shape': input_shape}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(ResNet, 10, 'channels_first', False,
(3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
(ResNet, 10, 'channels_last', True,
(256, 256, 3), 'min_weight', 'max', 'L2', 8, 16, 0.5),
])
def test_with_conv_transpose_head(self, model, nlayers, data_format, use_batch_norm,
input_shape, normalizer, method, criterion, granularity,
min_num_filters, threshold):
"""Test that we prune n*granularity filters."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
model = utils_tf.add_deconv_head(
model=model,
inputs=inputs,
nmaps=1,
upsampling=2,
data_format=data_format,
activation_type='relu')
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
check_output_on_input_shape=input_shape)
# @pytest.mark.parametrize(
# "model, nlayers, data_format, use_batch_norm, input_shape, method,"
# "normalizer, criterion, granularity, min_num_filters, threshold,"
# "excluded_layers",
# [(ResNet, 10, 'channels_first', False,
# (3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5, ['block_4a_conv_2'])])
# def test_reprune(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
# method, criterion, granularity, min_num_filters, threshold, excluded_layers):
# """Test that we can reprune a model.
# Args:
# model: the model template to use.
# nlayers (int): number of layers to build template of.
# data_format (str): one of 'channels_first' or 'channels_last'.
# use_batch_norm (bool): whether to use batchnorm.
# input_shape (tuple of ints): input shape.
# method (str): pruning method.
# normalizer (str): type of normalizer to use when pruning.
# criterion (str): type of criterion to use when pruning.
# granularity (int): granularity by which to prune filters.
# min_num_filters (int): min number of filters to retain when pruning.
# threshold (float): pruning threshold.
# excluded_layers (list): list of layers to exclude when pruning."""
# inputs = keras.layers.Input(shape=input_shape)
# model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
# pruned_model = self.common(
# model,
# method,
# normalizer,
# criterion,
# granularity,
# min_num_filters,
# threshold,
# excluded_layers=excluded_layers,
# check_output_on_input_shape=input_shape)
# # Apply pruned model to our inputs. When cloning a model, Keras internally
# # recreates all layers - this is different from applying the model to
# # another input, which creates another model but does not create new layers
# # (thus the layers are shared between models, which means layers have multiple
# # outbound nodes, making forward parsing ill-defined).
# # Below we are cloning the model and instructing Keras to use placeholders
# # for the new inputs (if we provide the same input layer as in the original
# # model, Keras will - wrongly? - re-create a new layer with the same name and
# # complain that two layers of the model have the same name!).
# pruned_model_applied = keras.models.clone_model(pruned_model)
# pruned_model_applied.set_weights(pruned_model.get_weights())
# # Note: at this stage a typical workflow would fine-tune the pruned model.
# # Now prune the model again and verify the output shape.
# self.common(
# pruned_model_applied,
# method,
# normalizer,
# criterion,
# granularity,
# min_num_filters,
# threshold,
# excluded_layers=excluded_layers,
# check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, concat_axis",
[(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5, 1),
(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5, 2)])
def test_with_branches(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, normalizer, method, criterion, granularity, min_num_filters,
threshold, concat_axis):
"""Test concatenation head."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add 1st branch.
cov_channels = 1
x1 = keras.layers.Conv2D(
filters=num_classes * cov_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
# Add 2nd branch.
bbox_channels = 4 if concat_axis == 1 else 1
x2 = keras.layers.Conv2D(
filters=num_classes * bbox_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
name='conv2d_x2')(x)
# Merge.
x = keras.layers.Concatenate(axis=concat_axis, name='output')([x1, x2])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=num_classes * (bbox_channels + cov_channels),
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'excluded_layers': ['conv2d_x1', 'conv2d_x2', 'conv2d_output'],
'check_output_on_input_shape': input_shape
}
if concat_axis == 1:  # Pruning only supports concatenation along the channel axis.
self.common(*args, **kwargs)
else:
with pytest.raises(ValueError):
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
(ResNet, 10, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5),
])
def test_with_concat_before_reshape(self, model, nlayers, data_format, use_batch_norm,
input_shape, num_classes, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test pruning in presence of concat layer following a reshape."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add 1st branch.
cov_channels = 1
x1 = keras.layers.Conv2D(
filters=num_classes * cov_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
x1 = keras.layers.Reshape((num_classes, cov_channels, int(x.shape[-2]),
int(x.shape[-1])))(x1)
# Add 2nd branch.
bbox_channels = 4
x2 = keras.layers.Conv2D(
filters=num_classes * bbox_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
name='conv2d_x2')(x)
x2 = keras.layers.Reshape((num_classes, bbox_channels, int(x.shape[-2]),
int(x.shape[-1])))(x2)
# Merge.
x = keras.layers.Concatenate(axis=2, name='output')([x1, x2])
x = keras.layers.Reshape((num_classes * (bbox_channels + cov_channels), int(x.shape[-2]),
int(x.shape[-1])))(x)
x = keras.layers.Conv2D(
filters=8,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'excluded_layers': ['conv2d_x1', 'conv2d_x2', 'conv2d_output'],
'check_output_on_input_shape': input_shape
}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, prune_before_reshape", [
(ResNet, 10, 'channels_first', False,
(3, 128, 64), 8, 'min_weight', 'off', 'L2', 2, 2, 0.5, True),
(ResNet, 10, 'channels_first', False,
(3, 128, 64), 8, 'min_weight', 'off', 'L2', 2, 2, 0.5, False),
])
def test_with_reshape(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, normalizer, method, criterion, granularity, min_num_filters,
threshold, prune_before_reshape):
"""Test pruning of reshape layer."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add conv layer
cov_channels = 2
x = keras.layers.Conv2D(
filters=num_classes * cov_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x')(x)
# Add reshape.
x = keras.layers.Reshape((num_classes, cov_channels, int(x.shape[-2]), int(x.shape[-1])))(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
model.summary()
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
if not prune_before_reshape:
kwargs = {'excluded_layers': ['conv2d_x'], 'check_output_on_input_shape': input_shape}
self.common(*args, **kwargs)
else:
with pytest.raises(NotImplementedError):
self.common(*args)
@pytest.mark.parametrize(
"model, nlayers, data_format, input_shape,"
"method, normalizer, criterion, granularity,"
"min_num_filters, threshold ", [
(ResNet, 10, 'channels_first', (3, 128, 64), 'min_weight', 'off', 'L2', 2, 2, 0.5),
(ResNet, 10, 'channels_last', (64, 64, 3), 'min_weight', 'off', 'L2', 2, 2, 0.5),
])
def test_with_softmax(self, model, nlayers, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold):
"""Test pruning in presence of softmax layer."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, data_format=data_format)
x = model.outputs[0]
x = keras.layers.Conv2D(
filters=8,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Add softmax layer
if data_format == 'channels_first':
softmax_axis = 1
elif data_format == 'channels_last':
softmax_axis = -1
else:
raise ValueError(f"Unknown data format: {data_format}")
x = keras.layers.Softmax(axis=softmax_axis)(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
# Prune and check activations.
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers='conv2d_output',
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("data_format, input_shape,"
" method, normalizer, criterion, granularity,"
"min_num_filters, threshold ", [
('channels_first', (3, 128, 64),
'min_weight', 'off', 'L2', 2, 2, 0.5),
('channels_last', (64, 64, 3),
'min_weight', 'off', 'L2', 2, 2, 0.5),
])
def test_with_depthwise_conv_layer(self, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold):
"""Test pruning in presence of DepthwiseConv2D layer."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(32,
kernel_size=3,
strides=(1, 1),
padding='valid',
name='conv1',
data_format=data_format)(inputs)
x = keras.layers.DepthwiseConv2D((3, 3),
padding='valid',
strides=1,
depth_multiplier=1,
name='conv_dw_1',
data_format=data_format)(x)
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(model, method, normalizer, criterion,
granularity, min_num_filters, threshold)
@pytest.mark.parametrize("data_format, input_shape,"
" method, normalizer, criterion, granularity,"
"min_num_filters, threshold, depth_multiplier ", [
('channels_first', (3, 128, 64),
'min_weight', 'off', 'L2', 2, 2, 0.5, 2),
('channels_last', (64, 64, 3),
'min_weight', 'off', 'L2', 2, 2, 0.5, 3),
])
def test_depth_multiplier_not_one(self, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold,
depth_multiplier):
"""Test pruning in presence of DepthwiseConv2D layer."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(32,
kernel_size=3,
strides=(1, 1),
padding='valid',
name='conv1',
data_format=data_format)(inputs)
x = keras.layers.DepthwiseConv2D((3, 3),
padding='valid',
strides=1,
depth_multiplier=depth_multiplier,
name='conv_dw_1',
data_format=data_format)(x)
model = keras.models.Model(inputs=inputs, outputs=x)
# Will raise ValueError during explore stage when depth_multiplier is not 1.
with pytest.raises(ValueError):
self.common(model, method, normalizer, criterion,
granularity, min_num_filters, threshold)
def test_overrides(self):
"""Test that layer config overrides work."""
input_shape = (10,)
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Dense(3, activation='linear', name='dense_output')(inputs)
model = keras.models.Model(inputs=inputs, outputs=x)
layer_config_overrides = {
'bias_regularizer': keras.regularizers.l1(0.01),
'kernel_regularizer': keras.regularizers.l1(0.01),
'trainable': False
}
pruned_model = self.common(
model,
'min_weight',
'off',
'L2',
4,
8,
0.5,
excluded_layers=['dense_output'],
check_output_on_input_shape=input_shape,
layer_config_overrides=layer_config_overrides)
# Verify that the overrides got applied.
for layer in pruned_model.layers:
# Overrides don't apply to input layers.
if isinstance(layer, keras.layers.InputLayer):
continue
for key in layer_config_overrides: # noqa pylint: disable=C0206
assert getattr(layer, key) == layer_config_overrides[key]
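# Note: these tests are typically driven through pytest, e.g. (illustrative
# invocation; adjust the path to your checkout):
#   pytest nvidia_tao_tf2/model_optimization/tests/test_pruning.py -v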
| tao_tensorflow2_backend-main | nvidia_tao_tf2/model_optimization/tests/test_pruning.py |
"""Unit tests for model optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow2_backend-main | nvidia_tao_tf2/model_optimization/tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO pruning.
This module includes APIs to prune a tf.keras model.
"""
import logging
import numpy as np
from tensorflow import keras
from nvidia_tao_tf2.backbones.utils_tf import swish
from nvidia_tao_tf2.common.decorators import override, subclass
from nvidia_tao_tf2.cv.efficientdet.layers.image_resize_layer import ImageResizeLayer
from nvidia_tao_tf2.cv.efficientdet.layers.weighted_fusion_layer import WeightedFusion
"""Logger for pruning APIs."""
logger = logging.getLogger(__name__)
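# Layer types that the pruning pass may traverse when propagating retained
# filter indices from a pruned layer to its downstream consumers.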
TRAVERSABLE_LAYERS = [
keras.layers.BatchNormalization,
keras.layers.Activation,
keras.layers.Dropout,
keras.layers.Softmax,
keras.layers.MaxPooling2D,
keras.layers.AveragePooling2D,
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Average,
keras.layers.Maximum,
keras.layers.DepthwiseConv2D,
keras.layers.ZeroPadding2D,
keras.layers.ReLU,
keras.layers.TimeDistributed,
keras.layers.LeakyReLU,
keras.layers.UpSampling2D,
keras.layers.Conv2D,
keras.layers.SeparableConv2D,
ImageResizeLayer, WeightedFusion,
]
class Prune(object):
"""A class interface for the pruning operator."""
def _get_filter_stats(self, kernels, layer):
"""Return norms of all kernel filters.
Args:
kernels (Array): array of kernels to get retained indices of, where the last
dimension indexes individual kernels.
layer(keras Layer): the layer whose filters we are going to make statistics.
"""
raise NotImplementedError
def _get_retained_idx(self, explored_stat):
"""Return indices of filters to retain.
Args:
explored_stat (1-d array): array of kernel filter norms.
"""
raise NotImplementedError
@override
def prune(self, model):
"""Prune a model.
Args:
model (Model): the Keras model to prune.
Returns:
model (Model): the pruned model.
"""
raise NotImplementedError()
class PrunedLayer(object):
"""Class definition to store information about pruned layers.
Args:
retained_idx (list): list of retained indices.
keras_layer: output of the current layer.
"""
def __init__(
self,
retained_idx,
explored_stat=None,
visited=False,
is_pruned=None,
keras_layer=None,
):
"""Initialization routine.
Args:
retained_idx (list): All filter indices that are above the pruning threshold and may be
retained.
explored_stat (list): Norms of the filter in the given layer to keep track for full
pruning of element-wise operations.
is_pruned (bool): Flag to mark layer as pruned if num of retained filters is
less than the number of incoming filters.
visited (bool): Flag to mark layer as visited during phase 2 of the pruning.
keras_layer (keras layer): Output of the current layer.
Returns:
Pruned_layer data structure.
"""
self.retained_idx = retained_idx
self.keras_layer = keras_layer
self.is_pruned = is_pruned
self.visited = visited
self.explored_stat = explored_stat
@subclass
class PruneMinWeight(Prune):
"""A class that implements pruning according to the "min weight method".
This class implements the 'min_weight' filtering method described in:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
normalizer (str): 'max' to normalize by dividing each norm by the maximum norm within
a layer; 'L2' to normalize by dividing by the L2 norm of the vector comprising all
kernel norms.
criterion (str): only 'L2' is supported.
granularity (int): granularity of the number of filters to remove at a time.
min_num_filters (int): minimum number of filters to retain in each layer.
threshold (float): threshold to compare normalized norm against.
equalization_criterion (str): criteria to equalize element wise operation layers.
Supported criteria are 'arithmetic_mean', 'geometric_mean', 'union', 'intersection'.
excluded_layers (list): list of names of layers that should not be pruned. Typical usage
is for output layers of conv nets where the number of output channels must match
a number of classes.
byom_custom_layer (list): Option to specify BYOM custom layers. These layers will be
included in the pass-through layers.
"""
def __init__(
self,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=None,
equalization_criterion="union",
byom_custom_layer=None
):
"""Initialization routine."""
self._normalizer = normalizer
self._criterion = criterion
self._granularity = granularity
self._min_num_filters = min_num_filters
self._equalization_criterion = equalization_criterion
self._equalization_groups = []
self._threshold = threshold
if excluded_layers is None:
excluded_layers = []
self._excluded_layers = excluded_layers
self._explored_layers = {}
if byom_custom_layer is None:
byom_custom_layer = []
self.byom_custom_layers = byom_custom_layer
self._visited = {}
@staticmethod
def _get_channel_index(data_format):
"""Return the channel index for the specified data format.
Args:
data_format (str): 'channels_first' or 'channels_last'.
"""
if data_format == "channels_first":
return -3
if data_format == "channels_last":
return -1
raise ValueError(f"Unknown data format: {data_format}")
def _get_data_format(self, layer):
# Return a layer's data format. Recurse through previous layers
# if necessary. If the data format cannot be determined, this
# function returns ``None``.
if hasattr(layer, "data_format"):
return layer.data_format
if type(layer) == keras.layers.TimeDistributed and hasattr(layer.layer, 'data_format'):
return layer.layer.data_format
if type(layer) in [keras.layers.Reshape,
keras.layers.Permute] or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.Reshape,
keras.layers.Permute]):
# Reshape and Permute layers make it impossible to retrieve the data format.
return None
if type(layer) == keras.layers.Flatten or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) == keras.layers.Flatten):
# Flatten layers yield (N, K) tensors and can be considered
# either "channels_first" or "channels_last" indifferently.
# Let's pick "channels_first" (N, C, H, W) arbitrarily.
return "channels_first"
inbound_layers = _get_inbound_layers(layer)
if not inbound_layers:
# We have not found the data format.
return None
# Recurse through inbound layers.
data_formats = [self._get_data_format(inbound_l) for inbound_l in inbound_layers]
if len(set(data_formats)) > 1:
raise ValueError(
f"Found more than 1 data format in inbound layers: {repr(data_formats)}"
)
return data_formats[0]
def _get_previous_retained_idx(self, layer):
inbound_layers = _get_inbound_layers(layer)
if inbound_layers:
return self._explored_layers[inbound_layers[0].name].retained_idx
return None
@override
def _get_filter_stats(self, kernels, layer):
"""Return norms of all kernel filters.
This function implements the 'min_weight' and returns the norms of the filters
in the layer.
Args:
kernels (Array): array of kernels to get retained indices of, where the last
dimension indexes individual kernels.
layer(keras Layer): the layer whose filters we are going to make statistics.
Returns:
explored_stat (1-d array): array of pruning stats for individual kernels
"""
if self._criterion == "L2":
pruning_stat = get_L2_norm(kernels, layer)
else:
raise NotImplementedError(f"{self._criterion} pruning")
# Layer-wise normalization.
pruning_stat = normalize_stat(pruning_stat, self._normalizer)
return pruning_stat
def _merge_layerwise_stats(self, layerwise_stats):
"""Merge the layerwise pruning stats according to equalization_criterion.
Args:
layerwise_stats (2-d array): Array of pruning stats for individual kernels
in multiple weights.
Returns:
merged_stats (1-d array): Merged pruning stats.
"""
if type(layerwise_stats) == list:
layerwise_stats = np.stack(layerwise_stats)
assert (
layerwise_stats.ndim == 2
), "Layerwise stats has to be two dimensional array."
if self._equalization_criterion == "union":
# Equalizing input retained indices by computing union of the feature maps.
merged_stat = np.amax(layerwise_stats, axis=0)
elif self._equalization_criterion == "intersection":
# Equalizing input retained indices by computing intersection of the
# feature maps.
merged_stat = np.amin(layerwise_stats, axis=0)
elif self._equalization_criterion == "arithmetic_mean":
# Equalizing input retained indices by making sure the mean of the filter norms
# cross the threshold.
merged_stat = np.mean(layerwise_stats, axis=0)
elif self._equalization_criterion == "geometric_mean":
# Equalizing input retained indices by making sure the geometric mean of the
# filter norms cross the threshold.
# NumPy evaluates np.exp(np.log(0.0)) to 0.0, though np.log(0.0) emits a runtime warning.
log_stats = np.log(layerwise_stats)
merged_stat = np.exp(np.mean(log_stats, axis=0))
else:
raise NotImplementedError(
f"Unknown equalization criterion for element-wise operations: {self._equalization_criterion}"
)
return merged_stat
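# Illustrative example: for two branches with per-filter stats
# [0.2, 0.9] and [0.6, 0.4]:
#   'union' (elementwise max)        -> [0.6, 0.9]
#   'intersection' (elementwise min) -> [0.2, 0.4]
#   'arithmetic_mean'                -> [0.4, 0.65]
#   'geometric_mean'                 -> [~0.35, 0.6]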
@override
def _get_retained_idx(self, explored_stat):
"""Return indices of filters to retain from explored stats (filter norms).
This function computes the filter indices to be retained at the end of pruning.
The number of filters is clamped to a multiple of the granularity.
Args:
explored_stat (1-d array): array of pruning stats for individual kernels
Returns:
retained_idx (1-d array): indices of filters to retain.
"""
retained_idx = np.where(explored_stat > self._threshold)[0]
# Compute depth of the layer before pruning.
orig_output_depth = len(explored_stat)
# Check granularity and minimum number of filters.
num_retained = len(retained_idx)
# Minimum number of filters - this shouldn't be more than the
# original number of filters.
min_num_filters = min(self._min_num_filters, orig_output_depth)
# Maintain at least min_num_filters filters.
num_retained = max(min_num_filters, num_retained)
# Clamping to the nearest multiple of granularity.
if num_retained % self._granularity > 0:
num_retained += self._granularity - (num_retained % self._granularity)
# Sanity check.
num_retained = min(num_retained, orig_output_depth)
# Sort filter ids by decreasing pruning stat.
sorted_idx = np.argsort(-explored_stat)
retained_idx = np.sort(sorted_idx[:num_retained])
return retained_idx
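# Worked example (illustrative): with 64 filters, 21 of which exceed the
# threshold, granularity=8 and min_num_filters=16:
#   num_retained = max(16, 21) = 21, rounded up to 24 (the next multiple of 8),
#   so the 24 filters with the largest stats are kept, in sorted index order.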
def _is_layer_pruned(self, layer):
# Recurse through previous layers until we find one that is explicitly
# pruned or not pruned.
if layer.name in self._explored_layers:
is_pruned = self._explored_layers[layer.name].is_pruned
if is_pruned is not None:
return is_pruned
inbound_layers = _get_inbound_layers(layer)
if inbound_layers:
return any([self._is_layer_pruned(l) for l in inbound_layers]) # noqa pylint: disable=R1729
return False
def _equalize_retained_indices(self, eltwise_prunable_inputs):
"""Equalize retained indices of all inputs to the element wise layer."""
if type(eltwise_prunable_inputs[0]) != keras.layers.TimeDistributed:
output_depth = eltwise_prunable_inputs[0].filters
else:
output_depth = eltwise_prunable_inputs[0].layer.filters
if any(layer.name in self._excluded_layers for layer in eltwise_prunable_inputs):
logger.debug(
"Skipping equalization of eltwise inputs: " # noqa pylint: disable=C0209
"{}".format(eltwise_prunable_inputs)
)
output_idx = range(output_depth)
else:
layerwise_stats = []
for prunable_input in eltwise_prunable_inputs:
layerwise_stats.append(
self._explored_layers[prunable_input.name].explored_stat
)
merged_stats = self._merge_layerwise_stats(layerwise_stats)
output_idx = self._get_retained_idx(merged_stats)
return output_idx
def _equalize_dw_retained_indices(self,
previous_stat,
this_stat,
previous_layer,
this_layer,
criterion):
"""Equalize the depth-wise conv. and its previous layer's retained indexes."""
dw_layers = [previous_layer, this_layer]
if type(dw_layers[0]) == keras.layers.TimeDistributed:
output_depth = dw_layers[0].layer.filters
else:
output_depth = dw_layers[0].filters
if any((layer.name in self._excluded_layers or 'fpn_cell' in layer.name) for layer in dw_layers):
logger.debug("Skipping equalization of depth-wise conv layers: {}".format(dw_layers)) # noqa pylint: disable=C0209
output_idx = range(output_depth)
else:
cumulative_stat = previous_stat
if criterion == "union":
# Equalizing input retained indices by computing union of the feature maps.
cumulative_stat = np.maximum(cumulative_stat, this_stat)
elif criterion == "intersection":
# Equalizing input retained indices by computing intersection of the
# feature maps.
cumulative_stat = np.minimum(cumulative_stat, this_stat)
elif criterion == "arithmetic_mean":
# Equalizing input retained indices by making sure the mean of the filter norms
# cross the threshold.
cumulative_stat = (cumulative_stat + this_stat) / 2.0
elif criterion == "geometric_mean":
# Equalizing input retained indices by making sure the geometric mean of the
# filter norms cross the threshold.
cumulative_stat = np.power(np.multiply(cumulative_stat,
this_stat), float(1 / 2.0))
else:
raise NotImplementedError(
f"Unknown equalization criterion for depth-wise conv operations: {criterion}")
# Clamp outputs to a multiple of the granularity
output_idx = self._get_retained_idx(cumulative_stat)
return output_idx
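# Illustrative example: with previous_stat=[0.2, 0.9] and this_stat=[0.6, 0.4],
# 'union' merges to [0.6, 0.9] and 'intersection' to [0.2, 0.4] before the
# merged stat is clamped to the granularity by _get_retained_idx.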
def _explore_conv_transpose_layer(self, layer):
# No transformation here, just propagate the number of output feature maps.
kernels, _, _ = self._unravel_weights(layer)
retained_idx = range(kernels.shape[2])
return retained_idx
def _explore_conv_or_fc_layer(self, layer):
# Retrieve weights.
kernels, _, _ = self._unravel_weights(layer)
# Identify filters to prune.
if layer.name in self._excluded_layers or 'fpn_cell' in layer.name:
explored_stat = self._get_filter_stats(kernels, layer)
retained_idx = range(kernels.shape[-1])
else:
explored_stat = self._get_filter_stats(kernels, layer)
retained_idx = self._get_retained_idx(explored_stat)
initial_neuron_count = kernels.shape[-1]
retained_neuron_count = len(retained_idx)
is_pruned = retained_neuron_count < initial_neuron_count
return retained_idx, explored_stat, is_pruned
def _explore_conv_dw_layer(self, layer):
# Retrieve weights.
kernels, _, _ = self._unravel_weights(layer)
# Raise error when it's a DepthwiseConv2D layer but depth_multiplier != 1
if kernels.shape[-1] != 1:
raise ValueError('DepthwiseConv2D for pruning can only have depth_multiplier == 1.')
# Identify filters to prune.
if layer.name in self._excluded_layers or 'fpn_cell' in layer.name:
explored_stat = self._get_filter_stats(kernels, layer)
retained_idx = range(kernels.shape[2])
else:
explored_stat = self._get_filter_stats(kernels, layer)
retained_idx = self._get_retained_idx(explored_stat)
initial_neuron_count = kernels.shape[2]
retained_neuron_count = len(retained_idx)
# apply equalization for depth-wise conv.
dw_layers = []
dw_layers = find_prunable_parent(dw_layers,
layer,
True,
visited=self._visited,
byom_custom_layers=self.byom_custom_layers)
self._update_equalization_groups(dw_layers + [layer])
previous_layer = dw_layers[0]
previous_stat = self._explored_layers[previous_layer.name].explored_stat
retained_idx = self._equalize_dw_retained_indices(previous_stat,
explored_stat,
previous_layer,
layer,
self._equalization_criterion)
retained_neuron_count = len(retained_idx)
self._explored_layers[previous_layer.name].retained_idx = retained_idx
is_pruned = retained_neuron_count < initial_neuron_count
self._explored_layers[previous_layer.name].is_pruned = is_pruned
return retained_idx, explored_stat, is_pruned
def _explore_conv_2d_gru_layer(self, layer):
# Retrieve weights: W_z, W_r, W_h, U_z, U_r, U_h, b_z, b_r, b_h.
weights = layer.get_weights()
# Identify filters to prune.
if layer.name in self._excluded_layers or not layer.trainable:
if layer.name not in self._excluded_layers:
logger.info("Skipping nontrainable layer: {}".format(layer.name)) # noqa pylint: disable=C0209
# Infer output channels from first kernel.
explored_stat = None
retained_idx = range(weights[0].shape[-1])
else:
layerwise_stats = []
# Do not take bias into account in the pruning decision,
# use only W_z, W_r, W_h, U_z, U_r, U_h.
for kernels in weights[:6]:
layerwise_stats.append(self._get_filter_stats(kernels, layer))
# Merge stats according to equalization criterion for determining joint pruned indices.
# This handles the elementwise ops in the layer.
explored_stat = self._merge_layerwise_stats(layerwise_stats)
retained_idx = self._get_retained_idx(explored_stat)
initial_neuron_count = weights[0].shape[-1]
retained_neuron_count = len(retained_idx)
is_pruned = retained_neuron_count < initial_neuron_count
return retained_idx, explored_stat, is_pruned
def _update_equalization_groups(self, eltwise_prunable_inputs):
eq_groups = []
for g in self._equalization_groups:
for epi in eltwise_prunable_inputs:
if epi in g:
eq_groups.append(g)
break
merged_group = []
for g in eq_groups:
merged_group.extend(g)
for epi in eltwise_prunable_inputs:
if epi not in merged_group:
merged_group.append(epi)
for g in eq_groups:
idx = self._equalization_groups.index(g)
del self._equalization_groups[idx]
self._equalization_groups.append(merged_group)
return merged_group
def _explore_elmtwise_layer(self, layer):
eltwise_prunable_inputs = []
eltwise_prunable_inputs = find_prunable_parent(
eltwise_prunable_inputs, layer, byom_custom_layers=self.byom_custom_layers, visited=self._visited
)
logger.debug(
"At explore_elmtwise_layer: Prunable parents at layer {}".format(layer.name) # noqa pylint: disable=C0209
)
eltwise_prunable_inputs = list(set(eltwise_prunable_inputs))
for l in eltwise_prunable_inputs: # noqa pylint: disable=E741
logger.debug("Prunable_parents {}".format(l.name)) # noqa pylint: disable=C0209
# If any of the parents are broadcast layers, pop them out of prunable input list.
if type(l) != keras.layers.TimeDistributed and l.filters == 1:
# Set retained indices for this layer as 0.
self._explored_layers[l.name].retained_idx = range(l.filters)
self._explored_layers[l.name].is_pruned = False
eltwise_prunable_inputs.pop(eltwise_prunable_inputs.index(l))
elif type(l) == keras.layers.TimeDistributed and l.layer.filters == 1:
# Set retained indices for this layer as 0.
self._explored_layers[l.name].retained_idx = range(l.layer.filters)
self._explored_layers[l.name].is_pruned = False
eltwise_prunable_inputs.pop(eltwise_prunable_inputs.index(l))
# If the updated list of eltwise prunable inputs has more than one branch,
# equalize the retained indices.
if len(eltwise_prunable_inputs) > 1:
if type(layer) == WeightedFusion:
fixed_retained_idx = range(eltwise_prunable_inputs[0].filters)
else:
eltwise_prunable_inputs = self._update_equalization_groups(
eltwise_prunable_inputs
)
fixed_retained_idx = self._equalize_retained_indices(
eltwise_prunable_inputs
)
# Otherwise just prune the one conv layer as it was before.
elif len(eltwise_prunable_inputs) == 1:
layer_name = eltwise_prunable_inputs[-1].name
fixed_retained_idx = self._explored_layers[layer_name].retained_idx
else:
# Retrieve weights.
kernels, _, _ = self._unravel_weights(layer)
return range(kernels.shape[-1])
retained_idx = fixed_retained_idx
# Set the newly calculated retained indices for all eltwise prunable layers while,
# also checking if they were pruned during the equalization.
for i in eltwise_prunable_inputs:
self._explored_layers[i.name].retained_idx = retained_idx
initial_neuron_count = i.get_weights()[0].shape[-1]
pruned_state = len(retained_idx) < initial_neuron_count
self._explored_layers[i.name].is_pruned = pruned_state
# If the layer itself is a shared Conv2D, prune it like a regular conv layer.
if type(layer) == keras.layers.Conv2D:
logger.debug("Conv2D layer '{}' is shared.".format(layer.name)) # noqa pylint: disable=C0209
retained_idx, _, _ = self._explore_conv_or_fc_layer(layer)
return retained_idx
def _explore_td_layer(self, layer):
retained_idx = None
explored_stat = None
is_pruned = None
if type(layer.layer) in [
keras.layers.Conv2D,
keras.layers.Dense
]:
retained_idx, explored_stat, is_pruned = self._explore_conv_or_fc_layer(layer)
elif type(layer.layer) in [keras.layers.DepthwiseConv2D]:
retained_idx, explored_stat, is_pruned = self._explore_conv_dw_layer(layer)
return retained_idx, explored_stat, is_pruned
def _prune_explored_concat_layer(self, layer):
data_format = self._get_data_format(layer)
if data_format is not None:
channel_index = self._get_channel_index(data_format)
n_dims = len(layer.output_shape)
allowed_axes = [channel_index % n_dims, channel_index % -n_dims]
if layer.axis not in allowed_axes:
raise ValueError(
f"Concatenation layer only supported on channel axis: data_format={data_format} axis={layer.axis}"
)
else:
# The data format is unknown so we must make sure the previous layer was not pruned.
if self._is_layer_pruned(layer):
raise ValueError(
"Cannot process a concatenation layer if the data format "
"is unknown and one of the previous layers was pruned"
)
channel_index = layer.axis
previous_layers = [l for n in layer._inbound_nodes for l in n.inbound_layers] # noqa pylint: disable=E741
retained_indices = []
offset = 0
for l in previous_layers: # noqa pylint: disable=E741
if self._is_layer_pruned(l):
# Retain unpruned channels.
retained_idx = self._explored_layers[l.name].retained_idx
else:
# Retain all channels.
retained_idx = range(l.output_shape[channel_index])
shifted_indices = [idx + offset for idx in retained_idx]
retained_indices.extend(shifted_indices)
offset += l.output_shape[channel_index]
return retained_indices
def _prune_explored_split_layer(self, layer):
data_format = self._get_data_format(layer)
if data_format is not None:
channel_index = self._get_channel_index(data_format)
else:
channel_index = 1
previous_layers = _get_inbound_layers(layer)
pl = previous_layers[0]
assert not self._is_layer_pruned(pl), (
"Split layer's previous layer cannot be pruned. Try to add it "
"to excluded layer list."
)
total_channels = pl.output_shape[channel_index]
assert total_channels % layer.groups == 0, (
"The number of channels of previous layer should be a multiple "
"of the Split layer's group attribute."
)
n_channels = total_channels // layer.groups
return range(n_channels)
def _prune_explored_flatten_layer(self, layer):
# We need to take the activations of previous layer into account.
try:
previous_layer = layer._inbound_nodes[-1].inbound_layers[0]
except TypeError:
# tf.keras does not wrap inbound nodes / layers in a list
# when there is only a single item.
previous_layer = layer._inbound_nodes[-1].inbound_layers
previous_layer_shape = previous_layer.output.get_shape()
previous_data_format = self._get_data_format(previous_layer)
previous_retained_idx = self._get_previous_retained_idx(layer)
if previous_data_format in ['channels_first', 'channels_last']:
assert previous_retained_idx is not None, (
"Previous retained index of Flatten layer cannot be None if "
"the data format is known.")
if len(previous_layer_shape) != 4 and type(layer) != keras.layers.TimeDistributed:
raise ValueError(f"Expecting 4-dimensional activations got shape={repr(previous_layer_shape)}")
# Take the spatial size into account and create a mask of activations to
# retain from previous layer.
if previous_data_format == "channels_first":
# NCHW case.
inp_spatial_size = int(np.prod(previous_layer_shape[-2:]))
inp_num_fmaps = int(previous_layer_shape[-3])
retained_filter_mask = np.asarray([False] * inp_num_fmaps)
retained_filter_mask[previous_retained_idx] = True
retained_activation_mask = np.repeat(retained_filter_mask, inp_spatial_size)
elif previous_data_format == "channels_last":
# NHWC case.
inp_spatial_size = int(np.prod(previous_layer_shape[-3:-1]))
inp_num_fmaps = int(previous_layer_shape[-1])
retained_filter_mask = np.asarray([False] * inp_num_fmaps)
retained_filter_mask[previous_retained_idx] = True
retained_activation_mask = np.tile(retained_filter_mask, inp_spatial_size)
elif previous_data_format is None:
# The data format is unknown, make sure the previous layer was not pruned.
if self._is_layer_pruned(layer):
raise ValueError(
"Cannot process a pruned flatten layer if the "
"data format is unknown."
)
else:
raise ValueError(f"Unknown data format: {previous_data_format}")
if previous_data_format is not None:
retained_idx = np.where(retained_activation_mask)[0]
else:
retained_idx = None
return retained_idx
def _prune_explored_batch_norm_layer(self, layer):
# Propagate from previous layer.
retained_idx = self._get_previous_retained_idx(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
weights = tuple([w[retained_idx] for w in layer.get_weights()]) # noqa pylint: disable=R1728
return weights
def _prune_explored_conv_or_fc_layer(self, layer):
# Retrieve weights.
kernels, biases, scale_factor = self._unravel_weights(layer)
previous_retained_idx = self._get_previous_retained_idx(layer)
# Remove incoming connections that have been pruned in previous layer.
is_conv2d = False
if type(layer) in [keras.layers.Conv2D]:
is_conv2d = True
elif (type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.Conv2D]):
is_conv2d = True
if previous_retained_idx is not None:
if is_conv2d:
kernels = kernels[:, :, previous_retained_idx, :]
else:
kernels = kernels[previous_retained_idx, :]
# Check if the current layer has been explored
if layer.name not in self._explored_layers:
raise ValueError(f"{layer.name} not explored")
# Import retained idx from the explored stage.
retained_idx = self._explored_layers[layer.name].retained_idx
initial_neuron_count = kernels.shape[-1]
retained_neuron_count = len(retained_idx)
# Prune neurons from kernels and update layer spec.
if is_conv2d:
kernels = kernels[:, :, :, retained_idx]
if type(layer) in [keras.layers.Conv2D]:
layer.filters = retained_neuron_count
else:
layer.layer.filters = retained_neuron_count
else:
kernels = kernels[:, retained_idx]
if type(layer) in [keras.layers.Dense]:
layer.units = retained_neuron_count
else:
layer.layer.units = retained_neuron_count
# Prune neurons from biases.
if biases is not None:
biases = biases[retained_idx]
output_weights = (kernels, biases)
else:
output_weights = (kernels,)
# Set scale factor for QuantizedConv2D layer.
if scale_factor is not None:
output_weights += (scale_factor,)
msg = "layer %s: %d -> %d - actions: %s " % ( # noqa pylint: disable=C0209
layer.name,
initial_neuron_count,
retained_neuron_count,
"[name: %s]" % layer.name, # noqa pylint: disable=C0209
)
logger.debug(msg)
return output_weights
def _prune_explored_conv_dw_layer(self, layer):
# Retrieve weights.
kernels, biases, scale_factor = self._unravel_weights(layer)
initial_neuron_count = kernels.shape[2]
# Check if the current layer has been explored
if layer.name not in self._explored_layers:
raise ValueError(f"{layer.name} not explored")
# Import retained idx from the explored stage.
retained_idx = self._explored_layers[layer.name].retained_idx
kernels = kernels[:, :, retained_idx, :]
retained_neuron_count = len(retained_idx)
# Prune neurons from biases.
if biases is not None:
biases = biases[retained_idx]
output_weights = (kernels, biases)
else:
output_weights = (kernels,)
# Set scale factor for QuantizedDWConv layer.
if scale_factor is not None:
output_weights += (scale_factor,)
msg = "layer %s: %d -> %d - actions: %s " % ( # noqa pylint: disable=C0209
layer.name,
initial_neuron_count,
retained_neuron_count,
"[name: %s]" % layer.name, # noqa pylint: disable=C0209
)
logger.debug(msg)
return output_weights
def _prune_explored_conv_transpose_layer(self, layer):
previous_retained_idx = self._get_previous_retained_idx(layer)
kernels, biases, scale_factor = self._unravel_weights(layer)
kernels = kernels[:, :, :, previous_retained_idx]
weights = (kernels, biases) if biases is not None else (kernels,)
# Set scale factor for QuantizedConvTranspose layer.
if scale_factor is not None:
weights += (scale_factor,)
return weights
def _prune_explored_conv_2d_gru_layer(self, layer):
# Retrieve weights: W_z, W_r, W_h, U_z, U_r, U_h, b_z, b_r, b_h
weights = layer.get_weights()
previous_retained_idx = self._get_previous_retained_idx(layer)
# Remove incoming connections that have been pruned in previous layer.
if previous_retained_idx is not None:
# The first three convolution kernels (W_z, W_r, W_h) operate on the input tensor.
for idx, kernels in enumerate(weights[:3]):
weights[idx] = kernels[:, :, previous_retained_idx, :]
# Check if the current layer has been explored
if layer.name not in self._explored_layers:
raise ValueError(f"{layer.name} not explored")
# Import retained idx from the explored stage.
retained_idx = self._explored_layers[layer.name].retained_idx
initial_neuron_count = weights[0].shape[-1]
retained_neuron_count = len(retained_idx)
# Remove incoming connections in the kernels that operate on the state (U_z, U_r, U_h).
for idx, kernels in enumerate(weights[3:6]):
weights[idx + 3] = kernels[:, :, retained_idx, :]
# Prune output channels from all kernels (W_z, W_r, W_h, U_z, U_r, U_h).
for idx, kernels in enumerate(weights[:6]):
weights[idx] = kernels[:, :, :, retained_idx]
# Prune output channels from biases (b_z, b_r, b_h).
for idx, biases in enumerate(weights[6:]):
weights[idx + 6] = biases[retained_idx]
# Update layer config.
layer.state_depth = retained_neuron_count
layer.initial_state_shape = list(layer.initial_state_shape)
layer.initial_state_shape[1] = retained_neuron_count
msg = "layer %s: %d -> %d - actions: %s " % ( # noqa pylint: disable=C0209
layer.name,
initial_neuron_count,
retained_neuron_count,
"[name: %s]" % layer.name, # noqa pylint: disable=C0209
)
logger.debug(msg)
return weights
def _prune_explored_td_layer(self, layer):
weights = None
retained_idx = None
if type(layer.layer) in [
keras.layers.Conv2D,
keras.layers.Dense
]:
weights = self._prune_explored_conv_or_fc_layer(layer)
elif type(layer.layer) in [keras.layers.DepthwiseConv2D]:
weights = self._prune_explored_conv_dw_layer(layer)
elif type(layer.layer) in [keras.layers.BatchNormalization]:
weights = self._prune_explored_batch_norm_layer(layer)
elif type(layer.layer) == keras.layers.Flatten:
retained_idx = self._prune_explored_flatten_layer(layer)
elif type(layer.layer) == keras.layers.Concatenate:
retained_idx = self._prune_explored_concat_layer(layer)
else:
retained_idx = self._get_previous_retained_idx(layer)
return weights, retained_idx
def _unravel_weights(self, layer):
configs = layer.get_config()
is_quantized = ('quantize' in configs) and configs['quantize']
weights = layer.get_weights()
if is_quantized:
scaling_factor = weights[-1]
weights = weights[:-1]
else:
scaling_factor = None
if len(weights) == 1:
kernels = weights[0]
biases = None
elif len(weights) == 2:
kernels, biases = weights
else:
raise ValueError(f"Unhandled number of weights: {len(weights)}")
return kernels, biases, scaling_factor
def _convert_to_list(self, obj):
if not isinstance(obj, list):
return [obj]
return obj
def _explore(self, model):
"""Explore a model for pruning and decide the feature-maps for all layers.
The model to prune must be a string of convolutional or fully-connected nodes. For example,
the nv-Helnet family of models, the VGG-xx family of models, the ResNet-xx family of
models, AlexNet or LeNet can be pruned using this API.
        This function implements the 'min_weight' filtering method described in:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
model (Model): the Keras model to prune.
Returns:
model (Model): the explored model.
"""
# Explore the model using Breadth First Traversal, starting from the input layers.
logger.info("Exploring graph for retainable indices")
layers_to_explore = model._input_layers
model_inputs = []
model_outputs = []
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
logger.debug("Exploring layer : {}".format(layer.name)) # noqa pylint: disable=C0209
go_to_another_layer = any([l.name not in self._explored_layers # noqa pylint: disable=R1729
for n in self._convert_to_list(layer._inbound_nodes)
for l in self._convert_to_list(n.inbound_layers)]) # noqa pylint: disable=E741
if go_to_another_layer:
# Some of the inbound layers have not been explored yet.
# Skip this layer for now, it will come back to the list
# of layers to explore as the outbound layer of one of the
# yet unexplored layers.
continue
retained_idx = None
explored_stat = None
outputs = None
is_pruned = None
# Layer-specific handling.
if type(layer) in [
keras.layers.DepthwiseConv2D
]:
retained_idx, explored_stat, is_pruned = self._explore_conv_dw_layer(layer)
elif type(layer) in [
keras.layers.Conv2D,
keras.layers.Dense
]:
elmtwise_inputs = _get_inbound_layers(layer)
# Handle pruning for keras element wise merge layers.
if len(layer._inbound_nodes) > 1 and len(set(elmtwise_inputs)) > 1:
# For eltwise layers check if all the inbound layers have been explored first.
if all(l.name in self._explored_layers for l in elmtwise_inputs): # noqa pylint: disable=E741
retained_idx = self._explore_elmtwise_layer(layer)
else:
continue
else:
# Explore current conv or fc layer for retainable feature maps.
retained_idx, explored_stat, is_pruned = self._explore_conv_or_fc_layer(
layer
)
elif type(layer) in [
keras.layers.Conv2DTranspose
]:
            # Explore conv2d transpose layer for retainable indices.
retained_idx = self._explore_conv_transpose_layer(layer)
elif 'helper' in str(type(layer)) or type(layer) == keras.layers.Lambda:
# @scha: BYOM custom layers are in format of <class 'helper.CustomLayer'>
# Make sure that the previous layer was unpruned.
pass
elif type(layer) in [
keras.layers.Activation,
keras.layers.BatchNormalization,
keras.layers.Dropout,
keras.layers.MaxPooling2D,
keras.layers.AveragePooling2D,
keras.layers.GlobalAveragePooling2D,
keras.layers.Softmax,
keras.layers.ReLU,
keras.layers.ELU,
keras.layers.LeakyReLU,
keras.layers.InputLayer,
keras.layers.ZeroPadding2D,
keras.layers.Flatten,
keras.layers.Concatenate,
keras.layers.UpSampling2D,
keras.layers.Cropping2D,
keras.models.Model,
keras.layers.SeparableConv2D,
ImageResizeLayer
]:
# These layers are just pass-throughs.
pass
# TODO: type(layer) == keras.layers.SeparableConv2D:
# _, pw_k, _ = layer.get_weights()
# # Identify filters to prune.
# if layer.name in self._excluded_layers:
# explored_stat = None
# retained_idx = range(pw_k.shape[-1])
# else:
# explored_stat = self._get_filter_stats(pw_k, layer)
# retained_idx = self._get_retained_idx(explored_stat)
# initial_neuron_count = pw_k.shape[-1]
# retained_neuron_count = len(retained_idx)
# is_pruned = retained_neuron_count < initial_neuron_count
elif type(layer) in [keras.layers.Reshape, keras.layers.Permute]:
# Make sure that the previous layer was unpruned.
if self._is_layer_pruned(layer):
if (
type(layer) == keras.layers.Reshape and
-1 in layer.target_shape
):
retained_idx = None
is_pruned = None
else:
raise NotImplementedError(
"Reshape/Permute is not supported after a pruned layer."
)
else:
retained_idx = None
is_pruned = False
# Handle pruning for keras element wise merge layers.
elif type(layer) in [
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Average,
keras.layers.Maximum,
WeightedFusion
]:
# For eltwise layers check if all the inbound layers have been explored first.
elmtwise_inputs = [
l for n in layer._inbound_nodes for l in n.inbound_layers # noqa pylint: disable=E741
]
if all(l.name in self._explored_layers for l in elmtwise_inputs): # noqa pylint: disable=E741
retained_idx = self._explore_elmtwise_layer(layer)
else:
continue
elif type(layer) == keras.layers.TimeDistributed:
retained_idx, explored_stat, is_pruned = self._explore_td_layer(layer)
elif type(layer) == keras.layers.Lambda:
if layer.name not in self._excluded_layers:
                    raise ValueError(
                        "Lambda layers must be explicitly excluded from pruning. "  # noqa pylint: disable=C0209
                        "Encountered lambda layer with name {} that is not "
                        "explicitly excluded".format(layer.name)
                    )
# Once we have verified that the lambda layer is excluded from pruning, it
# can safely be assumed to be a pass-through.
pass
else:
# Raise not implemented error for layers that aren't supported.
raise NotImplementedError(f"Unknown layer type: {type(layer)}")
# Explore the input layer.
if type(layer) in [keras.layers.InputLayer]:
# Re-use the existing InputLayer.
outputs = layer.output
model_inputs.append(outputs)
retained_idx = None
else:
# Make sure there are no duplicates in the retained indices.
if retained_idx is not None:
assert len(retained_idx) == len(set(retained_idx)), (
f"Duplicates found in list of retained indices: {repr(retained_idx)}"
)
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs.append(outputs)
            # @scha: in BYOM there are cases where duplicate nodes are appended.
            # As a result, the while loop in _explore would never end.
for node in outbound_nodes:
if node.outbound_layer not in layers_to_explore:
layers_to_explore.append(node.outbound_layer)
names_to_explore = [l.name for l in layers_to_explore] # noqa pylint: disable=E741
logger.debug(
"Updating layers to explore at {} to: {}".format( # noqa pylint: disable=C0209
layer.name, names_to_explore # noqa pylint: disable=C0209
)
)
# Save info about the layer we just explored.
self._explored_layers[layer.name] = PrunedLayer(
retained_idx, explored_stat=explored_stat, is_pruned=is_pruned
)
return model
@override
def prune(self, model, layer_config_overrides=None, output_layers_with_outbound_nodes=None):
"""Prune an alreayd explored model, contains the retained-indices for all layers.
The model to prune must be a string of convolutional or fully-connected nodes. For example,
the nv-Helnet family of models, the VGG-xx family of models, the Resnet-xx family of
models, AlexNet or LeNet can be pruned using this API.
This function implements the 'min_weight' filtering method described on:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
model (Model): the Keras model to prune.
layer_config_overrides (dict): A dictionary of key-value pairs used for overriding
layer configuration. Use cases include changing regularizers after pruning.
output_layers_with_outbound_nodes (list): Option to specify intermediate output layers
that have `outbound_nodes`.
Returns:
model (Model): the pruned model.
"""
# get `training` config for BN reconstruction
config_map = {l['name'] : l['inbound_nodes'] for l in model.get_config()['layers']} # noqa pylint: disable=E741
# Phase 1: Explore the model.
if not output_layers_with_outbound_nodes:
output_layers_with_outbound_nodes = []
model = self._explore(model)
# Phase 2: Prune the graph in Breadth First Search fashion, starting from the
# input layer.
logger.debug("Explored layers: {}".format(self._explored_layers.keys())) # noqa pylint: disable=C0209
logger.debug("Model layers: {}".format([l.name for l in model.layers])) # noqa pylint: disable=C0209
input_layer = [l for l in model.layers if ( # noqa pylint: disable=E741
type(l) in [keras.layers.InputLayer])]
layers_to_explore = input_layer
model_outputs = {}
logger.info("Pruning model and appending pruned nodes to new graph")
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not self._explored_layers[layer.name].visited:
# Check if all inbound layers explored for given layer.
inbound_nodes = layer._inbound_nodes
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
go_to_another_layer = False
for n in inbound_nodes:
inbound_layers = n.inbound_layers
if not isinstance(inbound_layers, list):
inbound_layers = [inbound_layers]
for l in inbound_layers: # noqa pylint: disable=E741
# if isinstance(l, dict):
# break
if not self._explored_layers[l.name].visited:
go_to_another_layer = True
break
if go_to_another_layer:
break
if go_to_another_layer:
# if not (len(prune_visited) == len(inbound_layers)):
# Some of the inbound layers have not gone through pruning phase yet.
# Skip this layer for now, it will come back to the list
# of layers to explore as the outbound layer of one of the
# yet unvisited layers.
continue
logger.debug("Pruning layer: {}".format(layer.name)) # noqa pylint: disable=C0209
weights = None
outputs = None
# Layer-specific handling. Carve weights out based on results from the
# explore phase.
if type(layer) in [
keras.layers.DepthwiseConv2D
]:
weights = self._prune_explored_conv_dw_layer(layer)
elif type(layer) in [
keras.layers.Conv2D,
keras.layers.Dense
]:
weights = self._prune_explored_conv_or_fc_layer(layer)
elif type(layer) in [keras.layers.BatchNormalization]:
weights = self._prune_explored_batch_norm_layer(layer)
elif type(layer) in [
keras.layers.Conv2DTranspose
]:
weights = self._prune_explored_conv_transpose_layer(layer)
elif type(layer) == keras.models.Model:
sub_model_layer = self.prune(layer)
weights = sub_model_layer.get_weights()
elif (type(layer) == keras.layers.Concatenate):
retained_idx = self._prune_explored_concat_layer(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) == keras.layers.Flatten:
retained_idx = self._prune_explored_flatten_layer(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) in [keras.layers.Reshape, keras.layers.Permute]:
# Make sure that the previous layer was unpruned.
if self._is_layer_pruned(layer):
if not (
type(layer) == keras.layers.Reshape and
-1 in layer.target_shape
):
raise NotImplementedError(
"Reshape is not supported after a pruned layer."
)
if (
type(layer) == keras.layers.Reshape and
-1 in layer.target_shape
):
retained_idx = self._get_previous_retained_idx(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) in [
keras.layers.Activation,
keras.layers.Dropout,
keras.layers.MaxPooling2D,
keras.layers.AveragePooling2D,
keras.layers.GlobalAveragePooling2D,
keras.layers.Softmax,
keras.layers.ZeroPadding2D,
keras.layers.ReLU,
keras.layers.ELU,
keras.layers.LeakyReLU,
keras.layers.InputLayer,
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Average,
keras.layers.Maximum,
keras.layers.UpSampling2D,
keras.layers.Cropping2D,
ImageResizeLayer,
WeightedFusion,
keras.layers.SeparableConv2D
]:
                    # These layers have no weights associated with them, hence no transformation;
                    # just propagate the retained indices from the previous layer.
retained_idx = self._get_previous_retained_idx(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
# elif type(layer) == keras.layers.SeparableConv2D:
# EXPERIMENTAL
# prev_retained_idx = self._get_previous_retained_idx(layer)
# self._explored_layers[layer.name].retained_idx = prev_retained_idx
# layer.filters = len(prev_retained_idx)
# TODO: address the issue from pruning the inner pw
# retained_idx = self._explored_layers[layer.name].retained_idx
# layer.filters = len(retained_idx)
# dw_k, pw_k, b = layer.get_weights()
# weights = [dw_k[:, :, prev_retained_idx, :],
# pw_k[:, :, prev_retained_idx, :][..., retained_idx],
# b[retained_idx]]
# raise NotImplementedError("Pruning doesn't support SeparableConv2D layer.")
elif 'helper' in str(type(layer)) or type(layer) == keras.layers.Lambda:
# @scha: BYOM custom layers are in format of <class 'helper.CustomLayer'>
retained_idx = self._get_previous_retained_idx(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) in [keras.layers.InputLayer]:
pass
elif type(layer) == keras.layers.TimeDistributed:
weights, retained_idx = self._prune_explored_td_layer(layer)
if retained_idx is not None:
self._explored_layers[layer.name].retained_idx = retained_idx
else:
# Other layers are not going through any transformation here.
                    raise NotImplementedError(
                        f"Unsupported layer type for layer named {layer.name} of type {type(layer)}"
                    )
# Visit input layer.
if type(layer) in [keras.layers.InputLayer]:
# Re-use the existing InputLayer.
outputs = layer.output
new_layer = layer
else:
# Create new layer.
layer_config = layer.get_config()
# Apply layer config overrides.
if layer_config_overrides is not None:
for key in layer_config:
if key in layer_config_overrides:
layer_config[key] = layer_config_overrides[key]
with keras.utils.CustomObjectScope({'swish': swish}):
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
inbound_layers = node.inbound_layers
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_layers, list):
inbound_layers = [inbound_layers]
node_indices = []
for node_i in range(len(node.keras_inputs)):
node_indices.append(
node.keras_inputs[node_i]._keras_history.node_index)
if len(node_indices) == 0:
node_indices = 0
# for idx, l in enumerate(node.inbound_layers):
for idx, l in enumerate(inbound_layers):
keras_layer = self._explored_layers[l.name].keras_layer
# TF2
# node_indices = node.node_indices
# For some reason, tf.keras does not always put things in a list.
if not isinstance(node_indices, list):
node_indices = [node_indices]
kl_output = keras_layer.get_output_at(node_indices[idx])
prev_outputs.append(kl_output)
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
if type(new_layer) in [keras.layers.BatchNormalization]:
if 'training' in config_map[layer.name][0][0][-1]:
outputs.append(
new_layer(
prev_outputs,
training=config_map[layer.name][0][0][-1]['training'])
)
else:
outputs.append(new_layer(prev_outputs))
else:
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
                # Option to specify intermediate output layers that
                # have `outbound_nodes`.
if layer.name in output_layers_with_outbound_nodes:
model_outputs[layer.output.name] = outputs
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
self._explored_layers[layer.name].visited = True
self._explored_layers[layer.name].keras_layer = new_layer
else:
continue
# Create new keras model object from pruned specifications.
# Patch for duplicate outputs
output_names = []
for l in model.outputs: # noqa pylint: disable=E741
if l.name in model_outputs:
if l.name not in output_names:
output_names.append(l.name)
model_outputs = [model_outputs[name] for name in output_names]
# model_outputs = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
new_model = keras.models.Model(
inputs=model.inputs, outputs=model_outputs, name=model.name
)
return new_model
def prune(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=None,
layer_config_overrides=None,
equalization_criterion="union",
output_layers_with_outbound_nodes=None,
byom_custom_layer=None
):
"""Prune a model.
The model to prune must be a Keras model, consisting of the following layer types:
- 2D convolutions, transpose convolutions,
- fully-connected,
- batch normalization.
The following non-parametric layer types are also supported:
- non-linear activations (sigmoid, ReLU, TanH, ...),
- flatten,
- concatenation,
- dropout,
- element-wise operations (add, subtract, ...),
- and more.
For example, the nv-Helnet family of models, the VGG-xx family of models, the ResNet-xx family
    of models, AlexNet, LeNet or GoogLeNet can be pruned using this API.
The inbound layers to element-wise operations should not be pruned.
    This function implements the 'min_weight' filtering method described in:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
model (Model): the Keras model to prune.
method (str): only 'min_weight' is supported.
normalizer (str): 'max' to normalize by dividing each norm by the maximum norm within
a layer; 'L2' to normalize by dividing by the L2 norm of the vector comprising all
kernel norms.
criterion (str): only 'L2' is supported.
granularity (int): granularity of the number of filters to remove at a time.
min_num_filters (int): minimum number of filters to retain in each layer.
threshold (float): threshold to compare normalized norm against.
excluded_layers (list): list of names of layers that should not be pruned. Typical usage
is for output layers of conv nets where the number of output channels must match
a number of classes.
layer_config_overrides (dict): A dictionary of key-value pairs used for overriding layer
configuration. Use cases include changing regularizers after pruning.
equalization_criterion (str): Criteria to equalize the stats of inputs to an element
wise op layer. Options are [arithmetic_mean, geometric_mean, union, intersection].
output_layers_with_outbound_nodes (list): Option to specify intermediate output layers
that have `outbound_nodes`.
byom_custom_layer (list): Option to specify BYOM custom layers. These layers will be
pass-through.
Returns:
model (Model): the pruned model.
"""
if excluded_layers is None:
excluded_layers = []
if method != "min_weight":
# We don't know how to support other pruning methods.
raise NotImplementedError(f"Unsupported pruning method: {method}")
pruner = PruneMinWeight(
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
equalization_criterion=equalization_criterion,
excluded_layers=excluded_layers,
byom_custom_layer=byom_custom_layer
)
return pruner.prune(model, layer_config_overrides, output_layers_with_outbound_nodes)
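# Illustrative usage sketch (comment only; the file name and threshold below
# are hypothetical, and only method='min_weight' is supported):
#
#   from tensorflow import keras
#   model = keras.models.load_model("model.h5", compile=False)
#   pruned = prune(
#       model,
#       method="min_weight",
#       normalizer="max",
#       criterion="L2",
#       granularity=8,
#       min_num_filters=16,
#       threshold=0.1,
#   )
#   pruned.save("model.h5.pruned")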
def _get_inbound_layers(layer):
"""Helper function to get the inbound layers of a given layer.
Needed because tf.keras treats the `inbound_layers` / `inbound_nodes` attributes as single
objects when there is only one of them, whereas keras treats them as lists regardless of how
many elements they hold.
Args:
layer (keras.layers.Layer | tf.keras.layers.Layer): Layer for which to get inbound layers.
Returns:
inbound_layers (list): List of inbound layers.
"""
inbound_layers = []
inbound_nodes = layer._inbound_nodes
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
for n in inbound_nodes:
_inbound_layers = n.inbound_layers
if not isinstance(_inbound_layers, list):
_inbound_layers = [_inbound_layers]
inbound_layers.extend(_inbound_layers)
return inbound_layers
def normalize_stat(stat, normalizer):
"""Normalize pruning statistics.
Args:
stat (Array): array of statistics to normalize
        normalizer (str): either 'L2' (divide by the L2 norm of the array and
            rescale by the number of elements) or 'max' (divide by the maximum value)
Returns:
The normalized array.
"""
if normalizer == "L2":
stat = stat / np.sqrt(np.sum(stat ** 2)) * len(stat)
elif normalizer == "max":
stat = stat / np.max(stat)
elif normalizer != "off":
raise NotImplementedError(f"Invalid pruning normalizer: {normalizer}")
return stat
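# Worked example (comment only): for stat = np.array([3.0, 4.0]),
# normalizer='max' yields [0.75, 1.0], while normalizer='L2' divides by
# sqrt(3^2 + 4^2) = 5 and rescales by len(stat) = 2, yielding [1.2, 1.6].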
def get_L2_norm(kernels, layer):
"""Get the L2 norms of the filters for pruning.
Args:
kernels (Array): array of kernels to compute norms of, where the last
dimension indexes individual kernels.
layer(keras Layer): the layer whose filters we are going to make statistics.
Special treatment to the DepthwiseConv2D.
Returns:
A vector of L2 norms, one for each kernel.
"""
if type(layer) in [keras.layers.DepthwiseConv2D] or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.DepthwiseConv2D]):
# For DepthwiseConv2D, currently we only support depthwise_multiplier = 1.
# I.e., axis 3 is always of size 1, kernel shape = (K_h, K_w, C_in, 1)
norm = np.sqrt(np.sum(kernels**2, axis=(0, 1, 3)))
else:
norm = np.sqrt(np.sum(kernels**2, axis=tuple(range(kernels.ndim - 1))))
return norm
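# Shape sketch (comment only, hypothetical shapes): a Conv2D kernel array of
# shape (3, 3, 16, 32) yields 32 norms, one per output filter, while a
# DepthwiseConv2D kernel of shape (3, 3, 16, 1) yields 16 norms, one per
# input channel (since axis 3 is reduced along with the spatial axes).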
def find_prunable_parent(prunable_parents,
layer,
skip_root=False,
visited=None,
byom_custom_layers=None):
"""Recursive function to find the first prunable parent in the current branch.
Args:
        prunable_parents (list): A list of prunable parents accumulated before the current
            layer was explored.
        layer (keras layer object): Current layer being explored.
        skip_root (bool): Whether or not to skip the root layer (the root of the recursion tree).
            This is useful for the depthwise conv case, because the current layer is prunable,
            but we want to find its prunable parent rather than returning the layer itself.
        visited (dict): Maps the name of each already-explored layer to its prunable parents,
            used to avoid re-traversing shared branches.
        byom_custom_layers (list): List of layers from BYOM to be added to TRAVERSABLE_LAYERS.
    Returns:
A list of keras layers which are prunable inputs to the given layer.
"""
visited = visited or {}
if byom_custom_layers:
assert isinstance(byom_custom_layers, list), \
f"Invalid data type for byom_custom_layers, {type(byom_custom_layers)}"
TRAVERSABLE_LAYERS.extend(byom_custom_layers)
# exit if you have encountered a prunable parent.
if (type(layer) in [keras.layers.Conv2D,
keras.layers.Dense,
keras.layers.DepthwiseConv2D] and
len(layer._inbound_nodes) == 1) or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.Conv2D,
keras.layers.Dense,
keras.layers.DepthwiseConv2D]):
if not skip_root:
prunable_parents.extend([layer])
return list(set(prunable_parents))
    # If you hit a shape manipulation layer, raise an exception.
    if type(layer) not in TRAVERSABLE_LAYERS:
        raise NotImplementedError(
            f"Pruning is not possible with {layer.name} layer in the way"
        )
# Recurse across all branches to return prunable parents.
previous_layers = []
inbound_nodes = layer._inbound_nodes
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
for n in inbound_nodes:
inbound_layers = n.inbound_layers
if not isinstance(inbound_layers, list):
inbound_layers = [inbound_layers]
for l in inbound_layers: # noqa pylint: disable=E741
previous_layers.append(l)
for l in previous_layers: # noqa pylint: disable=E741
if visited and l.name in visited:
prunable_parents.extend(visited[l.name])
else:
# Skip the Input layers if there are multiple parents.
if type(l) not in [keras.layers.InputLayer]:
find_prunable_parent(prunable_parents,
l,
False,
visited,
byom_custom_layers)
visited[layer.name] = prunable_parents
return list(set(prunable_parents))
| tao_tensorflow2_backend-main | nvidia_tao_tf2/model_optimization/pruning/pruning.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus pruning APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf2.model_optimization.pruning import pruning
from nvidia_tao_tf2.model_optimization.pruning.pruning import prune
__all__ = ("prune", "pruning")
| tao_tensorflow2_backend-main | nvidia_tao_tf2/model_optimization/pruning/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus pruning.
This module includes APIs to prune Keras models.
Pruning is currently supported only for sequential models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import logging.config
import os
import sys
import time
from nvidia_tao_tf2.common.path_utils import expand_path
from tensorflow import keras
from pruning import prune
"""Root logger for pruning app."""
logger = logging.getLogger(__name__)
def prune_app(
input_filename,
output_filename,
verbose,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers,
equalization_criterion,
output_layers_with_outbound_nodes
):
"""Wrapper around :any:`modulus.pruning.pruning.prune`.
Args:
input_filename (str): path to snapshot of model to prune
output_filename (str): output filename (defaults to $(input).pruned)
verbose (boolean): whether to print debug messages
See :any:`modulus.pruning.pruning.prune` for more information on the other arguments.
"""
start_time = time.time()
# Set up logging.
verbosity = "DEBUG" if verbose else "INFO"
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", level=verbosity
)
logger.info("Loading model from %s" % (input_filename)) # noqa pylint: disable=C0209
# Load model from disk.
model = keras.models.load_model(input_filename, compile=False)
logger.info("Original model - param count: %d" % model.count_params()) # noqa pylint: disable=C0209
# Create list of exclude layers from command-line, if provided.
if excluded_layers is not None:
excluded_layers = excluded_layers.split(",")
# Create list of output layers with outbound nodes from command-line, if provided.
if output_layers_with_outbound_nodes is not None:
output_layers_with_outbound_nodes = output_layers_with_outbound_nodes.split(",")
# Prune model given specified parameters.
new_model = prune(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=excluded_layers,
equalization_criterion=equalization_criterion,
output_layers_with_outbound_nodes=output_layers_with_outbound_nodes,
)
logger.info("New model - param count: %d" % new_model.count_params()) # noqa pylint: disable=C0209
if output_filename is None:
output_filename = input_filename + ".pruned"
logger.info("Saving pruned model into %s" % (output_filename)) # noqa pylint: disable=C0209
# Save pruned model to disk.
dirname = expand_path(os.path.dirname(output_filename))
if not os.path.exists(dirname):
os.makedirs(dirname)
new_model.save(output_filename)
logger.debug("Done after %s seconds" % (time.time() - start_time,)) # noqa pylint: disable=C0209
def main(args=None):
"""Pruning application.
If MagLev was installed through ``pip`` then this application can be
run from a shell. For example::
$ maglev-prune model.h5 --threshold 0.1
See command-line help for more information.
Args:
args (list): Arguments to parse.
"""
# Reduce TensorFlow verbosity
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
parser = argparse.ArgumentParser(description="Prune a string of conv/fc nodes")
# Positional arguments.
parser.add_argument("input_filename", help="Input file (.h5 Keras snapshot)")
# Optional arguments.
parser.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Output file (defaults to $(input_filename).pruned)",
)
parser.add_argument(
"--method",
type=str,
default="min_weight",
help="Pruning method (currently only 'min_weight' is supported)",
)
parser.add_argument(
"-n",
"--normalizer",
type=str,
default="max",
help="Normalizer type (off, L2, max)",
)
parser.add_argument(
"-c", "--criterion", type=str, default="L2", help="Criterion (L2, activations)"
)
parser.add_argument(
"-e",
"--excluded_layers",
type=str,
default=None,
help="Comma separated list of layers to be excluded from pruning.",
)
parser.add_argument(
"--output_layers_with_outbound_nodes",
type=str,
default=None,
help="Comma separated list of output layers that have outbound nodes.",
)
parser.add_argument(
"--equalization_criterion",
type=str,
default="union",
help="Equalization criterion to be used for inputs to an element-wise op.",
choices=["union", "intersection", "arithmetic_mean", "geometric_mean"],
)
parser.add_argument("-g", "--granularity", type=int, default=8, help="Granularity")
parser.add_argument(
"-m", "--min_num_filters", type=int, default=16, help="Min number of filters"
)
parser.add_argument(
"-t", "--threshold", type=float, default=0.01, help="Pruning threshold"
)
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose messages")
if not args:
args = sys.argv[1:]
args = vars(parser.parse_args(args))
prune_app(
args["input_filename"],
args["output"],
args["verbose"],
args["method"],
args["normalizer"],
args["criterion"],
args["granularity"],
args["min_num_filters"],
args["threshold"],
args["excluded_layers"],
args["equalization_criterion"],
args["output_layers_with_outbound_nodes"],
)
if __name__ == "__main__":
main()
| tao_tensorflow2_backend-main | nvidia_tao_tf2/model_optimization/pruning/app.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from setuptools import Command, find_packages, setup
import os
from os import path
import glob
__version_info__ = (1, 0, 0, 1)
_ROOT = os.path.abspath(os.path.dirname(__file__))
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_all_files(find_dir):
all_files = []
for check_path in os.listdir(find_dir):
full_path = path.join(find_dir, check_path)
if (path.isdir(full_path)):
all_files.extend(get_all_files(full_path))
else:
relative_path = path.relpath(full_path, _ROOT)
all_files.append(relative_path)
return all_files
all_config_files = get_all_files(path.join(_ROOT, path.join('nvdu', 'config')))
print("all_config_files: {}".format(all_config_files))
__version__ = '.'.join(map(str, __version_info__))
setup(
name = "nvdu",
version = __version__,
description = "Nvidia Dataset Utilities",
long_description = read('readme.md'),
long_description_content_type = 'text/markdown',
url = "https://github.com/NVIDIA/Dataset_Utilities",
author = "NVIDIA Corporation",
author_email = "[email protected]",
maintainer = "Thang To",
maintainer_email = "[email protected]",
license = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0. https://creativecommons.org/licenses/by-nc-sa/4.0/",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Topic :: Utilities",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords = "nvdu, nvidia",
packages=find_packages(),
package_data={'': all_config_files},
include_package_data=True,
install_requires = [
"numpy",
"opencv-python",
"pyrr",
"PyWavefront==0.2.0",
"pyglet",
"fuzzyfinder"
],
extras_require = {
# "test": [ "pytest" ]
},
entry_points = {
"console_scripts": [
"nvdu_viz=nvdu.tools.test_nvdu_visualizer:main",
"nvdu_ycb=nvdu.tools.nvdu_ycb:main",
]
},
scripts=[],
) | Dataset_Utilities-master | setup.py |
Dataset_Utilities-master | nvdu/__init__.py |
|
Dataset_Utilities-master | nvdu/tools/__init__.py |
|
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import future
import os
from os import path
import numpy as np
import pyrr
from pyrr import Quaternion, Matrix33, Matrix44, Vector4
import urllib.request
import tarfile
import zipfile
import shutil
# from enum import IntEnum, unique
import argparse
import nvdu
from nvdu.core.nvdu_data import *
# =============================== Constant variables ===============================
# YCB_DIR_ORIGINAL = "ycb/original"
# YCB_DIR_ALIGNED = "ycb/aligned_m"
# YCB_DIR_ALIGNED_SCALED = "ycb/aligned_cm"
YCB_DATA_URL = "http://ycb-benchmarks.s3-website-us-east-1.amazonaws.com/data/"
YCB_URL_POST_FIX = "_google_16k" # Only support the 16k meshes at the moment
# @unique
# class YCBModelType(IntEnum):
class YCBModelType():
Original = 0 # Original model, no modifications
AlignedOnly = 1 # The model is aligned but doesn't get scaled
AlignedCm = 2 # The model is aligned and get scaled to centimeter unit
YCB_DIR = [
path.join('ycb', 'original'),
path.join('ycb', 'aligned_m'),
path.join('ycb', 'aligned_cm'),
]
YCB_OBJECT_SETTINGS = [
path.join('object_settings', '_ycb_original.json'),
path.join('object_settings', '_ycb_aligned_m.json'),
path.join('object_settings', '_ycb_aligned_cm.json'),
]
# =============================== Helper functions ===============================
def get_data_root_path():
nvdu_root_path = nvdu.__path__[0]
data_root_path = path.join(nvdu_root_path, "data")
return data_root_path
def get_config_root_path():
nvdu_root_path = nvdu.__path__[0]
config_root_path = path.join(nvdu_root_path, "config")
return config_root_path
def get_ycb_object_settings_path(ycb_model_type):
config_root_path = get_config_root_path()
ycb_object_settings_path = path.join(config_root_path, YCB_OBJECT_SETTINGS[ycb_model_type])
return ycb_object_settings_path
def get_ycb_root_dir(ycb_model_type):
return path.join(get_data_root_path(), YCB_DIR[ycb_model_type])
def get_ycb_object_url(ycb_obj_name):
ycb_obj_full_url = YCB_DATA_URL + "google/" + ycb_obj_name + YCB_URL_POST_FIX + ".tgz"
return ycb_obj_full_url
def get_ycb_object_dir(ycb_obj_name, model_type):
"""
Get the directory path of an ycb object
ycb_obj_name: name of the YCB object
model_type: YCBModelType - type of the YCB model
"""
ycb_obj_dir = path.join(get_ycb_root_dir(model_type), ycb_obj_name)
return ycb_obj_dir
def get_ycb_model_path(ycb_obj_name, model_type):
"""
Get the path to the .obj model of an ycb object
ycb_obj_name: name of the YCB object
model_type: YCBModelType - type of the YCB model
"""
ycb_obj_dir = path.join(get_ycb_root_dir(model_type), ycb_obj_name)
ycb_model_path = path.join(ycb_obj_dir, 'google_16k', 'textured.obj')
return ycb_model_path
def log_all_path_info():
ycb_dir_org = get_ycb_root_dir(YCBModelType.Original)
ycb_dir_aligned_cm = get_ycb_root_dir(YCBModelType.AlignedCm)
print("YCB original models: {}\nYCB aligned models in centimeter: {}".format(ycb_dir_org, ycb_dir_aligned_cm))
def log_path_info(ycb_obj_name):
ycb_obj_dir_org = get_ycb_object_dir(ycb_obj_name, YCBModelType.Original)
ycb_obj_dir_aligned_cm = get_ycb_object_dir(ycb_obj_name, YCBModelType.AlignedCm)
print("YCB object: '{}'\nOriginal model: {}\nAligned model:{}".format(ycb_obj_name, ycb_obj_dir_org, ycb_obj_dir_aligned_cm))
if not (path.exists(ycb_obj_dir_org) and path.exists(ycb_obj_dir_aligned_cm)):
print("WARNING: This YCB object model does not exist, please run 'nvdu_ycb --setup'")
def log_all_object_names():
print("Supported YCB objects:")
ycb_object_settings_org_path = get_ycb_object_settings_path(YCBModelType.Original)
all_ycb_object_settings = DatasetSettings.parse_from_file(ycb_object_settings_org_path)
for obj_name, obj_settings in all_ycb_object_settings.obj_settings.items():
# NOTE: The name of object in the object settings have postfix '_16k', we need to remove it
if obj_name.endswith('_16k'):
obj_name = obj_name[:-4]
print("'{}'".format(obj_name))
# =============================== Mesh functions ===============================
def transform_wavefront_file(src_file_path, dest_file_path, transform_matrix):
dest_dir = path.dirname(dest_file_path)
if (not path.exists(dest_dir)):
os.makedirs(dest_dir)
src_file = open(src_file_path, 'r')
dest_file = open(dest_file_path, 'w')
    # Keep a separate non-translation (rotation/scale only) matrix to use on the mesh's vertex normals
non_translation_matrix = Matrix44.from_matrix33(transform_matrix.matrix33)
# print("non_translation_matrix: {}".format(non_translation_matrix))
# Parse each lines in the original mesh file
for line in src_file:
line_args = line.split()
if len(line_args):
type = line_args[0]
# Transform each vertex
if (type == 'v'):
src_vertex = pyrr.Vector4([float(line_args[1]), float(line_args[2]), float(line_args[3]), 1.0])
dest_vertex = transform_matrix * src_vertex
dest_file.write("v {:.6f} {:.6f} {:.6f}\n".format(dest_vertex.x, dest_vertex.y, dest_vertex.z))
continue
# Transform each vertex normal of the mesh
elif (type == 'vn'):
src_vertex = pyrr.Vector4([float(line_args[1]), float(line_args[2]), float(line_args[3]), 1.0])
dest_vertex = non_translation_matrix * src_vertex
dest_vertex = pyrr.vector.normalize([dest_vertex.x, dest_vertex.y, dest_vertex.z])
dest_file.write("vn {:.6f} {:.6f} {:.6f}\n".format(dest_vertex[0], dest_vertex[1], dest_vertex[2]))
continue
dest_file.write(line)
src_file.close()
dest_file.close()
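# Illustrative usage sketch (the file paths are hypothetical): a pure scaling
# matrix, e.g. meters to centimeters, moves every vertex while the re-normalized
# vertex normals stay unchanged.
#
#   scale_m_to_cm = Matrix44.from_scale([100.0, 100.0, 100.0])
#   transform_wavefront_file("textured_m.obj", "textured_cm.obj", scale_m_to_cm)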
def extract_ycb_model(ycb_obj_name):
ycb_obj_dir = get_ycb_root_dir(YCBModelType.Original)
# Extract the .tgz to .tar
ycb_obj_local_file_name = ycb_obj_name + ".tgz"
ycb_obj_local_path = path.join(ycb_obj_dir, ycb_obj_local_file_name)
print("Extracting: '{}'".format(ycb_obj_local_path))
tar = tarfile.open(ycb_obj_local_path, 'r:gz')
tar.extractall(path=ycb_obj_dir)
tar.close()
def download_ycb_model(ycb_obj_name, auto_extract=False):
"""
Download an ycb object's 3d models
ycb_obj_name: string - name of the YCB object to download
auto_extract: bool - if True then automatically extract the downloaded tgz
"""
ycb_obj_full_url = get_ycb_object_url(ycb_obj_name)
ycb_obj_local_file_name = ycb_obj_name + ".tgz"
ycb_obj_local_dir = get_ycb_root_dir(YCBModelType.Original)
ycb_obj_local_path = path.join(ycb_obj_local_dir, ycb_obj_local_file_name)
if (not path.exists(ycb_obj_local_dir)):
os.makedirs(ycb_obj_local_dir)
print("Downloading:\nURL: '{}'\nFile:'{}'".format(ycb_obj_full_url, ycb_obj_local_path))
urllib.request.urlretrieve(ycb_obj_full_url, ycb_obj_local_path)
if (auto_extract):
extract_ycb_model(ycb_obj_name)
def align_ycb_model(ycb_obj_name, ycb_obj_settings=None):
# Use the default object settings file if it's not specified
if (ycb_obj_settings is None):
ycb_object_settings_org_path = get_ycb_object_settings_path(YCBModelType.Original)
all_ycb_object_settings = DatasetSettings.parse_from_file(ycb_object_settings_org_path)
ycb_obj_settings = all_ycb_object_settings.get_object_settings(ycb_obj_name)
if (ycb_obj_settings is None):
print("Can't find settings of object: '{}'".format(ycb_obj_name))
return
src_file_path = get_ycb_model_path(ycb_obj_name, YCBModelType.Original)
dest_file_path = get_ycb_model_path(ycb_obj_name, YCBModelType.AlignedCm)
# Transform the original models
print("Align model:\nSource:{}\nTarget:{}".format(src_file_path, dest_file_path))
# Use the fixed transform matrix to align YCB models
transform_wavefront_file(src_file_path, dest_file_path, ycb_obj_settings.initial_matrix)
src_dir = path.dirname(src_file_path)
dest_dir = path.dirname(dest_file_path)
# Copy the material and texture to the new directory
shutil.copy(path.join(src_dir, 'textured.mtl'), path.join(dest_dir, 'textured.mtl'))
shutil.copy(path.join(src_dir, 'texture_map.png'), path.join(dest_dir, 'texture_map.png'))
def setup_all_ycb_models():
"""
Read the original YCB object settings
For each object in the list:
Download the 16k 3d model
Extract the .tgz file
Convert the original model into the aligned one
"""
ycb_object_settings_org_path = get_ycb_object_settings_path(YCBModelType.Original)
all_ycb_object_settings = DatasetSettings.parse_from_file(ycb_object_settings_org_path)
for obj_name, obj_settings in all_ycb_object_settings.obj_settings.items():
# NOTE: The name of object in the object settings have postfix '_16k', we need to remove it
if obj_name.endswith('_16k'):
obj_name = obj_name[:-4]
print("Setting up object: '{}'".format(obj_name))
download_ycb_model(obj_name, True)
align_ycb_model(obj_name, obj_settings)
# break
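# Example shell usage via the `nvdu_ycb` console script (registered in
# setup.py); the object name below is illustrative:
#   $ nvdu_ycb --list                  # list all supported YCB objects
#   $ nvdu_ycb --setup                 # download, extract and align all models
#   $ nvdu_ycb 002_master_chef_can     # print path info for one object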
# =============================== Main ===============================
def main():
parser = argparse.ArgumentParser(description='NVDU YCB models Support')
parser.add_argument('ycb_object_name', type=str, nargs='?',
help="Name of the YCB object to check info", default=None)
parser.add_argument('-s', '--setup', action='store_true', help="Setup the YCB models for the FAT dataset", default=False)
parser.add_argument('-l', '--list', action='store_true', help="List all the supported YCB objects", default=False)
args = parser.parse_args()
if (args.list):
log_all_object_names()
if (args.setup):
setup_all_ycb_models()
else:
if (args.ycb_object_name):
log_path_info(args.ycb_object_name)
else:
# Print the info
log_all_path_info()
if __name__ == "__main__":
main() | Dataset_Utilities-master | nvdu/tools/nvdu_ycb.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
#!/usr/bin/env python
try:
import pyglet
except Exception as ex:
print("Can't import pyglet module: {}".format(ex))
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from ctypes import *
import argparse
from os import path
import sys
import nvdu
from nvdu.core import *
from nvdu.viz.nvdu_visualizer import *
from nvdu.viz.nvdu_viz_window import *
from nvdu.tools.nvdu_ycb import *
CAMERA_FOV_HORIZONTAL = 90
# By default, just visualize the current directory
DEFAULT_data_dir_path = '.'
DEFAULT_output_dir_path = ''
DEFAULT_CAMERA_SETTINGS_FILE_PATH = './config/camera_lov.json'
data_dir_path = DEFAULT_data_dir_path
# mesh_file_name = 'textured.obj'
# ============================= MAIN =============================
def main():
DEFAULT_WINDOW_WIDTH = 0
DEFAULT_WINDOW_HEIGHT = 0
# Launch Arguments
parser = argparse.ArgumentParser(description='NVDU Data Visualiser')
parser.add_argument('dataset_dir', type=str, nargs='?',
help="Dataset directory. This is where all the images (required) and annotation info (optional) are. Default is the current directory", default=DEFAULT_data_dir_path)
parser.add_argument('-m', '--model_dir', type=str, help="Model directory. Default is <path to nvdu module>/data/ycb/original/", default=get_ycb_root_dir(YCBModelType.Original))
parser.add_argument('-a', '--data_annot_dir', type=str, help="Directory path - where to find the annotation data. Default is the same directory as the dataset directory", default="")
parser.add_argument('-s', '--size', type=int, nargs=2, metavar=('WIDTH', 'HEIGHT'), help="Window's size: [width, height]. If not specified then the window fit the resolution of the camera", default=[DEFAULT_WINDOW_WIDTH, DEFAULT_WINDOW_HEIGHT])
parser.add_argument('-o', '--object_settings_path', type=str, help="Object settings file path")
parser.add_argument('-c', '--camera_settings_path', type=str, help="Camera settings file path", default=None)
parser.add_argument('-n', '--name_filters', type=str, nargs='*', help="The name filter of each frame. e.g: *.png", default=["*.png"])
parser.add_argument('--fps', type=float, help="How fast do we want to automatically change frame", default=10)
parser.add_argument('--auto_change', action='store_true', help="If specified, the visualizer will automatically change the frame", default=False)
parser.add_argument('-e', '--export_dir', type=str, help="Directory path - where to store the visualized images. If specified, the script will automatically export the visualized image to the export directory. If not specified, the current directory will be used.", default='')
parser.add_argument('--auto_export', action='store_true', help="If specified, the visualizer will automatically export the visualized frame to image file in the `export_dir` directory", default=False)
parser.add_argument('--ignore_fixed_transform', action='store_true', help="If specified, the visualizer will not use the fixed transform matrix for the 3d model", default=False)
# parser.add_argument('--gui', type=str, help="Show GUI window")
# subparsers = parser.add_subparsers(help='sub-command help')
# parser_export = subparsers.add_parser('export', help="Export the visualized frames to image files")
# parser_export.add_argument('--out_dir', type=str, help="Directory path - where to store the visualized images. If this is set, the script will automatically export the visualized image to the export directory", default='viz')
# parser_export.add_argument('--movie_name', type=str, help="Name of the movie", default='viz.mp4')
# parser_export.add_argument('--movie_fps', type=float, help="Framerate of the generated movie", default=30)
args = parser.parse_args()
print("args: {}".format(args))
# TODO: May want to add auto_export as a launch arguments flag
# auto_export = not (not args.export_dir)
auto_export = args.auto_export
dataset_dir_path = args.dataset_dir
data_annot_dir_path = args.data_annot_dir if (args.data_annot_dir) else dataset_dir_path
name_filters = args.name_filters
print("name_filters: {}".format(name_filters))
viz_dataset = NVDUDataset(dataset_dir_path, data_annot_dir_path, name_filters)
# frame_count = viz_dataset.scan()
# print("Number of frames in the dataset: {}".format(frame_count))
# NOTE: Just use the YCB models path for now
model_dir_path = args.model_dir
object_settings_path = args.object_settings_path
camera_settings_path = args.camera_settings_path
# If not specified then use the default object and camera settings files from the dataset directory
if (object_settings_path is None):
object_settings_path = NVDUDataset.get_default_object_setting_file_path(dataset_dir_path)
if (camera_settings_path is None):
camera_settings_path = NVDUDataset.get_default_camera_setting_file_path(dataset_dir_path)
dataset_settings = DatasetSettings.parse_from_file(object_settings_path, model_dir_path)
if (dataset_settings is None):
print("Error: Could not locate dataset settings at {}".format(object_settings_path))
else:
if path.exists(camera_settings_path):
camera_json_data = json.load(open(camera_settings_path))
dataset_settings.exporter_settings = ExporterSettings.parse_from_json_data(camera_json_data)
camera_intrinsic_settings = CameraIntrinsicSettings.from_json_file(camera_settings_path)
if (camera_intrinsic_settings is None):
print("Error: Could not locate camera settings at {}".format(camera_settings_path))
if (dataset_settings is None or camera_intrinsic_settings is None):
exit(1)
# print("camera_intrinsic_settings: {} - {}".format(camera_settings_path, camera_intrinsic_settings))
# By default fit the window size to the resolution of the images
# NOTE: Right now we don't really support scaling
width, height = args.size
if (width <= 0) or (height <= 0):
width = camera_intrinsic_settings.res_width
height = camera_intrinsic_settings.res_height
main_window = NVDUVizWindow(width, height, 'NVDU Data Visualiser')
main_window.visualizer.dataset_settings = dataset_settings
main_window.visualizer.visualizer_settings.ignore_initial_matrix = args.ignore_fixed_transform
main_window.dataset = viz_dataset
main_window.set_auto_fps(args.fps)
main_window.should_export = auto_export
main_window.set_auto_change_frame(args.auto_change)
main_window.export_dir = args.export_dir
main_window.setup()
main_window.set_camera_intrinsic_settings(camera_intrinsic_settings)
pyglet.app.run()
if __name__ == '__main__':
main()
| Dataset_Utilities-master | nvdu/tools/test_nvdu_visualizer.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode)
from .scene_object import *
class Box2d(SceneObject):
    # Create a box from its border
    def __init__(self, left, right, top, bottom):
        super(Box2d, self).__init__()
        self.left = left
        self.right = right
        self.top = top
        self.bottom = bottom
        # NOTE: generate_vertexes is expected to be provided by a subclass;
        # it is not defined on Box2d or SceneObject themselves.
        self.generate_vertexes()
    def get_width(self):
        return (self.right - self.left)
    def get_height(self):
        return (self.bottom - self.top)
    def get_size(self):
        return [self.get_width(), self.get_height()] | Dataset_Utilities-master | nvdu/core/box.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from .utils3d import *
from .transform3d import *
class SceneObject(object):
def __init__(self, in_parent_object = None):
# Relative transform relate to the parent object
self._relative_transform = transform3d()
# List of child object attached to this scene object
self.child_objects = []
# The object which this object is attached to
self.parent_object = in_parent_object
self.attach_to_object(in_parent_object)
def attach_to_object(self, new_parent_object):
if (self.parent_object):
self.parent_object.remove_child_object(self)
self.parent_object = new_parent_object
if (self.parent_object):
self.parent_object.child_objects.append(self)
def remove_child_object(self, child_object_to_be_removed):
if (child_object_to_be_removed):
try:
self.child_objects.remove(child_object_to_be_removed)
except ValueError:
pass
def set_relative_transform(self, new_location, new_quaternion):
self._relative_transform.set_location(new_location)
self._relative_transform.set_quaternion(new_quaternion)
def get_relative_transform(self):
return self._relative_transform
def get_relative_transform_matrix(self):
return self._relative_transform.to_matrix()
def get_world_transform_matrix(self):
"""Get the World-to-Object transform matrix"""
if (self.parent_object is None):
return self.get_relative_transform_matrix()
parent_world_matrix = self.parent_object.get_world_transform_matrix()
world_transform_matrix = parent_world_matrix * self.get_relative_transform_matrix()
return world_transform_matrix
| Dataset_Utilities-master | nvdu/core/scene_object.py |
from .utils3d import *
from .transform3d import *
from .scene_object import *
from .cuboid import *
from .camera import *
from .box import *
from .nvdu_data import * | Dataset_Utilities-master | nvdu/core/__init__.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from .scene_object import *
# ========================= Cuboid2d =========================
class Mesh(SceneObject):
"""Container for a 3d model"""
def __init__(self, mesh_file_path=None, in_parent_object = None):
super(Mesh, self).__init__(in_parent_object)
self.source_file_path = mesh_file_path
def set_initial_matrix(self, new_initial_matrix):
self._relative_transform.set_initial_matrix(new_initial_matrix)
def get_initial_matrix(self):
return self._relative_transform.initial_matrix
| Dataset_Utilities-master | nvdu/core/mesh.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import numpy as np
import json
from os import path
from .transform3d import *
from .utils3d import *
class CameraIntrinsicSettings(object):
DEFAULT_ZNEAR = 1
# DEFAULT_ZFAR = 100000.0
DEFAULT_ZFAR = DEFAULT_ZNEAR
def __init__(self,
res_width = 640.0, res_height = 480.0,
fx = 640.0, fy = 640.0,
cx = 320.0, cy = 240.0,
projection_matrix = None):
self.res_width = res_width
self.res_height = res_height
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
self.znear = self.DEFAULT_ZNEAR
self.zfar = self.DEFAULT_ZFAR
self.projection_matrix = projection_matrix
@staticmethod
def from_perspective_fov_horizontal(res_width = 640.0, res_height = 480.0, hfov = 90.0):
'''
Create camera intrinsics settings from 3d rendering horizontal field of view
'''
cx = res_width / 2.0
cy = res_height / 2.0
fx = cx / np.tan(np.deg2rad(hfov) / 2.0)
fy = fx
# print("CameraIntrinsicSettings: res_width = {} - res_height = {} - hfov = {} - cx = {} - cy = {} - fx = {} - fy = {}".format(
# res_width, res_height, hfov, cx, cy, fx, fy))
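        # Worked example (comment only): for a 640 x 480 image with a 90 degree
        # horizontal FOV, cx = 320, cy = 240 and fx = fy = 320 / tan(45 deg) = 320.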
        new_cam_intrinsics = CameraIntrinsicSettings(res_width, res_height, fx, fy, cx, cy)
        return new_cam_intrinsics
@staticmethod
def from_json_object(json_obj):
intrinsic_settings = json_obj["intrinsic_settings"] if ("intrinsic_settings" in json_obj) else None
if (intrinsic_settings is None):
return None
print("intrinsic_settings: {}".format(intrinsic_settings))
try:
captured_image_size = json_obj['captured_image_size']
res_width = captured_image_size['width']
res_height = captured_image_size['height']
except KeyError:
print("*** Error ***: 'captured_image_size' is not present in camera settings file. Using default 640 x 480.")
res_width = 640
res_height = 480
fx = intrinsic_settings['fx'] if ('fx' in intrinsic_settings) else 640.0
fy = intrinsic_settings['fy'] if ('fy' in intrinsic_settings) else 640.0
cx = intrinsic_settings['cx'] if ('cx' in intrinsic_settings) else (res_width / 2.0)
cy = intrinsic_settings['cy'] if ('cy' in intrinsic_settings) else (res_height / 2.0)
projection_matrix_json = json_obj["cameraProjectionMatrix"] if ("cameraProjectionMatrix" in json_obj) else None
projection_matrix = None
if (not projection_matrix_json is None):
projection_matrix = Matrix44(projection_matrix_json)
projection_matrix[2, 0] = -projection_matrix[2, 0]
projection_matrix[2, 1] = -projection_matrix[2, 1]
projection_matrix[2, 3] = -projection_matrix[2, 3]
projection_matrix[3, 2] = -projection_matrix[3, 2]
# print("projection_matrix_json: {}".format(projection_matrix_json))
print("projection_matrix: {}".format(projection_matrix))
return CameraIntrinsicSettings(res_width, res_height, fx, fy, cx, cy, projection_matrix)
@staticmethod
def from_json_file(json_file_path):
if (path.exists(json_file_path)):
with open(json_file_path, 'r') as json_file:
json_obj = json.load(json_file)
if ('camera_settings' in json_obj):
viewpoint_list = json_obj['camera_settings']
# TODO: Need to parse all the viewpoints information, right now we only parse the first viewpoint
viewpoint_obj = viewpoint_list[0]
return CameraIntrinsicSettings.from_json_object(viewpoint_obj)
return None
def get_intrinsic_matrix(self):
"""
Get the camera intrinsic matrix as numpy array
"""
intrinsic_matrix = np.array([
[self.fx, 0, self.cx],
[0, self.fy, self.cy],
[0, 0, 1.0]
], dtype='double')
return intrinsic_matrix
def get_projection_matrix(self):
if (self.projection_matrix is None):
self.calculate_projection_matrix()
return self.projection_matrix
def calculate_projection_matrix(self):
zdiff = float(self.zfar - self.znear)
a = (2.0 * self.fx) / float(self.res_width)
b = (2.0 * self.fy) / float(self.res_height)
# print('a: {} - b: {}'.format(a, b))
c = -self.znear / zdiff if (zdiff > 0) else 0
d = (self.znear * self.zfar) / zdiff if (zdiff > 0) else (-self.znear)
c1 = 1.0 - (2.0 * self.cx) / self.res_width
c2 = (2.0 * self.cy) / self.res_height - 1.0
self.projection_matrix = Matrix44([
[a, 0, 0, 0],
[0, b, 0, 0],
[c1, c2, c, d],
[0, 0, -1.0, 0]
])
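# Worked example with the default intrinsics (fx = fy = 640, 640 x 480, cx = 320,
# cy = 240, znear = zfar = 1): a = 2 * 640 / 640 = 2.0, b = 2 * 640 / 480 ~= 2.667,
# c1 = 1 - 640 / 640 = 0, c2 = 480 / 480 - 1 = 0, and since zdiff = 0 the fallback
# branch gives c = 0 and d = -znear = -1.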
def __str__(self):
return "{}".format(self.get_intrinsic_matrix()) | Dataset_Utilities-master | nvdu/core/camera.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
# =============================== Data parsing ===============================
# ========================= CoordinateSystem =========================
# TODO: May just want to use the Transform3d class
class CoordinateSystem(object):
"""This class present a coordinate system with 3 main directions
Each direction is represent by a vector in OpenCV coordinate system
By default in OpenCV coordinate system:
Forward: Z - [0, 0, 1]
Right: X - [1, 0, 0]
Up: -Y - [0, -1, 0]
"""
def __init__(self,
forward = [0, 0, 1],
right = [1, 0, 0],
up = [0, -1, 0]):
self.forward = forward
self.right = right
self.up = up
# TODO: Build the transform matrix to convert from OpenCV to this coordinate system
# ========================= Rotator =========================
class Rotator():
def __init__(self, angles = [0, 0, 0]):
# Store the angle (in radian) rotated in each axis: X, Y, Z
self.angles = angles
# NOTE: All the calculation use the OpenCV coordinate system
# http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/OWENS/LECT9/node2.html
def __str__(self):
return "({}, {}, {})".format(self.angles[0], self.angles[1], self.angles[2])
@property
def yaw(self):
return self.angles[1]
@property
def pitch(self):
return self.angles[0]
@property
def roll(self):
return self.angles[2]
@staticmethod
def create_from_yaw_pitch_roll(yaw = 0, pitch = 0, roll = 0):
return Rotator([pitch, yaw, roll])
@staticmethod
def create_from_yaw_pitch_roll_degree(yaw = 0, pitch = 0, roll = 0):
return Rotator([np.deg2rad(pitch), np.deg2rad(yaw), np.deg2rad(roll)])
def add(self, other_rotator):
return Rotator([
self.angles[0] + other_rotator.angles[0],
self.angles[1] + other_rotator.angles[1],
self.angles[2] + other_rotator.angles[2],
])
# https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix
# NOTE: The Tait-Bryan angle order is reversed, so instead of ZXY we must use YXZ
# NOTE: The rotation order is Yaw Pitch Roll or Y X Z
# Output: 3x3 rotation row-major matrix
def to_rotation_matrix(self):
x, y, z = self.angles
# Z1 X2 Y3
c1 = np.cos(z)
s1 = np.sin(z)
c2 = np.cos(x)
s2 = np.sin(x)
c3 = np.cos(y)
s3 = np.sin(y)
return np.array(
[
[
c1 * c3 - s1 * s2 * s3,
-c2 * s1,
c1 * s3 + c3 * s1 * s2
],
[
c3 * s1 + c1 * s2 * s3,
c1 * c2,
s1 * s3 - c1 * c3 * s2
],
[
-c2 * s3,
s2,
c2 * c3
]
]
)
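# Sanity-check sketch (hypothetical values): a pure 90 degree yaw should rotate
# the OpenCV forward axis onto the right axis:
#   r = Rotator.create_from_yaw_pitch_roll_degree(yaw=90)
#   r.to_rotation_matrix().dot(np.array([0, 0, 1]))  # ~= [1, 0, 0]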
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Euler_Angles_to_Quaternion_Conversion
# NOTE: The rotation order is Yaw Pitch Roll or Y X Z
def to_quaternion(self):
x, y, z = self.angles
halfX = x * 0.5
halfY = y * 0.5
halfZ = z * 0.5
cX = np.cos(halfX)
sX = np.sin(halfX)
cY = np.cos(halfY)
sY = np.sin(halfY)
cZ = np.cos(halfZ)
sZ = np.sin(halfZ)
return np.array(
[
-sX * cY * cZ - cX * sY * sZ,
sX * cY * sZ - cX * sY * cZ,
sX * sY * cZ - cX * cY * sZ,
sX * sY * sZ + cX * cY * cZ
]
) | Dataset_Utilities-master | nvdu/core/utils3d.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
# import future
# from enum import IntEnum, unique
import numpy as np
import cv2
from pyrr import Vector3
from .scene_object import *
# Related to the object's local coordinate system
# @unique
# class CuboidVertexType(IntEnum):
class CuboidVertexType():
FrontTopRight = 0
FrontTopLeft = 1
FrontBottomLeft = 2
FrontBottomRight = 3
RearTopRight = 4
RearTopLeft = 5
RearBottomLeft = 6
RearBottomRight = 7
Center = 8
TotalCornerVertexCount = 8 # Corner vertices don't include the center point
TotalVertexCount = 9
# List of the vertex indexes in each line edges of the cuboid
CuboidLineIndexes = [
# Front face
[ CuboidVertexType.FrontTopLeft, CuboidVertexType.FrontTopRight ],
[ CuboidVertexType.FrontTopRight, CuboidVertexType.FrontBottomRight ],
[ CuboidVertexType.FrontBottomRight, CuboidVertexType.FrontBottomLeft ],
[ CuboidVertexType.FrontBottomLeft, CuboidVertexType.FrontTopLeft ],
# Back face
[ CuboidVertexType.RearTopLeft, CuboidVertexType.RearTopRight ],
[ CuboidVertexType.RearTopRight, CuboidVertexType.RearBottomRight ],
[ CuboidVertexType.RearBottomRight, CuboidVertexType.RearBottomLeft ],
[ CuboidVertexType.RearBottomLeft, CuboidVertexType.RearTopLeft ],
# Left face
[ CuboidVertexType.FrontBottomLeft, CuboidVertexType.RearBottomLeft ],
[ CuboidVertexType.FrontTopLeft, CuboidVertexType.RearTopLeft ],
# Right face
[ CuboidVertexType.FrontBottomRight, CuboidVertexType.RearBottomRight ],
[ CuboidVertexType.FrontTopRight, CuboidVertexType.RearTopRight ],
]
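# Usage sketch (hypothetical draw_line helper): the index pairs above describe the
# 12 wireframe edges of the cuboid, so a renderer can simply iterate them:
#   for vi0, vi1 in CuboidLineIndexes:
#       draw_line(cuboid2d.get_vertex(vi0), cuboid2d.get_vertex(vi1))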
# ========================= Cuboid2d =========================
class Cuboid2d(SceneObject):
"""Container for 2d projected points of a cuboid on an image"""
def __init__(self, vertices=[]):
"""Create a cuboid 2d from a list of 2d points
Args:
vertices: array-like of 8 or 9 2d points; the 9th point, when present, is the projected center
"""
super(Cuboid2d, self).__init__()
self._vertices = np.array(vertices)
# print('Cuboid2d - vertices: {}'.format(self._vertices))
def get_vertex(self, vertex_type):
"""Get the location of a vertex
Args:
vertex_type: enum of type CuboidVertexType
Return:
Numpy array(2) - Location of the vertex in the image
"""
return self._vertices[vertex_type]
def get_vertices(self):
return self._vertices
# ========================= Cuboid3d =========================
class Cuboid3d(SceneObject):
# Create a box with a certain size
# TODO: Instead of using center_location and coord_system, should pass in a Transform3d
def __init__(self, size3d = [1.0, 1.0, 1.0], center_location = [0, 0, 0],
coord_system = None, parent_object = None):
super(Cuboid3d, self).__init__(parent_object)
# NOTE: This local coordinate system is similar
# to the intrinsic transform matrix of a 3d object
self.center_location = center_location
self.coord_system = coord_system
self.size3d = size3d
self._vertices = [[0, 0, 0]] * CuboidVertexType.TotalVertexCount
self.generate_vertexes()
def get_vertex(self, vertex_type):
"""Get the location of a vertex
Args:
vertex_type: enum of type CuboidVertexType
Return:
Numpy array(3) - Location of the vertex type in the cuboid
"""
return self._vertices[vertex_type]
def get_vertices(self):
return self._vertices
def generate_vertexes(self):
width, height, depth = self.size3d
# By default just use the normal OpenCV coordinate system
if (self.coord_system is None):
cx, cy, cz = self.center_location
# X axis point to the right
right = cx + width / 2.0
left = cx - width / 2.0
# Y axis point downward
top = cy - height / 2.0
bottom = cy + height / 2.0
# Z axis point forward
front = cz + depth / 2.0
rear = cz - depth / 2.0
# List of 8 vertices of the box
self._vertices = [
[right, top, front], # Front Top Right
[left, top, front], # Front Top Left
[left, bottom, front], # Front Bottom Left
[right, bottom, front], # Front Bottom Right
[right, top, rear], # Rear Top Right
[left, top, rear], # Rear Top Left
[left, bottom, rear], # Rear Bottom Left
[right, bottom, rear], # Rear Bottom Right
self.center_location, # Center
]
else:
# NOTE: should use quaternion for initial transform
sx, sy, sz = self.size3d
forward = np.array(self.coord_system.forward, dtype=float) * sy * 0.5
up = np.array(self.coord_system.up, dtype=float) * sz * 0.5
right = np.array(self.coord_system.right, dtype=float) * sx * 0.5
center = np.array(self.center_location, dtype=float)
self._vertices = [
center + forward + up + right, # Front Top Right
center + forward + up - right, # Front Top Left
center + forward - up - right, # Front Bottom Left
center + forward - up + right, # Front Bottom Right
center - forward + up + right, # Rear Top Right
center - forward + up - right, # Rear Top Left
center - forward - up - right, # Rear Bottom Left
center - forward - up + right, # Rear Bottom Right
self.center_location, # Center
]
# print("cuboid3d - forward: {} - up: {} - right: {}".format(forward, up, right))
# print("cuboid3d - size3d: {}".format(self.size3d))
# print("cuboid3d - depth: {} - width: {} - height: {}".format(depth, width, height))
# print("cuboid3d - vertices: {}".format(self._vertices))
def get_projected_cuboid2d(self, cuboid_transform, camera_intrinsic_matrix):
"""
Project the cuboid into the projection plane using CameraIntrinsicSettings to get a cuboid 2d
Args:
cuboid_transform: the world transform of the cuboid
camera_intrinsic_matrix: camera intrinsic matrix
Return:
Cuboid2d - the projected cuboid points
"""
# projected_vertices = [0, 0] * CuboidVertexType.TotalVertexCount
# world_transform_matrix = self.get_world_transform_matrix()
world_transform_matrix = cuboid_transform
rvec = [0, 0, 0]
tvec = [0, 0, 0]
dist_coeffs = np.zeros((4, 1))
transformed_vertices = [None] * CuboidVertexType.TotalVertexCount
for vertex_index in range(CuboidVertexType.TotalVertexCount):
vertex3d = self._vertices[vertex_index]
transformed_vertices[vertex_index] = world_transform_matrix * Vector3(vertex3d)
# NOTE: cv2.projectPoints returns a (points, jacobian) tuple and the points have
# shape (N, 1, 2) => keep only the points and flatten them to (N, 2)
projected_vertices, _ = cv2.projectPoints(np.array(transformed_vertices, dtype=np.float64),
np.array(rvec, dtype=np.float64), np.array(tvec, dtype=np.float64),
camera_intrinsic_matrix, dist_coeffs)
return Cuboid2d(projected_vertices.reshape(-1, 2))
| Dataset_Utilities-master | nvdu/core/cuboid.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from .scene_object import *
# ========================= PivotAxis =========================
class PivotAxis(SceneObject):
"""Object representing a pivot axis in 3d space"""
def __init__(self, in_size3d = [1.0, 1.0, 1.0], in_parent_object = None):
super(PivotAxis, self).__init__(in_parent_object)
self.size3d = in_size3d
self.generate_vertexes()
def generate_vertexes(self):
self.origin_loc = [0.0, 0.0, 0.0]
x, y, z = self.size3d
self.x_axis = [x, 0.0, 0.0]
self.y_axis = [0.0, y, 0.0]
self.z_axis = [0.0, 0.0, z]
| Dataset_Utilities-master | nvdu/core/pivot_axis.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import future
from os import listdir, path
from ctypes import *
import json
# from typing import List, Dict, Tuple
import fnmatch
import glob
import numpy as np
import cv2
import pyrr
import copy
from fuzzyfinder import fuzzyfinder
from .utils3d import *
from .transform3d import *
from .cuboid import *
from .pivot_axis import *
from .mesh import *
from .camera import *
FrameDataExt = ".json"
FrameImageExt = ".png"
FrameImageExt_Depth = ".depth"
FrameImageExt_Pls = ".pls"
FrameImageExt_Pls_no = ".pls_no"
FrameAspectDict = {
'main': {
'name': 'Main',
'ext': ''
},
'depth': {
'name': 'Depth',
'ext': FrameImageExt_Depth
},
'pls': {
'name': 'Pixel Level Segmentation',
'ext': FrameImageExt_Pls
},
'pls_no': {
'name': 'Pixel Level Segmentation No Occlusion',
'ext': FrameImageExt_Pls_no
}
}
# =============================== Helper functions ===============================
DEFAULT_MESH_NAME_FORMAT = "{}/google_16k/textured.obj"
def get_mesh_file_path(mesh_folder_path, mesh_name, mesh_name_format=DEFAULT_MESH_NAME_FORMAT):
# NOTE: The name of object in the object settings have postfix '_16k', we need to remove it
if mesh_name.endswith('_16k'):
mesh_name = mesh_name[:-4]
return path.join(mesh_folder_path, mesh_name_format.format(mesh_name))
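# Worked example (hypothetical path): the '_16k' postfix is stripped before the
# name format is applied:
#   get_mesh_file_path('/meshes', '002_master_chef_can_16k')
#   # -> '/meshes/002_master_chef_can/google_16k/textured.obj'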
DEFAULT_FRAME_NAME_FORMAT = "{0:06d}"
def get_frame_name(frame_index, frame_name_format=DEFAULT_FRAME_NAME_FORMAT):
"""Get the number part of a frame's name"""
frame_number_name = frame_name_format.format(frame_index)
return frame_number_name
def is_frame_exists(data_dir_path, frame_index, frame_name_format=DEFAULT_FRAME_NAME_FORMAT):
frame_data_file_name = get_frame_name(frame_index, frame_name_format) + FrameImageExt
frame_data_file_path = path.join(data_dir_path, frame_data_file_name)
# print("Checking frame {} at: {}".format(frame_index, frame_data_file_path))
return path.exists(frame_data_file_path)
def is_frame_data_exists(data_dir_path, frame_index, frame_name_format=DEFAULT_FRAME_NAME_FORMAT):
frame_data_file_name = get_frame_name(frame_index, frame_name_format) + FrameDataExt
frame_data_file_path = path.join(data_dir_path, frame_data_file_name)
# print("Checking frame {} at: {}".format(frame_index, frame_data_file_path))
return path.exists(frame_data_file_path)
def get_dataset_setting_file_path(data_dir_path):
return path.join(data_dir_path, '_settings.json')
def get_dataset_object_setting_file_path(data_dir_path):
return path.join(data_dir_path, '_object_settings.json')
def get_frame_data_path(data_dir_path, frame_index, frame_name_format=DEFAULT_FRAME_NAME_FORMAT):
frame_data_file_name = get_frame_name(frame_index, frame_name_format) + FrameDataExt
frame_data_file_path = path.join(data_dir_path, frame_data_file_name)
return frame_data_file_path
def get_frame_image_path(data_dir_path, frame_index, frame_name_format=DEFAULT_FRAME_NAME_FORMAT, aspect_id='main'):
frame_name = get_frame_name(frame_index, frame_name_format)
frame_aspect_data = FrameAspectDict[aspect_id]
aspect_ext = frame_aspect_data['ext']
frame_aspect_image_file = path.join(data_dir_path, frame_name) + aspect_ext + FrameImageExt
return frame_aspect_image_file
# Get the total number of frames in a data set
# data_dir_path: path to the dataset's directory
def get_number_of_frames(data_dir_path, frame_name_format=DEFAULT_FRAME_NAME_FORMAT):
number_of_frame = 0
min_index = 1
# NOTE: We only count till 1000000 frames for now
max_index = 1000000
while (min_index <= max_index):
check_index = int((min_index + max_index) / 2)
frame_exists = is_frame_exists(data_dir_path, check_index, frame_name_format)
if frame_exists:
# NOTE: The frame start from 0 => the number of frame will be the last index + 1
number_of_frame = check_index + 1
min_index = check_index + 1
else:
max_index = check_index - 1
return number_of_frame
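# Worked example: with contiguous frames 000000..000149 on disk, the binary search
# above finds 149 as the highest existing index and returns 149 + 1 = 150.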
class NVDUDataset(object):
DEFAULT_IMAGE_NAME_FILTERS = ["*.png"]
def __init__(self, in_dataset_dir,
in_annotation_dir = None,
in_img_name_filters = None):
self._dataset_dir = in_dataset_dir
self._annotation_dir = in_annotation_dir if (not in_annotation_dir is None) else self._dataset_dir
self._img_name_filters = in_img_name_filters if (not in_img_name_filters is None) else NVDUDataset.DEFAULT_IMAGE_NAME_FILTERS
self._frame_names = []
self._frame_count = 0
@property
def frame_names(self):
return self._frame_names
@property
def frame_count(self):
return self._frame_count
@property
def camera_setting_file_path(self):
return NVDUDataset.get_default_camera_setting_file_path(self._dataset_dir)
@property
def object_setting_file_path(self):
return NVDUDataset.get_default_object_setting_file_path(self._dataset_dir)
# Scan the dataset and return how many frames it contains
def scan(self):
self._frame_names = []
if not path.exists(self._dataset_dir):
return 0
print("scan - _dataset_dir: {} - _img_name_filters: {}".format(self._dataset_dir, self._img_name_filters))
for file_name in listdir(self._dataset_dir):
check_file_path = path.join(self._dataset_dir, file_name)
if path.isfile(check_file_path):
is_name_match_filters = any(fnmatch.fnmatch(file_name, check_filter) for check_filter in self._img_name_filters)
# is_name_match_filters = False
# for check_filter in self._img_name_filters:
# if fnmatch.fnmatch(file_name, check_filter):
# is_name_match_filters = True
# print("check_filter: {} - file_name: {} - is good".format(check_filter, file_name))
# break
if (is_name_match_filters):
# NOTE: Consider frame name as the file name without its extension
frame_name = path.splitext(file_name)[0]
# NOTE: Consider frame name as the first part of the string before the '.'
# frame_name = file_name.split(".")[0]
# Check if it have annotation data or not
check_annotation_file_path = self.get_annotation_file_path_of_frame(frame_name)
# print("file_name: {} - check_file_path: {} - frame_name: {} - check_annotation_file_path: {}".format(file_name, check_file_path, frame_name, check_annotation_file_path))
if path.exists(check_annotation_file_path):
self._frame_names.append(frame_name)
self._frame_names = sorted(self._frame_names)
self._frame_count = len(self._frame_names)
# print("_frame_names: {}".format(self._frame_names))
return self._frame_count
def get_image_file_path_of_frame(self, in_frame_name):
# NOTE: search inside the dataset directory instead of the current working directory
for existing_file_path in glob.glob(path.join(self._dataset_dir, in_frame_name + '*')):
file_name = path.basename(existing_file_path)
for name_filter in self._img_name_filters:
if fnmatch.fnmatch(file_name, name_filter):
return existing_file_path
raise Exception('File not found: {}.*'.format(in_frame_name))
def get_annotation_file_path_of_frame(self, in_frame_name):
return path.join(self._annotation_dir, in_frame_name + FrameDataExt)
def get_frame_name_from_index(self, in_frame_index):
return self._frame_names[in_frame_index] if ((in_frame_index >= 0) and (in_frame_index < len(self._frame_names))) else ""
# Return the frame's image file path and annotation file path when know its index
def get_frame_file_path_from_index(self, in_frame_index):
frame_name = self.get_frame_name_from_index(in_frame_index)
return self.get_frame_file_path_from_name(frame_name)
# Return the frame's image file path and annotation file path when know its name
def get_frame_file_path_from_name(self, in_frame_name):
if (not in_frame_name):
return ("", "")
image_file_path = self.get_image_file_path_of_frame(in_frame_name)
annotation_file_path = self.get_annotation_file_path_of_frame(in_frame_name)
return (image_file_path, annotation_file_path)
@staticmethod
def get_default_camera_setting_file_path(data_dir_path):
return path.join(data_dir_path, '_camera_settings.json')
@staticmethod
def get_default_object_setting_file_path(data_dir_path):
return path.join(data_dir_path, '_object_settings.json')
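# Usage sketch (hypothetical dataset path):
#   dataset = NVDUDataset('/data/my_dataset')
#   frame_count = dataset.scan()  # counts frames that have both an image and a .json annotation
#   image_path, annotation_path = dataset.get_frame_file_path_from_index(0)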
# =============================== Dataset Settings ===============================
# Class represent the settings data for each exported object
class ExportedObjectSettings(object):
def __init__(self, name = '', mesh_file_path = '', initial_matrix = None,
cuboid_dimension = Vector3([0, 0, 0]), cuboid_center = Vector3([0, 0, 0]),
coord_system = CoordinateSystem(), obj_class_id = 0, obj_color = None):
self.name = name
self.mesh_file_path = mesh_file_path
self.initial_matrix = initial_matrix
self.class_id = obj_class_id
# If the object's color is not specified, derive it automatically from the class id
if (obj_color is None):
self.class_color = [0, 0, 0, 255]
self.class_color[0] = int((self.class_id >> 5) / 7.0 * 255)
self.class_color[1] = int(((self.class_id >> 2) & 7) / 7.0 * 255)
self.class_color[2] = int((self.class_id & 3) / 3.0 * 255)
else:
self.class_color = obj_color
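# The fallback above packs the class id's bits into RGB (3 bits red, 3 bits green,
# 2 bits blue), e.g. class_id = 255 (0b11111111) -> (255, 255, 255, 255) and
# class_id = 32 (0b00100000) -> (int(1 / 7.0 * 255), 0, 0, 255) = (36, 0, 0, 255).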
self.cuboid_dimension = cuboid_dimension
self.cuboid_center_local = cuboid_center
self.coord_system = coord_system
self.cuboid3d = Cuboid3d(self.cuboid_dimension, self.cuboid_center_local, self.coord_system)
self.mesh_model = None
self.pivot_axis = PivotAxis(np.array(self.cuboid_dimension))
def __str__(self):
return "({} - {} - {})".format(self.name, self.mesh_file_path, self.initial_matrix)
class ExporterSettings(object):
def __init__(self):
self.captured_image_size = [0, 0]
@classmethod
def parse_from_json_data(cls, json_data):
parsed_exporter_settings = ExporterSettings()
parsed_exporter_settings.captured_image_size = [json_data['camera_settings'][0]['captured_image_size']['width'],
json_data['camera_settings'][0]['captured_image_size']['height']]
print("parsed_exporter_settings.captured_image_size: {}".format(parsed_exporter_settings.captured_image_size))
return parsed_exporter_settings
class DatasetSettings():
def __init__(self, mesh_dir_path=''):
self.mesh_dir_path = mesh_dir_path
self.obj_settings = {}
self.exporter_settings = ExporterSettings()
self.coord_system = CoordinateSystem()
@classmethod
def parse_from_json_data(cls, json_data, mesh_dir_path=''):
parsed_settings = DatasetSettings(mesh_dir_path)
coord_system = None
parsed_settings.coord_system = coord_system
for check_obj in json_data['exported_objects']:
obj_class = check_obj['class']
obj_mesh_file_path = get_mesh_file_path(parsed_settings.mesh_dir_path, obj_class)
obj_initial_matrix = Matrix44(check_obj['fixed_model_transform'])
obj_cuboid_dimension = check_obj['cuboid_dimensions'] if ('cuboid_dimensions' in check_obj) else Vector3([0, 0, 0])
obj_cuboid_center = check_obj['cuboid_center_local'] if ('cuboid_center_local' in check_obj) else Vector3([0, 0, 0])
obj_class_id = int(check_obj['segmentation_class_id']) if ('segmentation_class_id' in check_obj) else 0
obj_color = check_obj['color'] if ('color' in check_obj) else None
new_obj_info = ExportedObjectSettings(obj_class, obj_mesh_file_path,
obj_initial_matrix, obj_cuboid_dimension, obj_cuboid_center, coord_system, obj_class_id, obj_color)
# print('scan_settings_data: {}'.format(new_obj_info))
# print('obj_mesh_file_path: {}'.format(obj_mesh_file_path))
parsed_settings.obj_settings[obj_class] = new_obj_info
return parsed_settings
@classmethod
def parse_from_file(cls, setting_file_path, mesh_dir_path=''):
parsed_settings = None
if (path.exists(setting_file_path)):
json_data = json.load(open(setting_file_path))
parsed_settings = cls.parse_from_json_data(json_data, mesh_dir_path)
print('parse_from_file: setting_file_path: {} - mesh_dir_path: {} - parsed_settings: {}'.format(
setting_file_path, mesh_dir_path, parsed_settings))
return parsed_settings
@classmethod
def parse_from_dataset(cls, dataset_dir_path, mesh_dir_path=''):
setting_file_path = get_dataset_object_setting_file_path(dataset_dir_path)
# print('parse_from_dataset: dataset_dir_path: {} - mesh_dir_path: {}'.format(dataset_dir_path, mesh_dir_path))
return cls.parse_from_file(setting_file_path, mesh_dir_path)
# Get the settings info for a specified object class
def get_object_settings(self, object_class):
if (object_class in self.obj_settings):
return self.obj_settings[object_class]
# If there is no exact match for the object_class name, try to find the closest match using fuzzy find
all_object_classes = list(self.obj_settings.keys())
fuzzy_object_classes = list(fuzzyfinder(object_class, all_object_classes))
if (len(fuzzy_object_classes) > 0):
fuzzy_object_class = fuzzy_object_classes[0]
# print("fuzzy_object_classes: {} - fuzzy_object_class: {}".format(fuzzy_object_classes, fuzzy_object_class))
return self.obj_settings[fuzzy_object_class]
return None
# =============================== AnnotatedObjectInfo ===============================
# Class contain annotation data of each object in the scene
class AnnotatedObjectInfo(SceneObject):
def __init__(self, dataset_settings, obj_class = '', name = ''):
super(AnnotatedObjectInfo, self).__init__()
self.name = name
self.obj_class = obj_class
self.object_settings = dataset_settings.get_object_settings(obj_class) if not (dataset_settings is None) else None
self.location = pyrr.Vector3()
self.cuboid_center = None
self.quaternion = pyrr.Quaternion([0.0, 0.0, 0.0, 1.0])
# self.bb2d = BoundingBox()
self.cuboid2d = None
self.keypoints = []
if not (self.object_settings is None):
self.dimension = self.object_settings.cuboid_dimension
self.cuboid3d = copy.deepcopy(self.object_settings.cuboid3d) if (not self.object_settings is None) else None
self.mesh = Mesh(self.object_settings.mesh_file_path)
self.mesh.set_initial_matrix(self.object_settings.initial_matrix)
self.pivot_axis = self.object_settings.pivot_axis
else:
self.dimension = None
self.cuboid3d = None
self.mesh = None
self.pivot_axis = None
self.is_modified = False
self.relative_transform = transform3d()
# Parse and create an annotated object from a json object
@classmethod
def parse_from_json_object(cls, dataset_settings, json_obj):
try:
obj_class = json_obj['class']
# print('parse_from_json_object: dataset_settings: {} - name: {} - class: {}'.format(
# dataset_settings, obj_name, obj_class))
except KeyError:
print("*** Error ***: 'class' is not present in annotation file. Using default '002_master_chef_can_16k'.")
obj_class = '002_master_chef_can_16k'
parsed_object = AnnotatedObjectInfo(dataset_settings, obj_class)
if ('location' in json_obj):
parsed_object.location = json_obj['location']
if ('quaternion_xyzw' in json_obj):
parsed_object.quaternion = Quaternion(json_obj['quaternion_xyzw'])
if ('cuboid_centroid' in json_obj):
parsed_object.cuboid_center = json_obj['cuboid_centroid']
# TODO: Parse bounding box 2d
# json_obj['bounding_rectangle_imagespace']
# Parse the cuboid in image space
if ('projected_cuboid' in json_obj):
img_width, img_height = dataset_settings.exporter_settings.captured_image_size
cuboid2d_vertices_json_data = json_obj['projected_cuboid']
# Convert the fraction coordinate to absolute coordinate
# cuboid2d_vertices = list([img_width * vertex['x'], img_height * vertex['y']] for vertex in cuboid2d_vertices_json_data)
cuboid2d_vertices = cuboid2d_vertices_json_data
# print('img_width: {} - img_height: {}'.format(img_width, img_height))
# print('cuboid2d_vertices: {}'.format(cuboid2d_vertices))
parsed_object.cuboid2d = Cuboid2d(cuboid2d_vertices)
# Parse the keypoints
if ('keypoints' in json_obj):
annotated_keypoints_json_data = json_obj['keypoints']
for check_keypoint_json_obj in annotated_keypoints_json_data:
parsed_object.keypoints.append(check_keypoint_json_obj)
parsed_object.update_transform()
return parsed_object
def set_transform(self, new_location, new_quaternion):
self.location = new_location
self.quaternion = new_quaternion
self.is_modified = True
def set_location(self, new_location):
self.location = new_location
self.is_modified = True
def set_quaternion(self, new_quaternion):
self.quaternion = new_quaternion
self.is_modified = True
def update_transform(self):
self.set_relative_transform(self.location, self.quaternion)
# print('update_transform: location: {} - quaternion: {}'.format(self.location, self.quaternion))
should_show = not (self.location is None) and not (self.quaternion is None)
if (not self.mesh is None) and should_show:
self.mesh.set_relative_transform(self.location, self.quaternion)
if (not self.cuboid3d is None) and should_show:
cuboid_location = self.cuboid_center if (not self.cuboid_center is None) else self.location
# self.cuboid3d.set_relative_transform(self.location, self.quaternion)
self.cuboid3d.set_relative_transform(cuboid_location, self.quaternion)
if (not self.pivot_axis is None) and should_show:
self.pivot_axis.set_relative_transform(self.location, self.quaternion)
self.is_modified = False
# =============================== AnnotatedSceneInfo ===============================
class AnnotatedSceneInfo(object):
"""Annotation data of a scene"""
def __init__(self, dataset_settings):
self.source_file_path = ""
self.dataset_settings = dataset_settings
self.objects = []
# Numpy array of pixel data
self.image_data = None
self.camera_intrinsics = None
def get_object_info(self, object_class_name):
found_objects = []
for check_object in self.objects:
if not (check_object is None) and (check_object.obj_class == object_class_name):
found_objects.append(check_object)
return found_objects
def set_image_data(self, new_image_numpy_data):
self.image_data = new_image_numpy_data
# print("set_image_data: {}".format(self.image_data.shape))
def get_scene_info_str(self):
info_str = path.splitext(path.basename(self.source_file_path))[0]
return info_str
# Parse and create an annotated scene from a json object
@classmethod
def create_from_json_data(cls, dataset_settings, frame_json_data, image_data):
parsed_scene = AnnotatedSceneInfo(dataset_settings)
# self.camera_intrinsics = dataset_settings.camera_intrinsics
if ('view_data' in frame_json_data):
view_data = frame_json_data['view_data']
# TODO: Need to handle the randomized FOV in different frame
# if ((not view_data is None) and ('fov' in view_data)):
# camera_fovx = frame_json_data['view_data']['fov']
# parsed_scene.camera_fovx = camera_fovx
# parsed_scene.camera_intrinsics = CameraIntrinsicSettings.from_perspective_fov_horizontal(hfov=camera_fovx)
parsed_scene.objects = []
try:
objects_data = frame_json_data['objects']
for check_obj_info in objects_data:
new_obj = AnnotatedObjectInfo.parse_from_json_object(dataset_settings, check_obj_info)
parsed_scene.objects.append(new_obj)
except KeyError:
print("*** Error ***: 'objects' is not present in annotation file. No annotations will be displayed.")
# !TEST
# obj_transform = new_obj.get_world_transform_matrix()
# projected_cuboid = new_obj.cuboid3d.get_projected_cuboid2d(obj_transform, self.camera_intrinsics.get_intrinsic_matrix())
parsed_scene.image_data = image_data
return parsed_scene
# Parse and create an annotated scene from an annotation file
@classmethod
def create_from_file(cls, dataset_settings, frame_file_path, image_file_path=""):
json_data = json.load(open(frame_file_path))
if (path.exists(image_file_path)):
image_data = np.array(cv2.imread(image_file_path))
image_data = image_data[:,:,::-1] # Reorder color channels to be RGB
else:
image_data = None
new_scene_info = cls.create_from_json_data(dataset_settings, json_data, image_data)
new_scene_info.source_file_path = frame_file_path
return new_scene_info
| Dataset_Utilities-master | nvdu/core/nvdu_data.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from .utils3d import *
class transform3d():
def __init__(self):
self.location = Vector3()
self.scale = Vector3([1, 1, 1])
self.rotation = Rotator([0.0, 0.0, 0.0])
self.quaternion = Quaternion([0, 0, 0, 1])
self.initial_matrix = Matrix44.identity()
self.transform_matrix = Matrix44.identity()
# Flag indicate whether the transformation is modified or not
self.is_changed = False
def to_matrix(self):
if self.is_changed:
self.update_transform_matrix()
return self.transform_matrix
def update_transform_matrix(self):
scale_matrix = Matrix44.from_scale(self.scale)
translation_matrix = Matrix44.from_translation(self.location)
# TODO: For some reason the qx, qy, qz part of the quaternion must be flipped
# Need to understand why and fix it
# The change need to be made together with the coordinate conversion in NDDS
# rotation_matrix = Matrix44.from_quaternion(self.quaternion)
qx, qy, qz, qw = self.quaternion
test_quaternion = Quaternion([-qx, -qy, -qz, qw])
rotation_matrix = Matrix44.from_quaternion(test_quaternion)
# print('update_transform_matrix: rotation_matrix = {}'.format(rotation_matrix))
relative_matrix = (translation_matrix * scale_matrix * rotation_matrix)
# self.transform_matrix = relative_matrix * self.initial_matrix
self.transform_matrix = relative_matrix
# print('update_transform_matrix: transform_matrix = {}'.format(self.transform_matrix))
def mark_changed(self):
self.is_changed = True
# ======================== Rotation ========================
def set_euler_rotation(self, new_rotation):
self.rotation = new_rotation
new_quaternion = self.rotation.to_quaternion()
self.set_quaternion(new_quaternion)
def rotate(self, angle_rotator):
# new_rotator = self.rotation + angle_rotator
new_rotator = self.rotation.add(angle_rotator)
# print("New rotation: {}".format(new_rotator))
self.set_euler_rotation(new_rotator)
def set_quaternion(self, new_quaternion):
# print("New quaternion: {}".format(new_quaternion))
self.quaternion = new_quaternion
self.mark_changed()
# ======================== Scale ========================
# Scale the mesh with the same amount between all the axis
def set_scale_uniform(self, uniform_scale):
self.scale *= uniform_scale
self.mark_changed()
def set_scale(self, new_scale):
self.scale = new_scale
self.mark_changed()
# ======================== Translation ========================
def set_location(self, new_location):
self.location = new_location
self.mark_changed()
def move(self, move_vector):
new_location = self.location + move_vector
self.set_location(new_location)
# ======================== Others ========================
def set_initial_matrix(self, new_initial_matrix):
self.initial_matrix = new_initial_matrix
self.mark_changed()
def reset_transform(self):
self.set_location(Vector3())
# self.set_rotation(euler.create(0.0, 0.0, 0.0))
self.set_quaternion(Quaternion([0, 0, 0, 1]))
self.mark_changed()
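# Usage sketch: the setters mark the transform dirty and to_matrix() rebuilds lazily:
#   t = transform3d()
#   t.set_location(Vector3([0, 0, 100]))
#   t.set_scale_uniform(2.0)
#   m = t.to_matrix()  # translation * scale * rotation, as composed above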
| Dataset_Utilities-master | nvdu/core/transform3d.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import math
import sys
import cv2
from nvdu.core.cuboid import *
def is_point_valid(point):
if (point is None):
return False
if (math.isnan(point[0]) or math.isnan(point[1])):
return False
# NOTE: sometime the value get too big and we run into error:
# OverflowError: Python int too large to convert to C long
# if (math.fabs(point[0]) >= sys.maxsize) or (math.fabs(point[1]) >= sys.maxsize):
if (math.fabs(point[0]) >= 10000) or (math.fabs(point[1]) >= 10000):
return False
return True
# This module contains all the functions related to drawing on an image
def draw_cuboid2d(image, cuboid2d, color, line_thickness=1, point_size=1):
if (image is None) or (cuboid2d is None):
return
# print("image: {} - image.shape: {}".format(image, image.shape))
line_type = cv2.LINE_AA
# Draw the lines edge of the cuboid
for line in CuboidLineIndexes:
vi0, vi1 = line
v0 = cuboid2d.get_vertex(vi0)
v1 = cuboid2d.get_vertex(vi1)
# print("draw line - v0: {} - v1: {}".format(v0, v1))
if (is_point_valid(v0) and is_point_valid(v1)):
v0 = (int(v0[0]), int(v0[1]))
v1 = (int(v1[0]), int(v1[1]))
# print("draw line - v0: {} - v1: {}".format(v0, v1))
cv2.line(image, v0, v1, color, line_thickness, line_type)
# Draw circle at each corner vertices of the cuboid
thickness = -1
# TODO: Highlight the top front vertices
for vertex_index in range(CuboidVertexType.TotalVertexCount):
vertex = cuboid2d.get_vertex(vertex_index)
if (not is_point_valid(vertex)):
continue
point = (int(vertex[0]), int(vertex[1]))
cv2.circle(image, point, point_size, color, thickness, line_type)
if (vertex_index == CuboidVertexType.FrontTopRight):
cv2.circle(image, point, point_size, (0,0,0), int(point_size / 2), line_type)
elif (vertex_index == CuboidVertexType.FrontTopLeft):
cv2.circle(image, point, point_size, (0,0,0), 1, line_type)
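# Usage sketch (hypothetical image and cuboid2d): overlay a green wireframe cuboid
# on a frame loaded with OpenCV:
#   image = cv2.imread('frame.png')
#   draw_cuboid2d(image, cuboid2d, color=(0, 255, 0), line_thickness=2, point_size=4)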
| Dataset_Utilities-master | nvdu/viz/image_draw.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from .utils3d import *
from nvdu.core import transform3d
from nvdu.core import scene_object
class SceneObjectVizBase(object):
def __init__(self, scene_object):
self.scene_object = scene_object
self.render_mode = RenderMode.normal
self._is_visible = True
def draw(self):
if ((self.scene_object is None) or (not self.is_visible())):
return
self.on_draw()
def on_draw(self):
pass
def is_visible(self):
return self._is_visible
def set_visibility(self, should_visible):
self._is_visible = should_visible
def hide(self):
self._is_visible = False
def show(self):
self._is_visible = True
def toggle_visibility(self):
self._is_visible = not self._is_visible
class SceneObjectViz3d(SceneObjectVizBase):
def __init__(self, scene_object):
super(SceneObjectViz3d, self).__init__(scene_object)
def draw(self):
if ((self.scene_object is None) or (not self.is_visible())):
return
# transform_matrix = self.scene_object.relative_transform.to_matrix()
# glMultMatrixf(get_opengl_matrixf(transform_matrix))
# for child_object in self.child_objects:
# if (child_object != None):
# child_object.draw()
glPushMatrix()
world_transform_matrix = self.scene_object.get_world_transform_matrix()
# print("{} - draw - world_transform_matrix: {}".format(self, world_transform_matrix))
glMultMatrixf(get_opengl_matrixf(world_transform_matrix))
self.on_draw()
glPopMatrix()
| Dataset_Utilities-master | nvdu/viz/scene_object.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import future
import pyrr
import pywavefront
import numpy as np
from ctypes import *
import json
from os import listdir, path
import pickle
import cv2
import pyglet
from nvdu.core.nvdu_data import *
from .camera import *
from .cuboid import *
from .viewport import *
from .pointcloud import *
# from .pivot_axis import *
from .mesh import *
from .background_image import *
# =============================== Helper functions ===============================
# =============================== Data parsing ===============================
# =============================== Dataset Settings ===============================
# =============================== AnnotatedObjectViz ===============================
# Class contain annotation data of each object in the scene
class AnnotatedObjectViz(object):
def __init__(self, annotated_object_info):
self.object_info = annotated_object_info
object_settings = self.object_info.object_settings
# self.bb2d = BoundingBox()
self.cuboid2d = Cuboid2dViz(self.object_info.cuboid2d, object_settings.class_color)
self.cuboid3d = Cuboid3dViz(self.object_info.cuboid3d, object_settings.class_color)
self.pivot_axis = PivotAxis(self.object_info.pivot_axis)
self.mesh = MeshViz(self.object_info.mesh)
raw_keypoints = self.object_info.keypoints
keypoint_locations = []
if not (raw_keypoints is None):
for check_keypoint in raw_keypoints:
# TODO: don't hardcode the key name here and use const instead
if not (check_keypoint is None) and ('projected_location' in check_keypoint):
check_keypoint_location = check_keypoint['projected_location']
keypoint_locations.append(check_keypoint_location)
# print("raw_keypoints: {}".format(raw_keypoints))
# print("keypoint_locations: {}".format(keypoint_locations))
self.keypoint2d = PointCloud2d(keypoint_locations)
self.is_modified = False
def update_transform(self):
# print('update_transform: location: {} - quaternion: {}'.format(self.object_info.location, self.object_info.quaternion))
should_show = not (self.object_info.location is None) and not (self.object_info.quaternion is None)
if (self.mesh):
self.mesh.set_visibility(should_show)
if (self.cuboid3d):
self.cuboid3d.set_visibility(should_show)
if (self.pivot_axis):
self.pivot_axis.set_visibility(should_show)
self.is_modified = False
def draw(self, visualizer_settings=None):
if (self.is_modified):
self.update_transform()
glPolygonMode(GL_FRONT_AND_BACK, visualizer_settings.render_mode)
if ((visualizer_settings is None) or visualizer_settings.show_mesh) and self.mesh:
self.mesh.draw()
if ((visualizer_settings is None) or visualizer_settings.show_cuboid3d) and self.cuboid3d:
self.cuboid3d.draw()
# self.cuboid2d.draw()
if ((visualizer_settings is None) or visualizer_settings.show_pivot_axis) and self.pivot_axis:
self.pivot_axis.draw()
def update_settings(self, visualizer_settings=None):
if (visualizer_settings is None):
return
have_valid_transform = not (self.object_info.location is None) and not (self.object_info.quaternion is None)
if (self.is_modified):
self.update_transform()
if self.mesh:
self.mesh.set_visibility(visualizer_settings.show_mesh and have_valid_transform)
self.mesh.render_mode = visualizer_settings.render_mode
if self.cuboid3d:
self.cuboid3d.set_visibility(visualizer_settings.show_cuboid3d and have_valid_transform)
if self.cuboid2d:
# print('update_settings: self.cuboid2d = {} - visualizer_settings.show_cuboid2d = {}'.format(self.cuboid2d, visualizer_settings.show_cuboid2d))
self.cuboid2d.set_visibility(visualizer_settings.show_cuboid2d)
if self.pivot_axis:
self.pivot_axis.set_visibility(visualizer_settings.show_pivot_axis and have_valid_transform)
if self.keypoint2d:
# print("visualizer_settings.show_keypoint2d: {}".format(visualizer_settings.show_keypoint2d))
self.keypoint2d.set_visibility(visualizer_settings.show_keypoint2d)
# =============================== AnnotatedSceneViz ===============================
class AnnotatedSceneViz(object):
"""Class contain annotation data of a scene"""
def __init__(self, annotated_scene_info):
self._scene_info = annotated_scene_info
self.camera_intrinsics = self._scene_info.camera_intrinsics
self._object_vizs = []
for check_object in self._scene_info.objects:
if not (check_object is None):
new_object_viz = AnnotatedObjectViz(check_object)
self._object_vizs.append(new_object_viz)
dataset_settings = self._scene_info.dataset_settings
if not (dataset_settings is None or dataset_settings.exporter_settings is None):
img_width, img_height = dataset_settings.exporter_settings.captured_image_size
else:
# NOTE: Fall back to the resolution from the camera intrinsics.
img_width = self.camera_intrinsics.res_width
img_height = self.camera_intrinsics.res_height
# img_width, img_height = 640, 480
# print("Image size: {} x {}".format(img_width, img_height))
# print("AnnotatedSceneViz - dataset_settings: {} - dataset_settings.exporter_settings: {}".format(
# dataset_settings, dataset_settings.exporter_settings))
# print("AnnotatedSceneViz - img_width: {} - img_height: {}".format(img_width, img_height))
# print("_scene_info.image_data = {}".format(self._scene_info.image_data))
# print("object_vizs = {}".format(self._object_vizs))
# print("AnnotatedSceneViz: img_width = {} - img_height = {} - image_data: {}".format(img_width, img_height, self._scene_info.image_data.shape))
if not (self._scene_info.image_data is None):
self.background_image = BackgroundImage.create_from_numpy_image_data(self._scene_info.image_data, img_width, img_height)
else:
self.background_image = None
info_str = self._scene_info.get_scene_info_str()
# print("Scene info: {}".format(info_str))
self.info_text = pyglet.text.Label(info_str,
font_size=16,
x=0, y=0,
# color=(255, 0, 0, 255),
anchor_x='left', anchor_y='baseline')
def set_image_data(self, new_image_numpy_data):
img_width, img_height = self._scene_info.dataset_settings.exporter_settings.captured_image_size
print("set_image_data - img_width: {} - img_height: {}".format(img_width, img_height))
if (self.background_image is None):
self.background_image = BackgroundImage.create_from_numpy_image_data(new_image_numpy_data, img_width, img_height)
else:
self.background_image.load_image_data_from_numpy(new_image_numpy_data)
def draw(self, visualizer_settings=None):
for obj_viz in self._object_vizs:
# print('draw object: {}'.format(obj.source_file_path))
obj_viz.draw(visualizer_settings)
def set_text_color(self, new_text_color):
self.info_text.color = new_text_color
def update_settings(self, visualizer_settings=None):
if (visualizer_settings):
for obj_viz in self._object_vizs:
obj_viz.update_settings(visualizer_settings)
# TODO: Create a new viz object to handle the text overlay
# if (self.info_text):
# self.info_text.set_visibility()
# =============================== Visualizer ===============================
class VisualizerSettings(object):
def __init__(self):
self.render_mode = RenderMode.normal
self.show_mesh = True
self.show_pivot_axis = True
self.show_cuboid3d = True
self.show_cuboid2d = True
self.show_bb2d = True
self.show_info_text = True
self.show_keypoint2d = False
self.ignore_initial_matrix = False
# TODO: Find a way to use template for all these flags
def toggle_mesh(self):
self.show_mesh = not self.show_mesh
def toggle_pivot_axis(self):
self.show_pivot_axis = not self.show_pivot_axis
def toggle_cuboid3d(self):
self.show_cuboid3d = not self.show_cuboid3d
def toggle_cuboid2d(self):
self.show_cuboid2d = not self.show_cuboid2d
def toggle_bb2d(self):
self.show_bb2d = not self.show_bb2d
def toggle_keypoint2d(self):
print("toggle_keypoint2d==========================")
self.show_keypoint2d = not self.show_keypoint2d
def toggle_info_overlay(self):
self.show_info_text = not self.show_info_text
class NVDUVisualizer():
def __init__(self):
self.render_mode = RenderMode.normal
self.camera = Camera()
self.dataset_settings = None
self.visualizer_settings = VisualizerSettings()
self.annotated_scene = None
self.scene_viz = None
self.viewport = Viewport(None)
self.viewport.size = [512, 512]
def draw(self):
if (self.annotated_scene is None) or (self.scene_viz is None):
return
self.viewport.clear()
# TODO: Should let the AnnotatedSceneViz handle all these draw logic
self.viewport.scene_bg.add_object(self.scene_viz.background_image)
self.viewport.scene3d.camera = self.camera
if (not self.scene_viz.camera_intrinsics is None):
self.viewport.scene3d.camera.set_instrinsic_settings(self.scene_viz.camera_intrinsics)
# self.viewport.scene3d.camera.set_fovx(self.scene_viz.camera_fovx)
#TODO: Move this code to a separated function
# mesh_paths = []
# for obj_viz in self.scene_viz._object_vizs:
# mesh_paths.append(obj_viz.mesh.mesh_obj.source_file_path)
# GlobalModelManager.load_model_list(mesh_paths)
# print("NVDUVisualizer - draw - mesh_paths: {}".format(mesh_paths))
for obj in self.scene_viz._object_vizs:
if (obj.mesh):
obj.mesh.ignore_initial_matrix = self.visualizer_settings.ignore_initial_matrix
self.viewport.scene3d.add_object(obj.mesh)
self.viewport.scene3d.add_object(obj.cuboid3d)
self.viewport.scene3d.add_object(obj.pivot_axis)
self.viewport.scene_overlay.add_object(obj.cuboid2d)
self.viewport.scene_overlay.add_object(obj.keypoint2d)
if (self.visualizer_settings.show_info_text):
self.scene_viz.info_text.draw()
self.scene_viz.update_settings(self.visualizer_settings)
self.viewport.draw()
# ========================== CONTROL ==========================
def toggle_cuboid2d_overlay(self):
self.visualizer_settings.toggle_cuboid2d()
def toggle_cuboid3d_overlay(self):
self.visualizer_settings.toggle_cuboid3d()
def toggle_object_overlay(self):
self.visualizer_settings.toggle_mesh()
def toggle_pivot_axis(self):
self.visualizer_settings.toggle_pivot_axis()
def toggle_info_overlay(self):
self.visualizer_settings.toggle_info_overlay()
def toggle_keypoint2d_overlay(self):
self.visualizer_settings.toggle_keypoint2d()
def set_render_mode(self, new_render_mode):
self.visualizer_settings.render_mode = new_render_mode
def set_text_color(self, new_text_color):
self.scene_viz.set_text_color(new_text_color)
def visualize_dataset_frame(self, in_dataset, in_frame_index = 0):
frame_image_file_path, frame_data_file_path = in_dataset.get_frame_file_path_from_index(in_frame_index)
if not path.exists(frame_image_file_path):
print("Can't find image file for frame: {} - {}".format(in_frame_index, frame_image_file_path))
return
if not path.exists(frame_data_file_path):
print("Can't find annotation file for frame: {} - {}".format(in_frame_index, frame_data_file_path))
return
print("visualize_dataset_frame: frame_image_file_path: {} - frame_data_file_path: {}".format(
frame_image_file_path, frame_data_file_path))
frame_scene_data = AnnotatedSceneInfo.create_from_file(self.dataset_settings,
frame_data_file_path, frame_image_file_path)
self.visualize_scene(frame_scene_data)
def set_scene_data(self, new_scene_data):
self.annotated_scene = new_scene_data
self.scene_viz = AnnotatedSceneViz(self.annotated_scene)
def visualize_scene(self, annotated_scene):
self.set_scene_data(annotated_scene)
self.draw()
| Dataset_Utilities-master | nvdu/viz/nvdu_visualizer.py |
Dataset_Utilities-master | nvdu/viz/__init__.py |
|
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from .utils3d import *
from nvdu.core import transform3d
from .camera import *
# A Scene manages all of the objects that need to be rendered
class Scene(object):
def __init__(self, owner_viewport):
self.viewport = owner_viewport
# List of the objects in the scene - SceneObject type
self.objects = []
# TODO: May need to derive from SceneObject
# self._is_visible = True
def clear(self):
self.objects = []
def add_object(self, new_obj):
# TODO: Make sure the new_obj is new and not already in the objects list?
self.objects.append(new_obj)
def draw(self):
pass
# ================================= Scene2d =================================
class Scene2d(Scene):
def __init__(self, owner_viewport):
super(Scene2d, self).__init__(owner_viewport)
def draw(self):
super(Scene2d, self).draw()
viewport_width, viewport_height = self.viewport.size
# print('Scene2d - {} - draw'.format(self))
# Disable depth when rendering 2d scene
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glDepthMask(False)
# Use Orthographic camera in full viewport size for 2d scene
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0, viewport_width, 0.0, viewport_height)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
# NOTE: OpenCV 2d image coordinate system have Y going down
# while OpenGL have Y going up => need to flip the Y axis
# and add the viewport_height so the OpenCV coordinate appear right
glTranslatef(0.0, viewport_height, 0.0)
glMultMatrixf(get_opengl_matrixf(opencv_to_opengl_matrix))
# Render the objects in the scene
for obj in self.objects:
if (obj):
obj.draw()
glPopMatrix()
glDepthMask(True)
# ================================= Scene3d =================================
class Scene3d(Scene):
def __init__(self, owner_viewport):
super(Scene3d, self).__init__(owner_viewport)
self.camera = None
def draw(self):
super(Scene3d, self).draw()
# print('Scene3d - {} - draw'.format(self))
glPushMatrix()
self.camera.draw()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glPushMatrix()
glMultMatrixf(get_opengl_matrixf(opencv_to_opengl_matrix))
# TODO: Sort the 3d objects in the scene from back to front (Z reducing)
for child_object in self.objects:
if child_object:
child_object.draw()
glPopMatrix()
glPopMatrix()
| Dataset_Utilities-master | nvdu/viz/scene.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import os
from os import path
# import asyncio
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from pyglet.gl import *
from pyglet.gl.gl import *
from pyglet.gl.glu import *
from ctypes import *
import pywavefront
import pywavefront.visualization
from nvdu.core.mesh import *
from .utils3d import *
from .scene_object import *
from .pivot_axis import *
class Model3dManager(object):
def __init__(self):
self.model_map = {}
def get_model(self, model_path, auto_load = True):
if (model_path in self.model_map):
return self.model_map[model_path]
if (auto_load):
self.load_model(model_path)
return None
def load_model(self, model_path):
new_model = self.load_model_from_file(model_path)
self.model_map[model_path] = new_model
return new_model
# def load_model_list(self, model_paths):
# # print("load_model_list: {}".format(model_paths))
# tasks = []
# for check_path in model_paths:
# if not (check_path in self.model_map):
# tasks.append(asyncio.ensure_future(self.load_model(check_path)))
# if (len(tasks) > 0):
# # print("Load all the models: {}".format(model_paths))
# self.loop.run_until_complete(asyncio.wait(tasks))
def load_model_from_file(self, model_file_path):
if (path.exists(model_file_path)):
print("Model3dManager::load_model_from_file: {}".format(model_file_path))
return pywavefront.Wavefront(model_file_path)
else:
print("Model3dManager::load_model_from_file - can NOT find 3d model: {}".format(model_file_path))
return None
GlobalModelManager = Model3dManager()
class MeshViz(SceneObjectViz3d):
def __init__(self, mesh_obj):
super(MeshViz, self).__init__(mesh_obj)
self.mesh_obj = mesh_obj
self.mesh_model = None
# pivot_size = [10, 10, 10]
# self.pivot_axis = PivotAxis(pivot_size)
self.pivot_axis = None
self.ignore_initial_matrix = False
def on_draw(self):
super(MeshViz, self).on_draw()
if (self.mesh_model is None):
self.mesh_model = GlobalModelManager.get_model(self.mesh_obj.source_file_path)
if (self.mesh_model):
if (self.pivot_axis):
self.pivot_axis.draw()
glPolygonMode(GL_FRONT_AND_BACK, self.render_mode)
if (not self.ignore_initial_matrix):
mesh_initial_matrix = self.mesh_obj.get_initial_matrix()
# print("mesh_initial_matrix: {}".format(mesh_initial_matrix))
glMultMatrixf(get_opengl_matrixf(mesh_initial_matrix))
# TODO: Need to get the color from the object settings
glColor4f(1.0, 1.0, 0.0, 0.5)
self.mesh_model.draw()
glColor4f(1.0, 1.0, 1.0, 1.0)
glPolygonMode(GL_FRONT_AND_BACK, RenderMode.normal)
| Dataset_Utilities-master | nvdu/viz/mesh.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from .utils3d import *
from .scene import *
class Viewport(object):
def __init__(self, context):
self._context = context
self._size = [0, 0]
self.scene_bg = Scene2d(self)
self.scene3d = Scene3d(self)
self.scene_overlay = Scene2d(self)
self.scenes = [
self.scene_bg,
self.scene3d,
self.scene_overlay
]
@property
def size(self):
return self._size
@size.setter
def size(self, new_size):
self._size[0] = new_size[0]
self._size[1] = new_size[1]
def clear(self):
for scene in self.scenes:
if (scene):
scene.clear()
def draw(self):
for scene in self.scenes:
if (scene):
scene.draw()
| Dataset_Utilities-master | nvdu/viz/viewport.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
from pyglet.gl import *
from pyglet.gl.gl import c_float, c_double, c_int, glGetFloatv, GL_MODELVIEW_MATRIX
from pyglet.gl.glu import *
import numpy as np
from .utils3d import *
from nvdu.core import transform3d
from nvdu.core import scene_object
from nvdu.core.camera import *
from .scene_object import *
# A Camera handles the perspective and frustum of a scene
class Camera(SceneObjectViz3d):
# DEFAULT_ZNEAR = 0.000001
# DEFAULT_ZNEAR = 0.00001
DEFAULT_ZNEAR = 1
DEFAULT_ZFAR = 100000.0
# Horizontal field of view of the camera (in degree)
DEFAULT_FOVX = 90.0
DEFAULT_ASPECT_RATIO_XY = 1920.0 / 1080.0
def __init__(self, cam_intrinsic_settings = CameraIntrinsicSettings(), scene_object = None):
super(Camera, self).__init__(scene_object)
self.camera_matrix = Matrix44.identity()
self.projection_matrix = Matrix44.identity()
self.set_instrinsic_settings(cam_intrinsic_settings)
def draw(self):
# glClipControl(GL_LOWER_LEFT, GL_ZERO_TO_ONE)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
# TODO: Need to invert the CameraToWorld matrix => WorldToCamera matrix
# transform_matrix = self.relative_transform.to_matrix()
# glMultMatrixf(get_opengl_matrixf(transform_matrix))
# self.build_perspective_projection_matrix()
# print("on_draw - camera_fov: {}".format(self.camera_fov))
# print("camera - on_draw - projection_matrix: {}".format(self.projection_matrix))
glMultMatrixf(get_opengl_matrixf(self.projection_matrix))
        # Scale up the scene so objects very close to the camera don't get clipped by the near plane
# glScalef(100.0, 100.0, 100.0)
# Set up the camera using its intrinsic parameters:
# (fx, fy): focal length
# (cx, cy): optical center
def set_instrinsic_settings(self, cam_intrinsic_settings):
self.intrinsic_settings = cam_intrinsic_settings
if (cam_intrinsic_settings):
self.projection_matrix = cam_intrinsic_settings.get_projection_matrix()
# print("set_instrinsic_settings: {} - projection matrix: {}".format(str(self.intrinsic_settings), self.projection_matrix))
def set_perspective_params(self, fovy, aspect_ratio_xy, znear = DEFAULT_ZNEAR, zfar = DEFAULT_ZFAR):
self.fovy = np.deg2rad(fovy)
self.aspect_ratio_xy = aspect_ratio_xy
self.znear = znear
self.zfar = zfar
self.build_perspective_projection_matrix()
def set_fovx(self, new_fovx):
# new_fovy = convert_HFOV_to_VFOV(new_fovx, 1.0 / self.aspect_ratio_xy)
# self.set_fovy(new_fovy)
# self.build_perspective_projection_matrix()
        new_cam_intrinsics = CameraIntrinsicSettings.from_perspective_fov_horizontal(
            self.intrinsic_settings.res_width, self.intrinsic_settings.res_height, new_fovx)
        # Apply the new settings; previously the result was computed but discarded.
        self.set_instrinsic_settings(new_cam_intrinsics)
def build_perspective_projection_matrix(self):
zdiff = float(self.znear - self.zfar)
fovy_tan = np.tan(self.fovy / 2.0)
# TODO: Handle fovy_tan = 0?
a = 1.0 / (fovy_tan * self.aspect_ratio_xy)
b = 1.0 / fovy_tan
# print('a: {} - b: {}'.format(a, b))
c = (self.znear + self.zfar) / zdiff
d = 2 * (self.znear * self.zfar) / zdiff
self.projection_matrix = Matrix44([
[a, 0, 0, 0],
[0, b, 0, 0],
[0, 0, c, -1.0],
[0, 0, d, 0]
])
# print('build_perspective_projection_matrix: {} - znear: {} - zfar: {} - aspect_ratio_xy: {} - fovy: {}'.format(
# self.projection_matrix, self.znear, self.zfar, self.aspect_ratio_xy, self.fovy))
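    # A minimal sanity check of the matrix above (a sketch, assuming pyrr's
    # row-vector convention clip = point @ projection_matrix): a point at
    # z = -znear should land on the near clip plane (NDC z = clip.z / clip.w
    # = -1) and a point at z = -zfar on the far plane (NDC z = +1):
    #
    #   import numpy as np
    #   clip = np.array([0.0, 0.0, -cam.znear, 1.0]) @ np.array(cam.projection_matrix)
    #   assert abs(clip[2] / clip[3] + 1.0) < 1e-6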
def set_viewport_size(self, viewport_size):
self.viewport_size = viewport_size
| Dataset_Utilities-master | nvdu/viz/camera.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from pyglet.gl import *
from ctypes import *
from .scene_object import *
from nvdu.core.cuboid import *
# ========================= PointCloud2d =========================
class PointCloud2d(SceneObjectVizBase):
    # Build a 2d point cloud from a list of (x, y) points
def __init__(self, point2d_list=[], in_color=None):
super(PointCloud2d, self).__init__(point2d_list)
self.vertices = point2d_list
self.color = in_color
self.generate_vertexes_buffer()
def generate_vertexes_buffer(self):
self.vertices_gl = []
vertex_count = len(self.vertices)
for i in range(0, vertex_count):
vertex = self.vertices[i]
if (not vertex is None):
self.vertices_gl.append(vertex[0])
self.vertices_gl.append(vertex[1])
self.indices_point_gl = []
for i in range (len(self.vertices)):
if (not self.vertices[i] is None):
self.indices_point_gl.append(i)
self.vertex_gl_array = (GLfloat* len(self.vertices_gl))(*self.vertices_gl)
self.indices_point_gl_array = (GLubyte* len(self.indices_point_gl))(*self.indices_point_gl)
# print("PointCloud2d: {}".format(self.vertices_gl))
def on_draw(self):
super(PointCloud2d, self).on_draw()
# print("Drawing pointcloud: {}".format(self.vertices_gl))
glEnableClientState(GL_VERTEX_ARRAY)
# glEnableClientState(GL_COLOR_ARRAY)
glPolygonMode(GL_FRONT_AND_BACK, RenderMode.normal)
glVertexPointer(2, GL_FLOAT, 0, self.vertex_gl_array)
# glColorPointer(4, GL_UNSIGNED_BYTE, 0, self.vertex_color_gl_array)
glPointSize(10.0)
glDrawElements(GL_POINTS, len(self.indices_point_gl_array), GL_UNSIGNED_BYTE, self.indices_point_gl_array)
glPointSize(1.0)
# Deactivate vertex arrays after drawing
glDisableClientState(GL_VERTEX_ARRAY)
# glDisableClientState(GL_COLOR_ARRAY)
| Dataset_Utilities-master | nvdu/viz/pointcloud.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from pyglet.gl.gl import *
from ctypes import *
# Get the openGL matrix (GLfloat* 16) from a Matrix44 type
# NOTE: OpenGL use column major while OpenCV use row major
def get_opengl_matrixf(in_mat44):
return (GLfloat* 16)(
in_mat44.m11, in_mat44.m12, in_mat44.m13, in_mat44.m14,
in_mat44.m21, in_mat44.m22, in_mat44.m23, in_mat44.m24,
in_mat44.m31, in_mat44.m32, in_mat44.m33, in_mat44.m34,
in_mat44.m41, in_mat44.m42, in_mat44.m43, in_mat44.m44
)
def convert_HFOV_to_VFOV(hfov, hw_ratio):
# https://en.wikipedia.org/wiki/Field_of_view_in_video_games
vfov = 2 * np.arctan(np.tan(np.deg2rad(hfov / 2)) * hw_ratio)
return np.rad2deg(vfov)
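# Worked example: a 90 degree horizontal FOV on a 16:9 display
# (hw_ratio = 1080 / 1920 = 0.5625) gives
#   vfov = 2 * arctan(tan(45 deg) * 0.5625) ~= 58.7 degrees
# i.e. convert_HFOV_to_VFOV(90.0, 1080.0 / 1920.0) returns roughly 58.71.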
opencv_to_opengl_matrix = Matrix44([
[1.0, 0.0, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]
])
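# Usage sketch (an assumption based on the conventions noted above, not a
# call made in this file): right-multiplying a pose by this matrix negates
# the Y and Z axes, mapping an OpenCV-style camera frame (x right, y down,
# z forward) onto an OpenGL-style one (x right, y up, z backward):
#
#   pose_gl = pose_cv * opencv_to_opengl_matrix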
class RenderMode():
normal = GL_FILL
wire_frame = GL_LINE
point = GL_POINT
| Dataset_Utilities-master | nvdu/viz/utils3d.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
from pyrr import Quaternion, Matrix44, Vector3, euler
import numpy as np
from pyglet.gl import *
from ctypes import *
from .scene_object import *
from nvdu.core.cuboid import *
# ========================= Cuboid3d =========================
# TODO: Should merge Cuboid and Box3d
class Cuboid3dViz(SceneObjectViz3d):
# Create a box with a certain size
def __init__(self, cuboid3d, in_color=None):
super(Cuboid3dViz, self).__init__(cuboid3d)
self.cuboid3d = cuboid3d
self.vertices = self.cuboid3d.get_vertices()
self.render_line = True
self.color = in_color
self.face_alpha = 128
# self.render_line = False
self.generate_vertex_buffer()
def generate_vertex_buffer(self):
self.vertices_gl = []
for i in range(0, CuboidVertexType.TotalCornerVertexCount):
self.vertices_gl.append(self.vertices[i][0])
self.vertices_gl.append(self.vertices[i][1])
self.vertices_gl.append(self.vertices[i][2])
# List of color for each vertices of the box
if (self.color is None):
self.colors_gl = [
0, 0, 255, 255, # Front Top Right
0, 0, 255, 255, # Front Top Left
255, 0, 255, 255, # Front Bottom Left
255, 0, 255, 255, # Front Bottom Right
0, 255, 0, 255, # Rear Top Right
0, 255, 0, 255, # Rear Top Left
255, 255, 0, 255, # Rear Bottom Left
255, 255, 0, 255, # Rear Bottom Right
]
else:
self.colors_gl = []
for i in range(0, CuboidVertexType.TotalCornerVertexCount):
for color_channel in self.color:
self.colors_gl.append(color_channel)
        # Dim the vertex colors (every channel, alpha included) so the
        # triangle faces render as a translucent tint under the edges
        self.colors_tri_gl = list(int(color / 4) for color in self.colors_gl)
cvt = CuboidVertexType
# Counter-Clockwise order triangle indices
self.indices_tri_gl = [
# Front face
cvt.FrontBottomLeft, cvt.FrontTopLeft, cvt.FrontTopRight,
cvt.FrontTopRight, cvt.FrontBottomRight, cvt.FrontBottomLeft,
# Right face
cvt.FrontBottomRight, cvt.FrontTopRight, cvt.RearBottomRight,
cvt.RearTopRight, cvt.RearBottomRight, cvt.FrontTopRight,
# Back face
cvt.RearBottomLeft, cvt.RearBottomRight, cvt.RearTopRight,
cvt.RearTopRight, cvt.RearTopLeft, cvt.RearBottomLeft,
# Left face
cvt.FrontTopLeft, cvt.FrontBottomLeft, cvt.RearBottomLeft,
cvt.RearBottomLeft, cvt.RearTopLeft, cvt.FrontTopLeft,
# Top face
cvt.RearTopLeft, cvt.RearTopRight, cvt.FrontTopRight,
cvt.FrontTopRight, cvt.FrontTopLeft, cvt.RearTopLeft,
# Bottom face
cvt.RearBottomLeft, cvt.FrontBottomLeft, cvt.FrontBottomRight,
cvt.FrontBottomRight, cvt.RearBottomRight, cvt.RearBottomLeft,
]
self.indices_line_gl = np.array(CuboidLineIndexes).flatten()
# print("indices_line_gl: {}".format(self.indices_line_gl))
self.indices_point_gl = list(range(0, len(self.vertices)))
# print('indices_point_gl: {}'.format(self.indices_point_gl))
self.vertex_gl_array = (GLfloat* len(self.vertices_gl))(*self.vertices_gl)
self.color_gl_array = (GLubyte* len(self.colors_gl))(*self.colors_gl)
self.colors_tri_gl_array = (GLubyte* len(self.colors_tri_gl))(*self.colors_tri_gl)
self.indices_tri_gl_array = (GLubyte* len(self.indices_tri_gl))(*self.indices_tri_gl)
self.indices_line_gl_array = (GLubyte* len(self.indices_line_gl))(*self.indices_line_gl)
self.indices_point_gl_array = (GLubyte* len(self.indices_point_gl))(*self.indices_point_gl)
def on_draw(self):
super(Cuboid3dViz, self).on_draw()
# print('Cuboid3dViz - on_draw - vertices: {}'.format(self.vertices))
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
# glEnable(GL_POLYGON_SMOOTH)
glPolygonMode(GL_FRONT_AND_BACK, RenderMode.normal)
glVertexPointer(3, GL_FLOAT, 0, self.vertex_gl_array)
# Render each faces of the cuboid
glColorPointer(4, GL_UNSIGNED_BYTE, 0, self.colors_tri_gl_array)
glDrawElements(GL_TRIANGLES, len(self.indices_tri_gl), GL_UNSIGNED_BYTE, self.indices_tri_gl_array)
# Render each edge lines
glEnable(GL_LINE_SMOOTH)
glLineWidth(3.0)
glColorPointer(4, GL_UNSIGNED_BYTE, 0, self.color_gl_array)
# TODO: May want to use GL_LINE_STRIP or GL_LINE_LOOP
glDrawElements(GL_LINES, len(self.indices_line_gl), GL_UNSIGNED_BYTE, self.indices_line_gl_array)
glDisable(GL_LINE_SMOOTH)
# Render each corner vertices in POINTS mode
glPointSize(10.0)
glDrawElements(GL_POINTS, len(self.indices_point_gl_array), GL_UNSIGNED_BYTE, self.indices_point_gl_array)
glPointSize(1.0)
# Deactivate vertex arrays after drawing
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
# glDisable(GL_POLYGON_SMOOTH)
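# Note: both cuboid viz classes upload their indices as GLubyte arrays, which
# caps the addressable vertex count at 256 -- plenty for an 8-corner cuboid,
# but worth keeping in mind before reusing this pattern for larger meshes.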
# ========================= Cuboid2d =========================
class Cuboid2dViz(SceneObjectVizBase):
# Create a box with a certain size
def __init__(self, cuboid2d, in_color=None):
super(Cuboid2dViz, self).__init__(cuboid2d)
self.cuboid2d = cuboid2d
self.color = in_color
if not (self.cuboid2d is None):
self.vertices = self.cuboid2d.get_vertices()
self.generate_vertexes_buffer()
# self.render_line = False
def generate_vertexes_buffer(self):
self.vertices_gl = []
max_vertex_count = min(CuboidVertexType.TotalVertexCount, len(self.vertices))
for i in range(0, max_vertex_count):
vertex = self.vertices[i]
if (not vertex is None):
self.vertices_gl.append(self.vertices[i][0])
self.vertices_gl.append(self.vertices[i][1])
else:
self.vertices_gl.append(0.0)
self.vertices_gl.append(0.0)
# List of color for each vertices of the box
self.vertex_colors_gl = [
0, 0, 255, 255, # Front Top Right
0, 0, 255, 255, # Front Top Left
255, 0, 255, 255, # Front Bottom Left
255, 0, 255, 255, # Front Bottom Right
0, 255, 0, 255, # Rear Top Right
0, 255, 0, 255, # Rear Top Left
255, 255, 0, 255, # Rear Bottom Left
255, 255, 0, 255, # Rear Bottom Right
]
# List of color for each vertices of the box
if (self.color is None):
self.edge_colors_gl = self.vertex_colors_gl
else:
self.edge_colors_gl = []
for i in range(0, CuboidVertexType.TotalCornerVertexCount):
for color_channel in self.color:
self.edge_colors_gl.append(color_channel)
# NOTE: Only add valid lines:
self.indices_line_gl = []
for line in CuboidLineIndexes:
vi0, vi1 = line
v0 = self.vertices[vi0]
v1 = self.vertices[vi1]
if not (v0 is None) and not (v1 is None):
self.indices_line_gl.append(vi0)
self.indices_line_gl.append(vi1)
# print('indices_line_gl: {}'.format(self.indices_line_gl))
# self.indices_line_gl = [
# # Front face
# cvt.FrontTopLeft, cvt.FrontTopRight,
# cvt.FrontTopRight, cvt.FrontBottomRight,
# cvt.FrontBottomRight, cvt.FrontBottomLeft,
# cvt.FrontBottomLeft, cvt.FrontTopLeft,
# # Back face
# cvt.RearTopLeft, cvt.RearTopRight,
# cvt.RearTopRight, cvt.RearBottomRight,
# cvt.RearBottomRight, cvt.RearBottomLeft,
# cvt.RearBottomLeft, cvt.RearTopLeft,
# # Left face
# cvt.FrontBottomLeft, cvt.RearBottomLeft,
# cvt.FrontTopLeft, cvt.RearTopLeft,
# # Right face
# cvt.FrontBottomRight, cvt.RearBottomRight,
# cvt.FrontTopRight, cvt.RearTopRight,
# ]
# self.indices_point_gl = list(i for i in range(0, len(self.vertices)))
# NOTE: Only add valid points:
self.indices_point_gl = []
for i in range (len(self.vertices)):
if (not self.vertices[i] is None):
self.indices_point_gl.append(i)
# print('indices_point_gl: {}'.format(self.indices_point_gl))
self.vertex_gl_array = (GLfloat* len(self.vertices_gl))(*self.vertices_gl)
self.vertex_color_gl_array = (GLubyte* len(self.vertex_colors_gl))(*self.vertex_colors_gl)
self.edge_colors_gl_array = (GLubyte* len(self.edge_colors_gl))(*self.edge_colors_gl)
self.indices_line_gl_array = (GLubyte* len(self.indices_line_gl))(*self.indices_line_gl)
self.indices_point_gl_array = (GLubyte* len(self.indices_point_gl))(*self.indices_point_gl)
def on_draw(self):
if (self.cuboid2d is None):
return
super(Cuboid2dViz, self).on_draw()
# print('Cuboid2dViz - on_draw - vertices: {}'.format(self.vertices))
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
# glEnable(GL_POLYGON_SMOOTH)
glPolygonMode(GL_FRONT_AND_BACK, RenderMode.normal)
glVertexPointer(2, GL_FLOAT, 0, self.vertex_gl_array)
glEnable(GL_LINE_SMOOTH)
glLineWidth(3.0)
glColorPointer(4, GL_UNSIGNED_BYTE, 0, self.edge_colors_gl_array)
# TODO: May want to use GL_LINE_STRIP or GL_LINE_LOOP
glDrawElements(GL_LINES, len(self.indices_line_gl), GL_UNSIGNED_BYTE, self.indices_line_gl_array)
glDisable(GL_LINE_SMOOTH)
glColorPointer(4, GL_UNSIGNED_BYTE, 0, self.vertex_color_gl_array)
glPointSize(10.0)
glDrawElements(GL_POINTS, len(self.indices_point_gl_array), GL_UNSIGNED_BYTE, self.indices_point_gl_array)
glPointSize(1.0)
# Deactivate vertex arrays after drawing
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
# glDisable(GL_POLYGON_SMOOTH)
| Dataset_Utilities-master | nvdu/viz/cuboid.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import numpy as np
from pyrr import Quaternion, Matrix44, Vector3, euler
from pyglet.gl import *
from pyglet.gl.gl import *
from pyglet.gl.glu import *
from ctypes import *
from .scene_object import *
class PivotAxis(SceneObjectViz3d):
# Create a pivot axis object with 3 axes each can have different length
def __init__(self, in_pivot_axis_obj, in_line_width = 5.0):
super(PivotAxis, self).__init__(in_pivot_axis_obj)
self.pivot_obj = in_pivot_axis_obj
self.origin_loc = self.pivot_obj.origin_loc
self.line_width = in_line_width
self.render_stipple_line = False
# List of color for each vertices of the box
self.colors = [
[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
]
def on_draw(self):
super(PivotAxis, self).on_draw()
glEnable(GL_LINE_SMOOTH)
if (self.render_stipple_line):
glEnable(GL_LINE_STIPPLE)
line_pattern = 0x00ff
            line_stipple_factor = 1
            glLineStipple(line_stipple_factor, line_pattern)
glLineWidth(self.line_width)
glBegin(GL_LINES)
# X axis
glColor3ub(self.colors[0][0], self.colors[0][1], self.colors[0][2])
glVertex3f(self.origin_loc[0], self.origin_loc[1], self.origin_loc[2])
glVertex3f(self.pivot_obj.x_axis[0], self.pivot_obj.x_axis[1], self.pivot_obj.x_axis[2])
# Y axis
glColor3ub(self.colors[1][0], self.colors[1][1], self.colors[1][2])
glVertex3f(self.origin_loc[0], self.origin_loc[1], self.origin_loc[2])
glVertex3f(self.pivot_obj.y_axis[0], self.pivot_obj.y_axis[1], self.pivot_obj.y_axis[2])
# Z axis
glColor3ub(self.colors[2][0], self.colors[2][1], self.colors[2][2])
glVertex3f(self.origin_loc[0], self.origin_loc[1], self.origin_loc[2])
glVertex3f(self.pivot_obj.z_axis[0], self.pivot_obj.z_axis[1], self.pivot_obj.z_axis[2])
glEnd()
glColor3ub(255, 255, 255)
glLineWidth(1.0)
if (self.render_stipple_line):
glDisable(GL_LINE_STIPPLE)
glDisable(GL_LINE_SMOOTH) | Dataset_Utilities-master | nvdu/viz/pivot_axis.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import cv2
import numpy as np
from pyglet.gl import *
class BackgroundImage(object):
def __init__(self, width = 0, height = 0):
self.width = width
self.height = height
self.location = [0, 0, 0]
self.vlist = pyglet.graphics.vertex_list(4,
('v2f', [0,0, width,0, 0,height, width,height]),
('t2f', [0,0, width,0, 0,height, width,height]))
self.scale = [self.width, self.height, 1.0]
@classmethod
def create_from_numpy_image_data(cls, numpy_image_data, width = 0, height = 0):
img_width = numpy_image_data.shape[1] if (width == 0) else width
img_height = numpy_image_data.shape[0] if (height == 0) else height
new_image = cls(img_width, img_height)
new_image.load_image_data_from_numpy(numpy_image_data)
return new_image
@classmethod
def create_from_file_path(cls, image_file_path, width = 0, height = 0):
image_np = np.array(cv2.imread(image_file_path))
image_np = image_np[:,:,::-1] # Convert BGR to RGB format. Alternatively, use cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return cls.create_from_numpy_image_data(image_np, width, height)
def load_image_data_from_numpy(self, numpy_image_data):
width = numpy_image_data.shape[1]
height = numpy_image_data.shape[0]
color_channel_count = numpy_image_data.shape[2]
pitch = -width * color_channel_count
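        # A negative pitch tells pyglet the rows run top-to-bottom (the
        # numpy/OpenCV convention), so the texture is not vertically flipped.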
# print('numpy_image_data.shape: {}'.format(numpy_image_data.shape))
img_data = numpy_image_data
        # numpy removed ndarray.tostring(); tobytes() is the equivalent call.
        img_data = img_data.tobytes()
self.image = pyglet.image.ImageData(width, height, 'RGB', img_data, pitch)
self.texture = self.image.get_texture(True, True)
def load_image_from_file(self, image_file_path):
self.image = pyglet.image.load(image_file_path)
self.texture = self.image.get_texture(True, True)
    def load_new_image(self, image_file_path):
        # Same behavior as load_image_from_file; kept as an alias for existing callers.
        self.load_image_from_file(image_file_path)
# print('Texture: {} - id: {} - target:{} - width: {} - height: {}'.format(
# self.texture, self.texture.id, self.texture.target, self.texture.width, self.texture.height))
# print('GL_TEXTURE_RECTANGLE_ARB: {} - GL_TEXTURE_RECTANGLE_NV: {} - GL_TEXTURE_2D: {}'.format(
# GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_RECTANGLE_NV, GL_TEXTURE_2D))
def draw(self):
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
x, y, z = self.location
glTranslatef(x, y, z)
glColor3f(1, 1, 1)
texture_target = self.texture.target
glEnable(texture_target)
glBindTexture(texture_target, self.texture.id)
self.vlist.draw(GL_TRIANGLE_STRIP)
glDisable(texture_target)
glPopMatrix()
| Dataset_Utilities-master | nvdu/viz/background_image.py |
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
# License. (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
import future
import os
from os import path
import pyglet
from pyglet.window import key
from nvdu.viz.nvdu_visualizer import *
from nvdu.core.nvdu_data import *
class NVDUVizWindow(pyglet.window.Window):
DEFAULT_EXPORT_DIR = "viz"
def __init__(self, width, height, caption =''):
super(NVDUVizWindow, self).__init__(width, height, caption)
self._org_caption = caption
print('Window created: width = {} - height = {} - title = {}'.format(self.width, self.height, self.caption))
# print('Window context: {} - config: {}'.format(self.context, self.context.config))
self.frame_index = 0
self.visualizer = NVDUVisualizer()
self.auto_change_frame = False
self.auto_fps = 0
self._dataset = None
self.export_dir = ""
self._should_export = False
@property
def dataset(self):
return self._dataset
@dataset.setter
def dataset(self, new_dataset):
self._dataset = new_dataset
frame_count = self._dataset.scan()
print("Number of frames in the dataset: {}".format(frame_count))
@property
def should_export(self):
# Can export if the export directory is valid
# return not (not self.export_dir)
return self._should_export and self.export_dir
@should_export.setter
def should_export(self, new_export):
self._should_export = new_export
def set_caption_postfix(self, postfix):
self.set_caption(self._org_caption + postfix)
def setup(self):
glClearColor(0, 0, 0, 1)
glEnable(GL_DEPTH_TEST)
# glEnable(GL_DEPTH_CLAMP)
glFrontFace(GL_CCW)
# glFrontFace(GL_CW)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.visualize_current_frame()
def on_draw(self):
# Clear the current GL Window
self.clear()
self.update_text_color()
if (self.visualizer):
self.visualizer.draw()
if (self.should_export):
self.save_current_viz_frame()
def on_resize(self, width, height):
super(NVDUVizWindow, self).on_resize(width, height)
# set the Viewport
glViewport(0, 0, width, height)
self.visualizer.viewport.size = [width, height]
# new_cam_intrinsic_settings = CameraIntrinsicSettings.from_perspective_fov_horizontal(width, height, CAMERA_FOV_HORIZONTAL)
# self.visualizer.camera.set_instrinsic_settings(new_cam_intrinsic_settings)
def set_camera_intrinsic_settings(self, new_cam_intrinsic_settings):
# print("set_camera_intrinsic_settings: {}".format(new_cam_intrinsic_settings))
self.visualizer.camera.set_instrinsic_settings(new_cam_intrinsic_settings)
# Save the current screenshot to a file
def save_screenshot(self, export_path):
screen_image = pyglet.image.get_buffer_manager().get_color_buffer()
screen_image.save(export_path)
print("save_screenshot: {}".format(export_path))
def save_current_viz_frame(self):
# TODO: Should ignore? if the visualized frame already exist
current_frame_name = self.dataset.get_frame_name_from_index(self.frame_index)
# TODO: May need to add config to control the viz postfix
viz_frame_file_name = current_frame_name + "_viz.png"
export_viz_path = path.join(self.export_dir, viz_frame_file_name)
if not path.exists(self.export_dir):
os.makedirs(self.export_dir)
self.save_screenshot(export_viz_path)
# ========================== DATA PROCESSING ==========================
def visualize_current_frame(self):
print('Visualizing frame: {}'.format(self.frame_index))
self.visualizer.visualize_dataset_frame(self.dataset, self.frame_index)
def set_frame_index(self, new_frame_index):
total_frame_count = self.dataset.frame_count
if (new_frame_index < 0):
new_frame_index += total_frame_count
# TODO: May need to update the total frame count when it's invalid
if (total_frame_count > 0):
new_frame_index = new_frame_index % total_frame_count
if (self.frame_index != new_frame_index):
self.frame_index = new_frame_index
self.visualize_current_frame()
# ========================== INPUT CONTROL ==========================
def on_key_press(self, symbol, modifiers):
super(NVDUVizWindow, self).on_key_press(symbol, modifiers)
if (symbol == key.F3):
self.toggle_cuboid2d_overlay()
        elif (symbol == key.F4):
self.toggle_cuboid3d_overlay()
elif (symbol == key.F5):
self.toggle_object_overlay()
elif (symbol == key.F6):
self.toggle_pivot()
elif (symbol == key.F7):
self.toggle_info_overlay()
elif (symbol == key.F8):
self.toggle_keypoint2d_overlay()
elif (symbol == key.F12):
self.toggle_export_viz_frame()
elif (symbol == key._1):
self.visualizer.set_render_mode(RenderMode.normal)
elif (symbol == key._2):
self.visualizer.set_render_mode(RenderMode.wire_frame)
elif (symbol == key._3):
self.visualizer.set_render_mode(RenderMode.point)
elif (symbol == key.SPACE):
self.toggle_auto_change_frame()
def on_text_motion(self, motion):
if motion == key.LEFT:
self.set_frame_index(self.frame_index - 1)
elif motion == key.RIGHT:
self.visualize_next_frame()
elif motion == key.UP:
self.set_frame_index(self.frame_index + 100)
elif motion == key.DOWN:
self.set_frame_index(self.frame_index - 100)
def visualize_next_frame(self, dt=0):
self.set_frame_index(self.frame_index + 1)
def toggle_export_viz_frame(self):
self._should_export = not self._should_export
if (self._should_export and not self.export_dir):
self.export_dir = NVDUVizWindow.DEFAULT_EXPORT_DIR
self.update_text_color()
def update_text_color(self):
# Use different color when we are exporting visualized frame
if (self.should_export):
self.set_caption_postfix(" - Exporting ...")
self.visualizer.set_text_color((255, 0, 0, 255))
else:
self.set_caption_postfix("")
self.visualizer.set_text_color((255, 255, 255, 255))
def toggle_cuboid2d_overlay(self):
self.visualizer.toggle_cuboid2d_overlay()
def toggle_cuboid3d_overlay(self):
self.visualizer.toggle_cuboid3d_overlay()
def toggle_object_overlay(self):
self.visualizer.toggle_object_overlay()
def toggle_pivot(self):
self.visualizer.toggle_pivot_axis()
def toggle_keypoint2d_overlay(self):
self.visualizer.toggle_keypoint2d_overlay()
def toggle_info_overlay(self):
self.visualizer.toggle_info_overlay()
def toggle_auto_change_frame(self):
self.set_auto_change_frame(not self.auto_change_frame)
def set_auto_fps(self, new_fps):
self.auto_fps = new_fps
if (new_fps <= 0):
self.set_auto_change_frame(False)
else:
self.set_auto_change_frame(True)
def set_auto_change_frame(self, new_bool):
if (self.auto_change_frame == new_bool):
return
self.auto_change_frame = new_bool
if (self.auto_change_frame):
print("Start auto changing frame ...")
wait_duration = 1.0 / self.auto_fps
pyglet.clock.schedule_interval(self.visualize_next_frame, wait_duration)
else:
print("Stop auto changing frame ...")
pyglet.clock.unschedule(self.visualize_next_frame)
| Dataset_Utilities-master | nvdu/viz/nvdu_viz_window.py |
# Import SparkSession
from pyspark.sql import SparkSession
import time
# Create SparkSession
spark = SparkSession.builder.appName("s3-read-test").getOrCreate()
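# The bucket below is public; on a cluster without AWS credentials attached,
# anonymous access can be enabled via the hadoop-aws credentials provider
# (a sketch -- add this .config() call on the builder before getOrCreate()):
#   .config("spark.hadoop.fs.s3a.aws.credentials.provider",
#           "org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider")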
# this is a public bucket containing some sample data for testing
data_path = "s3a://tpcds-share-emr/sf10-parquet/useDecimal=true,useDate=true,filterNull=false/call_center"
column_name = "cc_call_center_id"
spark.read.parquet(data_path).select(column_name).show()
# sleep 300 seconds to allow user to check the Spark UI
time.sleep(300)
spark.stop()
| spark-rapids-container-dev | k8s/read-s3-test.py |
# NVIDIA GPU metric module using the Python bindings for NVML
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions of this file are copied/derived from
# https://github.com/ganglia/gmond_python_modules/blob/master/gpu/nvidia/python_modules/nvidia.py
#
# (C)opyright 2011, 2012 Bernard Li <[email protected]>
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import datetime
from pynvml import *
from random import randint
import time
descriptors = list()
device = 0
eventSet = 0
violation_dur = [0] * 32  # per-GPU accumulated violation time (up to 32 GPUs)
def find_descriptor(name):
    '''Return the descriptor based on the name'''
for d in descriptors:
if d['name'] == name:
return d
def build_descriptor(name, call_back, time_max, value_type, units, slope, format, description, groups):
    '''Build descriptor from arguments and append it to the global descriptors list if call_back does not return with error'''
d = {'name': name,
'call_back': call_back,
'time_max': time_max,
'value_type': value_type,
'units': units,
'slope': slope,
'format': format,
'description': description,
'groups': groups,
}
try:
call_back(name)
descriptors.append(d)
except NVMLError, err:
print "Failed to build descriptor :", name, ":", str(err)
pass
except NameError, err:
print "Failed to build descriptor :", name, ":", str(err)
pass
def get_gpu_num():
return int(nvmlDeviceGetCount())
def get_gpu_use_num(name):
use_num = 0
for i in range(get_gpu_num()):
is_use = gpu_device_handler('gpu%s_process' %i)
if(int(is_use)):
use_num += 1
return use_num
def gpu_num_handler(name):
return get_gpu_num()
def gpu_driver_version_handler(name):
return nvmlSystemGetDriverVersion()
def gpu_get_device_by_name(name):
d = find_descriptor(name)
(gpu, metric) = name.split('_', 1)
gpu_id = int(gpu.split('gpu')[1])
gpu_device = nvmlDeviceGetHandleByIndex(gpu_id)
return gpu_device
def gpu_device_handler(name):
    global violation_dur
(gpu, metric) = name.split('_', 1)
gpu_id = int(gpu.split('gpu')[1])
gpu_device = gpu_get_device_by_name(name)
if (metric == 'type'):
return nvmlDeviceGetName(gpu_device)
elif (metric == 'uuid'):
return nvmlDeviceGetUUID(gpu_device)
elif (metric == 'pci_id'):
return nvmlDeviceGetPciInfo(gpu_device).pciDeviceId
elif (metric == 'temp'):
return nvmlDeviceGetTemperature(gpu_device, NVML_TEMPERATURE_GPU)
elif (metric == 'mem_total'):
return int(nvmlDeviceGetMemoryInfo(gpu_device).total/(1024*1024))
elif (metric == 'fb_memory'):
return int(nvmlDeviceGetMemoryInfo(gpu_device).used/1048576)
elif (metric == 'util'):
return nvmlDeviceGetUtilizationRates(gpu_device).gpu
elif (metric == 'mem_util'):
return nvmlDeviceGetUtilizationRates(gpu_device).memory
elif (metric == 'fan'):
try:
return nvmlDeviceGetFanSpeed(gpu_device)
except NVMLError, nvmlError:
# Not all GPUs have fans - a fatal error would not be appropriate
if NVML_ERROR_NOT_SUPPORTED == nvmlError.value:
return 0
elif (metric == 'ecc_mode'):
try:
ecc_mode = nvmlDeviceGetPendingEccMode(gpu_device)
if (NVML_FEATURE_DISABLED == ecc_mode):
return "OFF"
elif (NVML_FEATURE_ENABLED == ecc_mode):
return "ON"
else:
return "UNKNOWN"
except NVMLError, nvmlError:
if NVML_ERROR_NOT_SUPPORTED == nvmlError.value:
return 'N/A'
elif (metric == 'perf_state' or metric == 'performance_state'):
state = nvmlDeviceGetPerformanceState(gpu_device)
try:
int(state)
return "P%s" % state
except ValueError:
return state
elif (metric == 'graphics_clock_report'):
return nvmlDeviceGetClockInfo(gpu_device, NVML_CLOCK_GRAPHICS)
elif (metric == 'sm_clock_report'):
return nvmlDeviceGetClockInfo(gpu_device, NVML_CLOCK_SM)
elif (metric == 'mem_clock_report'):
return nvmlDeviceGetClockInfo(gpu_device, NVML_CLOCK_MEM)
elif (metric == 'max_graphics_clock'):
return nvmlDeviceGetMaxClockInfo(gpu_device, NVML_CLOCK_GRAPHICS)
elif (metric == 'max_sm_clock'):
return nvmlDeviceGetMaxClockInfo(gpu_device, NVML_CLOCK_SM)
elif (metric == 'max_mem_clock'):
return nvmlDeviceGetMaxClockInfo(gpu_device, NVML_CLOCK_MEM)
elif (metric == 'power_usage_report'):
return nvmlDeviceGetPowerUsage(gpu_device)/1000
elif (metric == 'serial'):
return nvmlDeviceGetSerial(gpu_device)
elif (metric == 'power_man_mode'):
pow_man_mode = nvmlDeviceGetPowerManagementMode(gpu_device)
if (NVML_FEATURE_DISABLED == pow_man_mode):
return "OFF"
elif (NVML_FEATURE_ENABLED == pow_man_mode):
return "ON"
else:
return "UNKNOWN"
elif (metric == 'power_man_limit'):
powerLimit = nvmlDeviceGetPowerManagementLimit(gpu_device)
return powerLimit/1000
elif (metric == 'ecc_db_error'):
eccCount = nvmlDeviceGetTotalEccErrors(gpu_device, 1, 1)
return eccCount
elif (metric == 'ecc_sb_error'):
eccCount = nvmlDeviceGetTotalEccErrors(gpu_device, 0, 1)
return eccCount
elif (metric == 'bar1_memory'):
memory = nvmlDeviceGetBAR1MemoryInfo(gpu_device)
return int(memory.bar1Used/1000000)
elif (metric == 'bar1_max_memory'):
memory = nvmlDeviceGetBAR1MemoryInfo(gpu_device)
return int(memory.bar1Total/1000000)
elif (metric == 'shutdown_temp'):
return nvmlDeviceGetTemperatureThreshold(gpu_device,0)
elif (metric == 'slowdown_temp'):
return nvmlDeviceGetTemperatureThreshold(gpu_device,1)
elif (metric == 'encoder_util'):
return int(nvmlDeviceGetEncoderUtilization(gpu_device)[0])
elif (metric == 'decoder_util'):
return int(nvmlDeviceGetDecoderUtilization(gpu_device)[0])
elif (metric == 'power_violation_report'):
violationData = nvmlDeviceGetViolationStatus(gpu_device, 0)
newTime = violationData.violationTime
if (violation_dur[gpu_id] == 0):
violation_dur[gpu_id] = newTime
diff = newTime - violation_dur[gpu_id]
        # violationTime is reported in nanoseconds; over the ~10 s sampling
        # window the throttled percentage is (diff / 10e9) * 100, i.e. diff / 1e8.
        rate = diff / 100000000
violation_dur[gpu_id] = newTime
print rate
return rate
elif (metric == 'process'):
procs = nvmlDeviceGetComputeRunningProcesses(gpu_device)
return len(procs)
else:
print "Handler for %s not implemented, please fix in gpu_device_handler()" % metric
os._exit(0)
def metric_init(params):
global descriptors
try:
nvmlInit()
except NVMLError, err:
print "Failed to initialize NVML:", str(err)
print "GPU metrics will not be collected..."
return descriptors
default_time_max = 90
build_descriptor('gpu_num', gpu_num_handler, default_time_max, 'uint', 'GPUs', 'zero', '%u', 'Total number of GPUs', 'gpu')
    build_descriptor('gpu_use_num', get_gpu_use_num, default_time_max, 'uint', 'GPUs', 'zero', '%u', 'Total number of GPUs in use', 'gpu')
build_descriptor('gpu_driver', gpu_driver_version_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU Driver Version', 'gpu')
for i in range(get_gpu_num()):
build_descriptor('gpu%s_type' % i, gpu_device_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU%s Type' % i, 'gpu')
build_descriptor('gpu%s_graphics_clock_report' % i, gpu_device_handler, default_time_max, 'uint', 'MHz', 'both', '%u', 'GPU%s Graphics Clock' % i, 'gpu')
build_descriptor('gpu%s_sm_clock_report' % i, gpu_device_handler, default_time_max, 'uint', 'MHz', 'both', '%u', 'GPU%s SM Clock' % i, 'gpu')
build_descriptor('gpu%s_mem_clock_report' % i, gpu_device_handler, default_time_max, 'uint', 'MHz', 'both', '%u', 'GPU%s Memory Clock' % i, 'gpu')
build_descriptor('gpu%s_uuid' % i, gpu_device_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU%s UUID' % i, 'gpu')
build_descriptor('gpu%s_pci_id' % i, gpu_device_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU%s PCI ID' % i, 'gpu')
build_descriptor('gpu%s_temp' % i, gpu_device_handler, default_time_max, 'uint', 'C', 'both', '%u', 'Temperature of GPU %s' % i, 'gpu,temp')
build_descriptor('gpu%s_mem_total' % i, gpu_device_handler, default_time_max, 'uint', 'MB', 'zero', '%u', 'GPU%s FB Memory Total' %i, 'gpu')
build_descriptor('gpu%s_fb_memory' % i, gpu_device_handler, default_time_max, 'uint', 'MB', 'both', '%u', 'GPU%s FB Memory Used' %i, 'gpu')
build_descriptor('gpu%s_ecc_mode' % i, gpu_device_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU%s ECC Mode' %i, 'gpu')
#build_descriptor('gpu%s_perf_state' % i, gpu_device_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU%s Performance State' %i, 'gpu')
build_descriptor('gpu%s_util' % i, gpu_device_handler, default_time_max, 'uint', '%', 'both', '%u', 'GPU%s Utilization' %i, 'gpu')
build_descriptor('gpu%s_mem_util' % i, gpu_device_handler, default_time_max, 'uint', '%', 'both', '%u', 'GPU%s Memory Utilization' %i, 'gpu')
build_descriptor('gpu%s_fan' % i, gpu_device_handler, default_time_max, 'uint', '%', 'both', '%u', 'GPU%s Fan Speed' %i, 'gpu')
build_descriptor('gpu%s_power_usage_report' % i, gpu_device_handler, default_time_max, 'uint', 'watts', 'both', '%u', 'GPU%s Power Usage' % i, 'gpu')
# Added for version 2.285
build_descriptor('gpu%s_max_graphics_clock' % i, gpu_device_handler, default_time_max, 'uint', 'MHz', 'zero', '%u', 'GPU%s Max Graphics Clock' % i, 'gpu')
build_descriptor('gpu%s_max_sm_clock' % i, gpu_device_handler, default_time_max, 'uint', 'MHz', 'zero', '%u', 'GPU%s Max SM Clock' % i, 'gpu')
build_descriptor('gpu%s_max_mem_clock' % i, gpu_device_handler, default_time_max, 'uint', 'MHz', 'zero', '%u', 'GPU%s Max Memory Clock' % i, 'gpu')
build_descriptor('gpu%s_serial' % i, gpu_device_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU%s Serial' % i, 'gpu')
#build_descriptor('gpu%s_power_man_mode' % i, gpu_device_handler, default_time_max, 'string', '', 'zero', '%s', 'GPU%s Power Management' % i, 'gpu')
# Driver version 340.25
build_descriptor('gpu%s_power_man_limit' % i, gpu_device_handler, default_time_max, 'uint', 'Watts', 'zero', '%u', 'GPU%s Power Management Limit' % i, 'gpu')
build_descriptor('gpu%s_ecc_db_error' % i, gpu_device_handler, default_time_max, 'uint', 'No Of Errors', 'both', '%u', 'GPU%s ECC Report' % i, 'gpu')
build_descriptor('gpu%s_ecc_sb_error' % i, gpu_device_handler, default_time_max, 'uint', 'No Of Errors', 'both', '%u', 'GPU%s Single Bit ECC' % i, 'gpu')
build_descriptor('gpu%s_power_violation_report' % i, gpu_device_handler, default_time_max, 'uint', '', 'both', '%u', 'GPU%s Power Violation Report' % i, 'gpu')
build_descriptor('gpu%s_bar1_memory' % i, gpu_device_handler, default_time_max, 'uint', 'MB', 'both', '%u', 'GPU%s Bar1 Memory Used' % i, 'gpu')
build_descriptor('gpu%s_bar1_max_memory' % i, gpu_device_handler, default_time_max, 'uint', 'MB', 'zero', '%u', 'GPU%s Bar1 Memory Total' % i, 'gpu')
build_descriptor('gpu%s_shutdown_temp' % i, gpu_device_handler, default_time_max, 'uint', 'C', 'zero', '%u', 'GPU%s Type' % i, 'gpu')
build_descriptor('gpu%s_slowdown_temp' % i, gpu_device_handler, default_time_max, 'uint', 'C', 'zero', '%u', 'GPU%s Type' % i, 'gpu')
build_descriptor('gpu%s_encoder_util' % i, gpu_device_handler, default_time_max, 'uint', '%', 'both', '%u', 'GPU%s Type' % i, 'gpu')
build_descriptor('gpu%s_decoder_util' % i, gpu_device_handler, default_time_max, 'uint', '%', 'both', '%u', 'GPU%s Type' % i, 'gpu')
return descriptors
def metric_cleanup():
'''Clean up the metric module.'''
try:
nvmlShutdown()
except NVMLError, err:
print "Error shutting down NVML:", str(err)
return 1
#This code is for debugging and unit testing
if __name__ == '__main__':
metric_init({})
for d in descriptors:
v = d['call_back'](d['name'])
if d['value_type'] == 'uint':
print 'value for %s is %u %s' % (d['name'], v, d['units'])
elif d['value_type'] == 'float' or d['value_type'] == 'double':
print 'value for %s is %f %s' % (d['name'], v, d['units'])
elif d['value_type'] == 'string':
print 'value for %s is %s %s' % (d['name'], v, d['units'])
if descriptors:
metric_cleanup()
| spark-rapids-container-dev | Databricks/ganglia/python_modules/nvidia.py |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Richard Hughes <[email protected]>
# Licensed under the GNU General Public License Version or later
from __future__ import print_function
import sys
import json
def main():
if len(sys.argv) != 2:
print("usage: %s supported-gpus.json" % sys.argv[0])
return 1
# open file
f = open(sys.argv[1])
data = json.load(f)
pids = []
for chip in data['chips']:
pid = int(chip['devid'], 16)
if "legacybranch" not in chip.keys():
if not pid in pids:
pids.append(pid)
# output
for pid in pids:
vid = 0x10de
print("pci:v%08Xd%08Xsv*sd*bc*sc*i*" % (vid, pid))
if __name__ == "__main__":
main()
| yum-packaging-nvidia-driver-main | parse-supported-gpus.py |
#!/usr/bin/env python3
##
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import struct
import argparse
import numpy
import warnings
delimiter = ','
def fixInput(val):
if val is None:
return 0
try:
retval = int(val)
except ValueError:
retval = ord(val)
return retval
if len(sys.argv) != 5 and len(sys.argv) != 6:
print("Usage:")
print("\tcsv_to_binary.py <input filename> <column choice> <datatype> <output filename> [delimiter]")
print()
print("This program converts one column of a text file containing a table of data,")
print("(a comma-separated values file by default), into a binary file.")
print()
print("The <column choice> should be an integer in the range [0, N-1], where N is the number of columns.")
print("The <datatype> option should be one of 'int', 'long', 'float', 'double', or 'string'.")
print("'string' keeps the text, converting it to UTF-16 with no separators between the values.")
print("The [delimiter] is an optional argument, and defaults to '%s'" % delimiter)
print("Some delimiters may need to be surrounded by quotation marks or prefixed by a backslash, depending on")
print("the shell, for example space, semicolon, or vertical pipe, due to the command line parsing")
print("interpreting the space or semicolon as a parameter separator or command separator, instead of a")
print("parameter to this script.")
print()
print("Examples:")
print(" text_to_binary.py ExampleFloatData.csv 2 float ZValues.bin")
print(" text_to_binary.py ExampleTable.txt 5 long Dates.bin '|'")
print(" text_to_binary.py SpaceSeparatedData.txt 0 int FirstColumn.bin ' '")
print()
exit()
in_fname = sys.argv[1]
col_num = sys.argv[2]
datatype = sys.argv[3]
out_fname = sys.argv[4]
if len(sys.argv) == 6:
delimiter = sys.argv[5]
# Add more datatypes if needed
if datatype == "int":
dtype = "int32"
elif datatype == "long":
dtype = "int64"
elif datatype == "float":
dtype = "float32"
elif datatype == "double":
dtype = "float64"
elif datatype == "string":
dtype = "str"
else:
print("Please select datatype int, long, float, double, or string")
exit()
print("Reading column " + col_num + ", of type " + datatype + "...")
chunk_size = 10000000
finished = False
offset = 0
with open(str(in_fname), "r") as inFile:
with open(str(out_fname), "wb") as newFile:
with warnings.catch_warnings():
while not finished:
in_data=numpy.genfromtxt(inFile, dtype=dtype,
max_rows=chunk_size, usecols=(int(col_num),), delimiter=delimiter, loose=False)
if offset == 0:
# don't warn about an empty file after we have read something
warnings.filterwarnings('ignore', r'genfromtxt: Empty input file:')
if in_data.size > 0:
in_data.tofile(newFile)
offset += in_data.size
else:
finished = True
if offset != 0:
print('Wrote '+str(offset)+' '+datatype+'s to '+str(out_fname))
else:
print('Wrote no data')
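# A quick way to verify the output (a sketch; the file name and dtype match
# the first usage example above -- adjust to whatever conversion you ran):
#   import numpy
#   print(numpy.fromfile("ZValues.bin", dtype="float32")[:10])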
| nvcomp-main | benchmarks/text_to_binary.py |
#! /usr/bin/env python
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str, help="the file to read the data from")
parser.add_argument("plot_directory", type=str, help="the file to read the data from", nargs='*', default='plots')
args = parser.parse_args()
df = pd.read_csv(args.input_file)
df['dataset'] = df['dataset'].str.replace("mortgage-2009Q2-float-columns.bin", "Mortgage Float", regex=False)
df['dataset'] = df['dataset'].str.replace("mortgage-2009Q2-string-columns.bin", "Mortgage String", regex=False)
df['dataset'] = df['dataset'].str.replace("mortgage-2009Q2-col0-long.bin", "Mortgage Long", regex=False)
df['dataset'] = df['dataset'].str.replace("geometrycache.tar", "Geometry", regex=False)
df['dataset'] = df['dataset'].str.replace("texturecache.tar", "Texture", regex=False)
df['dataset'] = df['dataset'].str.replace("silesia.tar", "Silesia", regex=False)
df['dataset'] = df['dataset'].str.replace(" ", "\n", regex=False)
if not os.path.exists(args.plot_directory):
os.makedirs(args.plot_directory)
sns.set(style="whitegrid")
for interface in ['LL', 'HL']:
for metric in ['compression_ratio', 'compression_throughput', 'decompression_throughput']:
plt.figure()
title_metric = metric.replace('_',' ').title()
bar = sns.barplot(x='dataset', y=metric, hue='algorithm', data=df[df.interface==interface])
for container in bar.containers:
bar.bar_label(container, fmt="%.1f")
bar.set_yscale("log")
plt.title(f"{interface.replace('LL', 'Low Level').replace('HL', 'High Level')} {title_metric}")
plt.xlabel('')
plt.ylabel(title_metric)
plt.legend(loc=(1.04,0))
plt.savefig(f"{args.plot_directory}/{metric}-{interface}.png", bbox_inches='tight')
plt.show()
if __name__ == "__main__":
main()
| nvcomp-main | benchmarks/generate_bar_charts.py |
#!/usr/bin/python
##
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import csv
import os
import subprocess
if len(sys.argv) != 3:
print "Usage: python allgather_runall.py <binary input filename> <Max number of GPUs>"
    sys.exit(1)
filename = sys.argv[1]
maxgpus = sys.argv[2]
print "Starting benchmark on file:", filename, " - using 2 -", maxgpus, "GPUs"
log = open('allgather-results.log', 'w')
with open('allgather-results.csv', 'w') as f:
thewriter = csv.writer(f)
thewriter.writerow(['Filename', 'num GPUs', 'chunks per GPU', 'No-comp throughput', 'LZ4 throughput', 'Cascaded throughput'])
    for gpus in range(2, int(maxgpus)+1):
        print "Testing using", gpus, "GPUs..."
        for chunks in [1, 2, 4]:
            # Run each codec once and keep the last whitespace-separated
            # field of the benchmark output, which is the reported throughput.
            throughput = {}
            for codec in ['none', 'lz4', 'cascaded']:
                cmd = './bin/benchmark_allgather -f ' + str(filename) + ' -g ' + str(gpus) + ' -h ' + str(gpus*chunks) + ' -c ' + codec
                log.write(cmd + "\n")
                result = subprocess.check_output(cmd, shell=True)
                log.write(result + "\n")
                throughput[codec] = result.split()[-1]
            thewriter.writerow([filename, gpus, chunks, throughput['none'], throughput['lz4'], throughput['cascaded']])
| nvcomp-main | benchmarks/allgather_runall.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2020 Carlo Caione <[email protected]>
#
# Derived from plat-stm32mp1/scripts/stm32image.py
#
import argparse
import struct
import mmap
header_size = 0x200
ext_magic_number = 0x12348765
version = 0x00002710
def get_size(file):
file.seek(0, 2) # End of the file
size = file.tell()
return size
def aml_set_header(dest_fd, entry, res_mem_start, res_mem_size, sec_mem_start,
sec_mem_size):
dest_fd.seek(0, 0)
dest_fd.write(struct.pack('<IIQQQQQ',
ext_magic_number,
version,
entry,
res_mem_start,
res_mem_size,
sec_mem_start,
sec_mem_size))
# Padding
dest_fd.write(b'\x00' * 464)
dest_fd.close()
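# Layout check: the packed fields above occupy 4 + 4 + 5 * 8 = 48 bytes, and
# 48 + 464 bytes of padding = 512 = 0x200, matching header_size.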
def aml_create_header_file(source, dest, entry, res_mem_start, res_mem_size,
sec_mem_start, sec_mem_size):
dest_fd = open(dest, 'w+b')
src_fd = open(source, 'rb')
dest_fd.write(b'\x00' * header_size)
sizesrc = get_size(src_fd)
if sizesrc > 0:
mmsrc = mmap.mmap(src_fd.fileno(), 0, access=mmap.ACCESS_READ)
dest_fd.write(mmsrc[:sizesrc])
mmsrc.close()
src_fd.close()
aml_set_header(dest_fd, entry, res_mem_start, res_mem_size, sec_mem_start,
sec_mem_size)
dest_fd.close()
def auto_int(x):
return int(x, 0)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--source',
required=True,
help='Source file')
parser.add_argument('--dest',
required=True,
help='Destination file')
parser.add_argument('--entry',
required=True,
type=auto_int,
help='Entry point')
parser.add_argument('--res_mem_start',
required=True,
type=auto_int,
help='Reserved memory start')
parser.add_argument('--res_mem_size',
required=True,
type=auto_int,
help='Reserved memory size')
parser.add_argument('--sec_mem_start',
required=True,
type=auto_int,
help='Secure memory start')
parser.add_argument('--sec_mem_size',
required=True,
type=auto_int,
help='Secure memory size')
return parser.parse_args()
def main():
args = get_args()
source_file = args.source
destination_file = args.dest
entry_point = args.entry
res_mem_start = args.res_mem_start
res_mem_size = args.res_mem_size
sec_mem_start = args.sec_mem_start
sec_mem_size = args.sec_mem_size
aml_create_header_file(source_file,
destination_file,
entry_point,
res_mem_start,
res_mem_size,
sec_mem_start,
sec_mem_size)
if __name__ == "__main__":
main()
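# Example invocation (hypothetical values -- the real addresses come from the
# platform build system):
#   ./aml_bin2img.py --source tee.bin --dest tee.img --entry 0x05300000 \
#       --res_mem_start 0x05300000 --res_mem_size 0x02000000 \
#       --sec_mem_start 0x05300000 --sec_mem_size 0x00400000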
| optee_os-nvidia-rel-35 | core/arch/arm/plat-amlogic/scripts/aml_bin2img.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2017-2018, STMicroelectronics
#
import argparse
import struct
import mmap
header_size = 256
hdr_magic_number = 0x324D5453 # magic ='S' 'T' 'M' 0x32
hdr_header_ver_variant = 0
hdr_header_ver_minor = 0
hdr_header_ver_major = 1
hdr_version_number = 0
hdr_option_flags = 1 # bit0=1 no signature
hdr_edcsa_algo = 1
def get_size(file):
file.seek(0, 2) # End of the file
size = file.tell()
return size
def stm32image_checksum(dest_fd, sizedest):
csum = 0
if sizedest < header_size:
return 0
dest_fd.seek(header_size, 0)
    # Sum the payload bytes with a single read instead of one read per byte.
    csum = sum(dest_fd.read(sizedest - header_size))
return csum
def stm32image_set_header(dest_fd, load, entry, bintype):
sizedest = get_size(dest_fd)
checksum = stm32image_checksum(dest_fd, sizedest)
dest_fd.seek(0, 0)
# Magic number
dest_fd.write(struct.pack('<I', hdr_magic_number))
# Image signature (empty)
dest_fd.write(b'\x00' * 64)
# Image checksum ... EDCSA algorithm
dest_fd.write(struct.pack('<IBBBBIIIIIIII',
checksum,
hdr_header_ver_variant,
hdr_header_ver_minor,
hdr_header_ver_major,
0,
sizedest - header_size,
entry,
0,
load,
0,
hdr_version_number,
hdr_option_flags,
hdr_edcsa_algo))
# EDCSA public key (empty)
dest_fd.write(b'\x00' * 64)
# Padding
dest_fd.write(b'\x00' * 83)
dest_fd.write(struct.pack('<B', bintype))
dest_fd.close()
def stm32image_create_header_file(source, dest, load, entry, bintype):
dest_fd = open(dest, 'w+b')
src_fd = open(source, 'rb')
dest_fd.write(b'\x00' * header_size)
sizesrc = get_size(src_fd)
if sizesrc > 0:
mmsrc = mmap.mmap(src_fd.fileno(), 0, access=mmap.ACCESS_READ)
dest_fd.write(mmsrc[:sizesrc])
mmsrc.close()
src_fd.close()
stm32image_set_header(dest_fd, load, entry, bintype)
dest_fd.close()
def int_parse(str):
return int(str, 0)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--source',
required=True,
help='Source file')
parser.add_argument('--dest',
required=True,
help='Destination file')
parser.add_argument('--load',
required=True, type=int_parse,
help='Load address')
parser.add_argument('--entry',
required=True, type=int_parse,
help='Entry point')
parser.add_argument('--bintype',
required=True, type=int_parse,
help='Binary identification')
return parser.parse_args()
def main():
args = get_args()
source_file = args.source
destination_file = args.dest
load_address = args.load
entry_point = args.entry
binary_type = args.bintype
stm32image_create_header_file(source_file,
destination_file,
load_address,
entry_point,
binary_type)
if __name__ == "__main__":
main()
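# Example invocation (hypothetical values -- the real load address, entry
# point and binary type come from the platform build system):
#   ./stm32image.py --source tee-pager_v2.bin --dest tee-pager.stm32 \
#       --load 0xde000000 --entry 0xde000000 --bintype 0x20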
| optee_os-nvidia-rel-35 | core/arch/arm/plat-stm32mp1/scripts/stm32image.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2015, 2017, 2019, Linaro Limited
#
import sys
import math
algo = {'TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256': 0x70414930,
'TEE_ALG_RSASSA_PKCS1_V1_5_SHA256': 0x70004830}
enc_key_type = {'SHDR_ENC_KEY_DEV_SPECIFIC': 0x0,
'SHDR_ENC_KEY_CLASS_WIDE': 0x1}
SHDR_BOOTSTRAP_TA = 1
SHDR_ENCRYPTED_TA = 2
SHDR_MAGIC = 0x4f545348
SHDR_SIZE = 20
def uuid_parse(s):
from uuid import UUID
return UUID(s)
def int_parse(str):
return int(str, 0)
def get_args(logger):
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import textwrap
command_base = ['sign-enc', 'digest', 'stitch', 'verify']
command_aliases_digest = ['generate-digest']
command_aliases_stitch = ['stitch-ta']
command_aliases = command_aliases_digest + command_aliases_stitch
command_choices = command_base + command_aliases
dat = '[' + ', '.join(command_aliases_digest) + ']'
sat = '[' + ', '.join(command_aliases_stitch) + ']'
parser = ArgumentParser(
description='Sign and encrypt (optional) a Trusted Application for' +
' OP-TEE.',
usage='\n %(prog)s command [ arguments ]\n\n'
' command:\n' +
' sign-enc Generate signed and optionally encrypted loadable' +
' TA image file.\n' +
' Takes arguments --uuid, --ta-version, --in, --out,' +
' --key,\n' +
' --enc-key (optional) and' +
' --enc-key-type (optional).\n' +
' digest Generate loadable TA binary image digest' +
' for offline\n' +
' signing. Takes arguments --uuid, --ta-version,' +
' --in, --key,\n'
' --enc-key (optional), --enc-key-type (optional),' +
' --algo (optional) and --dig.\n' +
' stitch Generate loadable signed and encrypted TA binary' +
' image file from\n' +
' TA raw image and its signature. Takes' +
' arguments --uuid, --in, --key, --out,\n' +
' --enc-key (optional), --enc-key-type (optional),\n' +
' --algo (optional) and --sig.\n' +
' verify Verify signed TA binary\n' +
' Takes arguments --uuid, --in, --key\n\n' +
' %(prog)s --help show available commands and arguments\n\n',
formatter_class=RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
If no command is given, the script will default to "sign-enc".
command aliases:
The command \'digest\' can be aliased by ''' + dat + '''
The command \'stitch\' can be aliased by ''' + sat + '\n' + '''
example offline signing command using OpenSSL for algorithm
TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256:
base64 -d <UUID>.dig | \\
openssl pkeyutl -sign -inkey <KEYFILE>.pem \\
-pkeyopt digest:sha256 -pkeyopt rsa_padding_mode:pss \\
-pkeyopt rsa_pss_saltlen:digest \\
-pkeyopt rsa_mgf1_md:sha256 | \\
base64 > <UUID>.sig\n
example offline signing command using OpenSSL for algorithm
TEE_ALG_RSASSA_PKCS1_V1_5_SHA256:
base64 -d <UUID>.dig | \\
openssl pkeyutl -sign -inkey <KEYFILE>.pem \\
-pkeyopt digest:sha256 -pkeyopt rsa_padding_mode:pkcs1 | \\
base64 > <UUID>.sig
'''))
parser.add_argument(
'command', choices=command_choices, nargs='?',
default='sign-enc',
help='Command, one of [' + ', '.join(command_base) + ']')
parser.add_argument('--uuid', required=True,
type=uuid_parse, help='String UUID of the TA')
parser.add_argument('--key', required=True,
help='Name of signing key file (PEM format) or an ' +
'Amazon Resource Name (arn:) of an AWS KMS ' +
'asymmetric key')
parser.add_argument('--enc-key', required=False,
help='Encryption key string')
parser.add_argument(
'--enc-key-type', required=False, default='SHDR_ENC_KEY_DEV_SPECIFIC',
choices=list(enc_key_type.keys()),
help='Encryption key type.\n' +
'(SHDR_ENC_KEY_DEV_SPECIFIC or SHDR_ENC_KEY_CLASS_WIDE).\n' +
'Defaults to SHDR_ENC_KEY_DEV_SPECIFIC.')
parser.add_argument(
'--ta-version', required=False, type=int_parse, default=0,
help='TA version stored as a 32-bit unsigned integer and used for\n' +
'rollback protection of TA install in the secure database.\n' +
'Defaults to 0.')
parser.add_argument(
'--sig', required=False, dest='sigf',
help='Name of signature input file, defaults to <UUID>.sig')
parser.add_argument(
'--dig', required=False, dest='digf',
help='Name of digest output file, defaults to <UUID>.dig')
parser.add_argument(
'--in', required=False, dest='inf',
help='Name of application input file, defaults to <UUID>.stripped.elf')
parser.add_argument(
'--out', required=False, dest='outf',
help='Name of application output file, defaults to <UUID>.ta')
parser.add_argument('--algo', required=False, choices=list(algo.keys()),
default='TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256',
help='The hash and signature algorithm, ' +
'defaults to TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256. ' +
'Allowed values are: ' +
', '.join(list(algo.keys())), metavar='')
parsed = parser.parse_args()
# Check parameter combinations
if parsed.digf is None and \
parsed.outf is not None and \
parsed.command in ['digest'] + command_aliases_digest:
logger.error('A digest was requested, but argument --out was given.' +
' Did you mean:\n ' +
parser.prog+' --dig ' + parsed.outf + ' ...')
sys.exit(1)
if parsed.digf is not None \
and parsed.outf is not None \
and parsed.command in ['digest'] + command_aliases_digest:
logger.warning('A digest was requested, but arguments --dig and ' +
'--out were given.\n' +
' --out will be ignored.')
# Set defaults for optional arguments.
if parsed.sigf is None:
parsed.sigf = str(parsed.uuid)+'.sig'
if parsed.digf is None:
parsed.digf = str(parsed.uuid)+'.dig'
if parsed.inf is None:
parsed.inf = str(parsed.uuid)+'.stripped.elf'
if parsed.outf is None:
parsed.outf = str(parsed.uuid)+'.ta'
return parsed
def main():
from cryptography import exceptions
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import utils
import base64
import logging
import os
import struct
logging.basicConfig()
logger = logging.getLogger(os.path.basename(__file__))
args = get_args(logger)
if args.key.startswith('arn:'):
from sign_helper_kms import _RSAPrivateKeyInKMS
key = _RSAPrivateKeyInKMS(args.key)
else:
with open(args.key, 'rb') as f:
data = f.read()
try:
key = serialization.load_pem_private_key(
data,
password=None,
backend=default_backend())
except ValueError:
key = serialization.load_pem_public_key(
data,
backend=default_backend())
with open(args.inf, 'rb') as f:
img = f.read()
chosen_hash = hashes.SHA256()
h = hashes.Hash(chosen_hash, default_backend())
digest_len = chosen_hash.digest_size
sig_len = math.ceil(key.key_size / 8)
img_size = len(img)
hdr_version = args.ta_version # struct shdr_bootstrap_ta::ta_version
magic = SHDR_MAGIC
if args.enc_key:
img_type = SHDR_ENCRYPTED_TA
else:
img_type = SHDR_BOOTSTRAP_TA
shdr = struct.pack('<IIIIHH',
magic, img_type, img_size, algo[args.algo],
digest_len, sig_len)
shdr_uuid = args.uuid.bytes
shdr_version = struct.pack('<I', hdr_version)
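# On-disk layout assembled by write_image_with_signature() below:
# shdr || digest || signature || uuid || ta_version
# [|| ehdr || nonce || tag, when encrypting] || payload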
if args.enc_key:
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
cipher = AESGCM(bytes.fromhex(args.enc_key))
# Use 12 bytes for nonce per recommendation
nonce = os.urandom(12)
out = cipher.encrypt(nonce, img, None)
ciphertext = out[:-16]
# Authentication Tag is always the last 16 bytes
tag = out[-16:]
enc_algo = 0x40000810 # TEE_ALG_AES_GCM
flags = enc_key_type[args.enc_key_type]
ehdr = struct.pack('<IIHH',
enc_algo, flags, len(nonce), len(tag))
h.update(shdr)
h.update(shdr_uuid)
h.update(shdr_version)
if args.enc_key:
h.update(ehdr)
h.update(nonce)
h.update(tag)
h.update(img)
img_digest = h.finalize()
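# img_digest is the exact value that gets signed (sign-enc/stitch) or
# exported base64-encoded by the 'digest' command for offline signing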
def write_image_with_signature(sig):
with open(args.outf, 'wb') as f:
f.write(shdr)
f.write(img_digest)
f.write(sig)
f.write(shdr_uuid)
f.write(shdr_version)
if args.enc_key:
f.write(ehdr)
f.write(nonce)
f.write(tag)
f.write(ciphertext)
else:
f.write(img)
def sign_encrypt_ta():
if not isinstance(key, rsa.RSAPrivateKey):
logger.error('Provided key cannot be used for signing, ' +
'please use offline-signing mode.')
sys.exit(1)
else:
if args.algo == 'TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256':
sig = key.sign(
img_digest,
padding.PSS(
mgf=padding.MGF1(chosen_hash),
salt_length=digest_len
),
utils.Prehashed(chosen_hash)
)
elif args.algo == 'TEE_ALG_RSASSA_PKCS1_V1_5_SHA256':
sig = key.sign(
img_digest,
padding.PKCS1v15(),
utils.Prehashed(chosen_hash)
)
if len(sig) != sig_len:
raise Exception('Actual signature length is not equal to '
'the computed one: {} != {}'
.format(len(sig), sig_len))
write_image_with_signature(sig)
logger.info('Successfully signed application.')
def generate_digest():
with open(args.digf, 'wb+') as digfile:
digfile.write(base64.b64encode(img_digest))
def stitch_ta():
try:
with open(args.sigf, 'r') as sigfile:
sig = base64.b64decode(sigfile.read())
except IOError:
if not os.path.exists(args.digf):
generate_digest()
logger.error('No signature file found. Please sign\n %s\n' +
'offline and place the signature at \n %s\n' +
'or pass a different location ' +
'using the --sig argument.\n',
args.digf, args.sigf)
sys.exit(1)
else:
try:
if args.algo == 'TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256':
key.verify(
sig,
img_digest,
padding.PSS(
mgf=padding.MGF1(chosen_hash),
salt_length=digest_len
),
utils.Prehashed(chosen_hash)
)
elif args.algo == 'TEE_ALG_RSASSA_PKCS1_V1_5_SHA256':
key.verify(
sig,
img_digest,
padding.PKCS1v15(),
utils.Prehashed(chosen_hash)
)
except exceptions.InvalidSignature:
logger.error('Verification failed, ignoring given signature.')
sys.exit(1)
write_image_with_signature(sig)
logger.info('Successfully applied signature.')
def verify_ta():
# Extract header
[magic,
img_type,
img_size,
algo_value,
digest_len,
sig_len] = struct.unpack('<IIIIHH', img[:SHDR_SIZE])
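# '<IIIIHH' unpacks 4+4+4+4+2+2 = 20 bytes, matching SHDR_SIZE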
# Extract digest and signature
start, end = SHDR_SIZE, SHDR_SIZE + digest_len
digest = img[start:end]
start, end = end, SHDR_SIZE + digest_len + sig_len
signature = img[start:end]
# Extract UUID and TA version
start, end = end, end + 16 + 4
[uuid, ta_version] = struct.unpack('<16sI', img[start:end])
if magic != SHDR_MAGIC:
raise Exception("Unexpected magic: 0x{:08x}".format(magic))
if img_type != SHDR_BOOTSTRAP_TA:
raise Exception("Unsupported image type: {}".format(img_type))
if algo_value not in algo.values():
raise Exception('Unrecognized algorithm: 0x{:08x}'
.format(algo_value))
# Verify signature against hash digest
if algo_value == 0x70414930:  # TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256
key.verify(
signature,
digest,
padding.PSS(
mgf=padding.MGF1(chosen_hash),
salt_length=digest_len
),
utils.Prehashed(chosen_hash)
)
else:  # TEE_ALG_RSASSA_PKCS1_V1_5_SHA256
key.verify(
signature,
digest,
padding.PKCS1v15(),
utils.Prehashed(chosen_hash)
)
h = hashes.Hash(chosen_hash, default_backend())
# sizeof(struct shdr)
h.update(img[:SHDR_SIZE])
# sizeof(struct shdr_bootstrap_ta)
h.update(img[start:end])
# raw image
start = end
end += img_size
h.update(img[start:end])
if digest != h.finalize():
raise Exception('Hash digest does not match')
logger.info('Trusted application is correctly verified.')
# dispatch command
{
'sign-enc': sign_encrypt_ta,
'digest': generate_digest,
'generate-digest': generate_digest,
'stitch': stitch_ta,
'stitch-ta': stitch_ta,
'verify': verify_ta,
}.get(args.command, sign_encrypt_ta)()
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/sign_encrypt.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
import argparse
import sys
import zlib
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True, type=argparse.FileType('rb'),
help='The input StMM binary (BL32_AP_MM.fd)')
parser.add_argument('--output',
required=True, type=argparse.FileType('w'),
help='The output stmm_hex.c')
return parser.parse_args()
def main():
args = get_args()
inf = args.input
outf = args.output
data = inf.read()
uncompressed_size = len(data)
data = zlib.compress(data)
size = len(data)
outf.write('/* Automatically generated, do not edit */\n')
outf.write('const unsigned char stmm_image[] = {\n')
i = 0
while i < size:
if i % 8 == 0:
outf.write('\t')
outf.write('0x{:02x},'.format(data[i]))
i = i + 1
if i % 8 == 0 or i == size:
outf.write('\n')
else:
outf.write(' ')
outf.write('};\n')
outf.write('const unsigned int stmm_image_size = sizeof(stmm_image);\n')
outf.write('const unsigned int stmm_image_uncompressed_size = '
'{:d};\n'.format(uncompressed_size))
inf.close()
outf.close()
if __name__ == "__main__":
main()
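# Sample of the generated C file (illustrative; 0x78 0x9c is a typical
# zlib stream header):
# const unsigned char stmm_image[] = {
# 0x78, 0x9c, ...
# };
# const unsigned int stmm_image_size = sizeof(stmm_image);
# const unsigned int stmm_image_uncompressed_size = 3670016;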
| optee_os-nvidia-rel-35 | scripts/gen_stmm_hex.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2017, 2020, Linaro Limited
# Copyright (c) 2020-2022, Arm Limited.
#
import argparse
from elftools.elf.elffile import ELFFile
import os
import re
import struct
import uuid
import zlib
def get_args():
parser = argparse.ArgumentParser(
description='Converts a Trusted '
'Application ELF file into a C source file, ready for '
'inclusion in the TEE binary as an "early TA".')
parser.add_argument('--out', required=True,
help='Name of the output C file')
parser.add_argument(
'--ta',
required=False,
help='Path to the TA binary. File name has to be: <uuid>.* '
'such as: 8aaaf200-2450-11e4-abe2-0002a5d5c51b.stripped.elf')
parser.add_argument(
'--sp',
required=False,
help='Path to the SP binary. File name has to be: <uuid>.* '
'such as: 8aaaf200-2450-11e4-abe2-0002a5d5c51b.stripped.elf')
parser.add_argument(
'--compress',
dest="compress",
action="store_true",
help='Compress the image using the DEFLATE '
'algorithm')
parser.add_argument(
'--manifest',
dest="manifest",
required=False,
help='path to the SP manifest file')
return parser.parse_args()
def get_name(obj):
# Symbol or section .name can be a byte array or a string, we want a string
try:
name = obj.name.decode()
except (UnicodeDecodeError, AttributeError):
name = obj.name
return name
def ta_get_flags(ta_f):
with open(ta_f, 'rb') as f:
elffile = ELFFile(f)
for s in elffile.iter_sections():
if get_name(s) == '.ta_head':
return struct.unpack('<16x4xI', s.data()[:24])[0]
raise Exception('.ta_head section not found')
def sp_get_flags(sp_f):
with open(sp_f, 'rb') as f:
elffile = ELFFile(f)
for s in elffile.iter_sections():
if get_name(s) == '.sp_head':
return struct.unpack('<16x4xI', s.data()[:24])[0]
raise Exception('.sp_head section not found')
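# Note: '<16x4xI' skips the 16-byte UUID and the 4-byte stack_size fields
# at the start of the structure and reads the 32-bit flags word (per
# OP-TEE's struct ta_head/sp_head layout)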
def dump_bin(f, ts, compress):
with open(ts, 'rb') as _ts:
data = _ts.read()
uncompressed_size = len(data)
if compress:
data = zlib.compress(data)
size = len(data)
i = 0
while i < size:
if i % 8 == 0:
f.write('\t\t')
f.write(hex(data[i]) + ',')
i = i + 1
if i % 8 == 0 or i == size:
f.write('\n')
else:
f.write(' ')
return (size, uncompressed_size)
def main():
args = get_args()
is_sp = False
if args.ta is None and args.sp is None:
raise Exception('The --ta or the --sp flag is required')
if args.ta is not None and args.sp is not None:
raise Exception('The --ta and the --sp can\'t be combined')
if args.ta is not None:
ts = args.ta
is_sp = False
if args.sp is not None:
ts = args.sp
is_sp = True
ts_uuid = uuid.UUID(re.sub(r'\..*', '', os.path.basename(ts)))
f = open(args.out, 'w')
f.write('/* Generated from ' + ts + ' by ' +
os.path.basename(__file__) + ' */\n\n')
f.write('#include <kernel/embedded_ts.h>\n\n')
f.write('#include <scattered_array.h>\n\n')
f.write('const uint8_t ts_bin_' + ts_uuid.hex + '[] = {\n')
ts_size, ts_uncompressed_size = dump_bin(f, ts, args.compress)
f.write('};\n')
if is_sp:
f.write('#include <kernel/secure_partition.h>\n\n')
f.write('const uint8_t fdt_bin_' + ts_uuid.hex + '[] = {\n')
dump_bin(f, args.manifest, False)
f.write('};\n')
f.write('SCATTERED_ARRAY_DEFINE_PG_ITEM(sp_images, struct \
sp_image) = {\n')
f.write('\t.fdt = fdt_bin_' + ts_uuid.hex + ',\n')
f.write('\t.image = {\n')
f.write('\t.flags = 0x{:04x},\n'.format(sp_get_flags(ts)))
else:
f.write('SCATTERED_ARRAY_DEFINE_PG_ITEM(early_tas, struct \
embedded_ts) = {\n')
f.write('\t.flags = 0x{:04x},\n'.format(ta_get_flags(ts)))
f.write('\t.uuid = {\n')
f.write('\t\t.timeLow = 0x{:08x},\n'.format(ts_uuid.time_low))
f.write('\t\t.timeMid = 0x{:04x},\n'.format(ts_uuid.time_mid))
f.write('\t\t.timeHiAndVersion = ' +
'0x{:04x},\n'.format(ts_uuid.time_hi_version))
f.write('\t\t.clockSeqAndNode = {\n')
csn = '{0:02x}{1:02x}{2:012x}'.format(ts_uuid.clock_seq_hi_variant,
ts_uuid.clock_seq_low, ts_uuid.node)
f.write('\t\t\t')
f.write(', '.join('0x' + csn[i:i + 2] for i in range(0, len(csn), 2)))
f.write('\n\t\t},\n\t},\n')
f.write('\t.size = sizeof(ts_bin_' + ts_uuid.hex +
'), /* {:d} */\n'.format(ts_size))
f.write('\t.ts = ts_bin_' + ts_uuid.hex + ',\n')
if args.compress:
f.write('\t.uncompressed_size = '
'{:d},\n'.format(ts_uncompressed_size))
if is_sp:
f.write('}\n')
f.write('};\n')
f.close()
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/ts_bin_to_c.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2021 Huawei Technologies Co., Ltd
def dump(msg, buf):
import codecs
print(msg, end='')
print(codecs.encode(buf, 'hex').decode('utf-8'))
def main():
import struct
import sys
img_type_name = {1: 'SHDR_BOOTSTRAP_TA', 2: 'SHDR_ENCRYPTED_TA'}
algo_name = {0x70414930: 'RSASSA_PKCS1_PSS_MGF1_SHA256',
0x70004830: 'RSASSA_PKCS1_V1_5_SHA256'}
with open(sys.argv[1], 'rb') as f:
shdr = f.read(20)
(magic, img_type, img_size, algo, digest_len,
sig_len) = struct.unpack('<IIIIHH', shdr)
print(f'Magic: 0x{magic:x} ', end='')
if magic == 0x4f545348: # SHDR_MAGIC
print('(correct)')
else:
print('(**INCORRECT**)')
return
print(f'Image type: {img_type} ({img_type_name[img_type]})')
print(f'Image size: {img_size} bytes')
print(f'Signing algorithm: 0x{algo:x} ({algo_name[algo]})')
print(f'Digest length: {digest_len} bytes')
print(f'Signature length: {sig_len} bytes')
digest = f.read(digest_len)
dump('Digest: ', digest)
if __name__ == '__main__':
main()
| optee_os-nvidia-rel-35 | scripts/dump_ta_header.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2021, Huawei Technologies Co., Ltd
#
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import codecs
import sys
verbose = False
def dump(buf):
print(codecs.encode(buf, 'hex').decode('utf-8'))
def resolve_symbol(elf, name):
for section in elf.iter_sections():
if isinstance(section, SymbolTableSection):
for symbol in section.iter_symbols():
if symbol.name == name:
return symbol.entry['st_value']
raise RuntimeError(f'Symbol {name} not found')
def hash_range(h, elf, start, end):
global verbose
start_addr = resolve_symbol(elf, start)
end_addr = resolve_symbol(elf, end)
size = end_addr - start_addr
if verbose:
print(f'[{start}(0x{start_addr:x}), {end}(0x{end_addr:x})]: '
f'{size} bytes')
for segment in elf.iter_segments():
if (segment['p_type'] == 'PT_LOAD' and
segment['p_vaddr'] <= start_addr and
end_addr <= segment['p_vaddr'] + segment['p_filesz']):
begin_offs = start_addr - segment['p_vaddr']
h.update(segment.data()[begin_offs:begin_offs + size])
def hash_section(h, elf, name):
global verbose
s = elf.get_section_by_name(name)
if s is None:
return
d = s.data()
if verbose:
print(f'{name}: {len(d)} bytes')
h.update(d)
def main():
global verbose
argc = len(sys.argv)
if argc != 2 and argc != 3:
print('Usage:', sys.argv[0], '[-v] <tee.elf>')
return 1
if argc == 3 and sys.argv[1] == '-v':
verbose = True
with open(sys.argv[argc - 1], 'rb') as f:
elf = ELFFile(f)
h = hashes.Hash(hashes.SHA256(), default_backend())
hash_range(h, elf, '__text_start', '__text_data_start')
hash_range(h, elf, '__text_data_end', '__text_end')
hash_section(h, elf, '.text_init')
hash_section(h, elf, '.text_pageable')
hash_section(h, elf, '.rodata')
hash_section(h, elf, '.rodata_init')
hash_section(h, elf, '.rodata_pageable')
dump(h.finalize())
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/print_tee_hash.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2016, Linaro Limited
import struct
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=False, dest='inf',
default='../out/arm/core/tee.bin',
help='The input tee.bin')
return parser.parse_args()
def main():
args = get_args()
with open(args.inf, "rb") as f:
data = f.read(4)
magic = struct.unpack('<I', data)[0]
print("Magic: \t\t0x{:08x}".format(magic))
data = f.read(1)
version = struct.unpack('<B', data)[0]
print("Version: \t0x{:02x}".format(version))
data = f.read(1)
arch_id = struct.unpack('<B', data)[0]
print("ArchID: \t0x{:02x}".format(arch_id))
data = f.read(2)
flags = struct.unpack('<H', data)[0]
print("Arch Flags: \t0x{:04x}".format(arch_id))
data = f.read(4)
init_size = struct.unpack('<I', data)[0]
print("Init size: \t0x{:04x}".format(init_size))
data = f.read(4)
laddr_h = struct.unpack('<I', data)[0]
print("Load addr high:\t0x{:04x}".format(laddr_h))
data = f.read(4)
laddr_l = struct.unpack('<I', data)[0]
print("Load addr low: \t0x{:04x}".format(laddr_l))
data = f.read(4)
mem_usage = struct.unpack('<I', data)[0]
print("Mem usage: \t0x{:04x}".format(mem_usage))
data = f.read(4)
pgd_size = struct.unpack('<I', data)[0]
print("Pages size: \t0x{:04x}".format(pgd_size))
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/tee_bin_parser.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2018, Linaro Limited
#
import argparse
import os
import sys
def get_args():
parser = argparse.ArgumentParser(description='Converts a binary file '
'into C source file defining binary '
'data as a constant byte array.')
parser.add_argument('--bin', required=True,
help='Path to the input binary file')
parser.add_argument('--vname', required=True,
help='Variable name for the generated table in '
'the output C source file.')
parser.add_argument('--out', required=True,
help='Path for the generated C file')
parser.add_argument('--text', required=False, action='store_true',
help='Treat input as a text file')
return parser.parse_args()
def main():
args = get_args()
with open(args.bin, 'rb') as indata:
data = indata.read()
if args.text:
data += b'\0'
size = len(data)
f = open(args.out, 'w')
f.write('/* Generated from ' + args.bin + ' by ' +
os.path.basename(__file__) + ' */\n\n')
f.write('#include <compiler.h>\n')
f.write('#include <stdint.h>\n')
if args.text:
f.write('__extension__ const char ' + args.vname + '[] = {\n')
else:
f.write('__extension__ const uint8_t ' + args.vname + '[] ' +
' __aligned(__alignof__(uint64_t)) = {\n')
i = 0
while i < size:
if i % 8 == 0:
f.write('\t\t')
if args.text and i != size - 1 and data[i] == 0:
print('Error: null byte encountered in text file')
sys.exit(1)
f.write(hex(data[i]) + ',')
i = i + 1
if i % 8 == 0 or i == size:
f.write('\n')
else:
f.write(' ')
f.write('};\n')
f.close()
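# Example (illustrative): --bin blob.bin --vname my_blob --out my_blob.c
# produces roughly:
# __extension__ const uint8_t my_blob[] __aligned(__alignof__(uint64_t)) = {
# 0x7f, 0x45, 0x4c, 0x46, ...
# };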
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/bin_to_c.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from __future__ import print_function
from __future__ import division
import argparse
import sys
try:
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
from elftools.elf.constants import P_FLAGS
except ImportError:
print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with
$ apt install python3-pyelftools
if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
raise
def round_up(n, m):
if n == 0:
return 0
else:
return (((n - 1) // m) + 1) * m
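# E.g. round_up(1, 4096) == 4096, round_up(4096, 4096) == 4096,
# round_up(4097, 4096) == 8192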
def emit_load_segments(elffile, outf):
load_size = 0
code_size = 0
data_size = 0
load_segments = [s for s in elffile.iter_segments()
if s['p_type'] == 'PT_LOAD']
prev_segment = None
pad = 0
pad_size = []
w_found = False
n = 0
# Check that load segments ordered by VA have the expected layout:
# read only first, then read-write. Compute padding at end of each segment,
# 0 if none is required.
for segment in load_segments:
if prev_segment:
pad = segment['p_vaddr'] - (prev_segment['p_vaddr'] +
prev_segment['p_filesz'])
else:
if segment['p_flags'] & P_FLAGS.PF_W:
print('Expected RO load segment(s) first')
sys.exit(1)
if segment['p_flags'] & P_FLAGS.PF_W:
if not w_found:
# End of RO segments, discard padding for the last one (it
# would just take up space in the generated C file)
pad = 0
w_found = True
else:
if w_found:
print('RO load segment found after RW one(s) (n={})'.format(n))
sys.exit(1)
if prev_segment:
if pad > 31:
# We expect segments to be tightly packed together for memory
# efficiency. 31 is an arbitrary, "sounds reasonable" value
# which might need to be adjusted -- who knows what the
# compiler/linker can do.
print('Warning: suspiciously large padding ({}) after load '
'segment {}, please check'.format(pad, n-1))
pad_size.append(pad)
prev_segment = segment
n = n + 1
pad_size.append(0)
n = 0
# Compute code_size, data_size and load_size
for segment in load_segments:
sz = segment['p_filesz'] + pad_size[n]
if segment['p_flags'] & P_FLAGS.PF_W:
data_size += sz
else:
code_size += sz
load_size += sz
n = n + 1
n = 0
i = 0
# Output data to C file
outf.write(b'const uint8_t ldelf_data[%d]' % round_up(load_size, 4096))
outf.write(b' __aligned(4096) = {\n')
for segment in load_segments:
data = segment.data()
if pad_size[n]:
# Pad with zeros if needed
data += bytearray(pad_size[n])
for j in range(len(data)):
if i % 8 == 0:
outf.write(b'\t')
outf.write(b'0x' + '{:02x}'.format(data[j]).encode('utf-8')
+ b',')
i = i + 1
if i % 8 == 0 or i == load_size:
outf.write(b'\n')
else:
outf.write(b' ')
n = n + 1
outf.write(b'};\n')
outf.write(b'const unsigned int ldelf_code_size = %d;\n' % code_size)
outf.write(b'const unsigned int ldelf_data_size = %d;\n' % data_size)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True, type=argparse.FileType('rb'),
help='The input ldelf.elf')
parser.add_argument('--output',
required=True, type=argparse.FileType('wb'),
help='The output ldelf_hex.c')
return parser.parse_args()
def main():
args = get_args()
inf = args.input
outf = args.output
elffile = ELFFile(inf)
outf.write(b'/* Automatically generated, do not edit */\n')
outf.write(b'#include <compiler.h>\n')
outf.write(b'#include <stdint.h>\n')
emit_load_segments(elffile, outf)
outf.write(b'const unsigned long ldelf_entry = %lu;\n' %
elffile.header['e_entry'])
inf.close()
outf.close()
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/gen_ldelf_hex.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2015, Linaro Limited
def get_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--prefix', required=True,
help='Prefix for the public key exponent and modulus in c file')
parser.add_argument(
'--out', required=True,
help='Name of c file for the public key')
parser.add_argument('--key', required=True, help='Name of key file')
return parser.parse_args()
def main():
import array
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
args = get_args()
with open(args.key, 'rb') as f:
data = f.read()
try:
key = serialization.load_pem_private_key(data, password=None,
backend=default_backend())
key = key.public_key()
except ValueError:
key = serialization.load_pem_public_key(data,
backend=default_backend())
# Refuse public exponent with more than 32 bits. Otherwise the C
# compiler may simply truncate the value and proceed.
# This will lead to TAs seemingly having invalid signatures with a
# possible security issue for any e = k*2^32 + 1 (for any integer k).
if key.public_numbers().e > 0xffffffff:
raise ValueError(
'Unsupported large public exponent detected. ' +
'OP-TEE handles only public exponents up to 2^32 - 1.')
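# Example: e = 0x100000001 (2^32 + 1) would silently truncate to 1 when
# stored in the uint32_t exponent emitted below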
with open(args.out, 'w') as f:
f.write("#include <stdint.h>\n")
f.write("#include <stddef.h>\n\n")
f.write("const uint32_t " + args.prefix + "_exponent = " +
str(key.public_numbers().e) + ";\n\n")
f.write("const uint8_t " + args.prefix + "_modulus[] = {\n")
i = 0
nbuf = key.public_numbers().n.to_bytes(key.key_size >> 3, 'big')
for x in array.array("B", nbuf):
f.write("0x" + '{0:02x}'.format(x) + ",")
i = i + 1
if i % 8 == 0:
f.write("\n")
else:
f.write(" ")
f.write("};\n")
f.write("const size_t " + args.prefix + "_modulus_size = sizeof(" +
args.prefix + "_modulus);\n")
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/pem_to_pub_c.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from subprocess import Popen, PIPE
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Helper script that updates '
'the CHANGELOG.md file.\n'
'Usage example:\n'
' ./update_changelog.py '
' --changelog-file CHANGELOG.md'
' --release-version 3.7.0'
' --previous-release-version 3.6.0'
' --release-date 2019-10-11')
parser.add_argument('--changelog-file', action='store', required=False,
default='CHANGELOG.md',
help='Changelog file to be updated.')
parser.add_argument('--release-date', action='store', required=True,
help='The release date (yyyy-mm-dd).')
parser.add_argument('--release-version', action='store', required=True,
help='Release version (MAJOR.MINOR.PATCH).')
parser.add_argument('--previous-release-version', action='store',
required=True,
help='Previous release version (MAJOR.MINOR.PATCH).')
return parser.parse_args()
def prepend_write(filename, text):
with open(filename, 'r+') as f:
current_content = f.read()
f.seek(0, 0)
f.write(text + '\n' + current_content)
f.flush()
def get_previous_release_date(tag):
cmd = "git log -1 --date=format:%Y-%m-%d --format=format:%cd " \
"{}".format(tag)
process = Popen(cmd.split(), stdout=PIPE)
(output, err) = process.communicate()
return output.decode("utf-8")
def main():
args = get_args()
gits = ["OP-TEE/optee_os", "OP-TEE/optee_client", "OP-TEE/optee_test",
"OP-TEE/build", "linaro-swg/optee_examples"]
# Shorten name
clf = args.changelog_file
rv = args.release_version
prv = args.previous_release_version
rd = args.release_date
prd = get_previous_release_date(prv)
# In some cases we need underscore in string
rvu = rv.replace('.', '_')
text = "# OP-TEE - version {} ({})\n".format(rv, rd)
text += "\n"
text += "- Links to the release pages, commits and pull requests merged " \
"into this release for:\n"
for g in gits:
gu = g.replace('/', '_')
gu = gu.replace('-', '_')
text += " - {}: [release page][{}_release_{}], " \
"[commits][{}_commits_{}] and [pull requests]" \
"[{}_pr_{}]\n".format(g, gu, rvu, gu, rvu, gu, rvu)
text += "\n"
for g in gits:
gu = g.replace('/', '_')
gu = gu.replace('-', '_')
text += "\n[{}_release_{}]: https://github.com/{}/releases/tag/" \
"{}\n".format(gu, rvu, g, rv)
text += "[{}_commits_{}]: https://github.com/{}/compare/" \
"{}...{}\n".format(gu, rvu, g, prv, rv)
text += "[{}_pr_{}]: https://github.com/{}/pulls?q=is%3Apr+is%3A" \
"merged+base%3Amaster+merged%3A{}..{}\n".format(
gu, rvu, g, prd, rd)
prepend_write(args.changelog_file, text)
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/update_changelog.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from __future__ import print_function
from __future__ import division
import argparse
import sys
import struct
import re
import hashlib
try:
from elftools.elf.elffile import ELFFile
from elftools.elf.constants import SH_FLAGS
from elftools.elf.enums import ENUM_RELOC_TYPE_ARM
from elftools.elf.enums import ENUM_RELOC_TYPE_AARCH64
from elftools.elf.sections import SymbolTableSection
from elftools.elf.relocation import RelocationSection
except ImportError:
print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with
$ apt install python3-pyelftools
if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
raise
small_page_size = 4 * 1024
elffile_symbols = None
tee_pageable_bin = None
tee_pager_bin = None
tee_embdata_bin = None
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def round_up(n, m):
if n == 0:
return 0
else:
return (((n - 1) // m) + 1) * m
def get_arch_id(elffile):
e_machine = elffile.header['e_machine']
if e_machine == 'EM_ARM':
return 0
if e_machine == 'EM_AARCH64':
return 1
eprint('Unknown e_machine "%s"' % e_machine)
sys.exit(1)
def get_name(obj):
# Symbol or section .name might be a byte array or a string, we want a
# string
try:
name = obj.name.decode()
except (UnicodeDecodeError, AttributeError):
name = obj.name
return name
def get_symbol(elffile, name):
global elffile_symbols
global lsyms_def
if elffile_symbols is None:
elffile_symbols = dict()
lsyms_def = dict()
symbol_tables = [s for s in elffile.iter_sections()
if isinstance(s, SymbolTableSection)]
for section in symbol_tables:
for symbol in section.iter_symbols():
symbol_name = get_name(symbol)
if symbol['st_info']['bind'] == 'STB_GLOBAL':
elffile_symbols[symbol_name] = symbol
elif symbol['st_info']['bind'] == 'STB_LOCAL':
if symbol_name not in elffile_symbols.keys():
elffile_symbols[symbol_name] = symbol
if symbol_name not in lsyms_def.keys():
lsyms_def[symbol_name] = 1
else:
lsyms_def[symbol_name] += 1
if name in lsyms_def.keys() and lsyms_def[name] > 1:
eprint("Multiple definitions of local symbol %s" % name)
sys.exit(1)
if name not in elffile_symbols.keys():
eprint("Cannot find symbol %s" % name)
sys.exit(1)
return elffile_symbols[name]
def get_sections(elffile, pad_to, dump_names):
last_end = 0
bin_data = bytearray()
for section in elffile.iter_sections():
section_name = get_name(section)
if (section['sh_type'] == 'SHT_NOBITS' or
not (section['sh_flags'] & SH_FLAGS.SHF_ALLOC) or
not dump_names.match(section_name)):
continue
if last_end == 0:
bin_data = section.data()
else:
if section['sh_addr'] > last_end:
bin_data += bytearray(section['sh_addr'] - last_end)
bin_data += section.data()
last_end = section['sh_addr'] + section['sh_size']
if pad_to > last_end:
bin_data += bytearray(pad_to - last_end)
last_end = pad_to
return bin_data
def get_pageable_bin(elffile):
global tee_pageable_bin
if tee_pageable_bin is None:
pad_to = 0
dump_names = re.compile(r'^\..*_(pageable|init)$')
tee_pageable_bin = get_sections(elffile, pad_to, dump_names)
return tee_pageable_bin
def get_pager_bin(elffile):
global tee_pager_bin
if tee_pager_bin is None:
pad_to = get_symbol(elffile, '__data_end')['st_value']
dump_names = re.compile(r'^\.(text|nex_data|rodata|ctors|got|data|'
r'data\.rel\.ro|ARM\.exidx|ARM\.extab)$')
tee_pager_bin = get_sections(elffile, pad_to, dump_names)
return tee_pager_bin
def get_reloc_bin(elffile):
if get_arch_id(elffile) == 0:
exp_rel_type = ENUM_RELOC_TYPE_ARM['R_ARM_RELATIVE']
else:
exp_rel_type = ENUM_RELOC_TYPE_AARCH64['R_AARCH64_RELATIVE']
link_address = get_symbol(elffile, '__text_start')['st_value']
addrs = []
for section in elffile.iter_sections():
if not isinstance(section, RelocationSection):
continue
for rel in section.iter_relocations():
if rel['r_info_type'] == 0:
continue
if rel['r_info_type'] != exp_rel_type:
eprint("Unexpected relocation type 0x%x" %
rel['r_info_type'])
sys.exit(1)
addrs.append(rel['r_offset'] - link_address)
addrs.sort()
data = bytearray()
for a in addrs:
data += struct.pack('<I', a)
# Relocations has been reduced to only become the relative type with
# addend at the address (r_offset) of relocation, that is, increase by
# load_offset. The addresses (r_offset) are also sorted. The format is
# then:
# uint32_t: relocation #1
# uint32_t: relocation #2
# ...
# uint32_t: relocation #n
return data
def get_hashes_bin(elffile):
pageable_bin = get_pageable_bin(elffile)
if len(pageable_bin) % small_page_size != 0:
eprint("pageable size not a multiple of 4K: "
"{}".format(paged_area_size))
sys.exit(1)
data = bytearray()
for n in range(0, len(pageable_bin), small_page_size):
page = pageable_bin[n:n + small_page_size]
data += hashlib.sha256(page).digest()
return data
def get_embdata_bin(elffile):
global tee_embdata_bin
if tee_embdata_bin is None:
hashes_bin = get_hashes_bin(elffile)
reloc_bin = get_reloc_bin(elffile)
num_entries = 2
hash_offs = 2 * 4 + num_entries * (2 * 4)
hash_pad = round_up(len(hashes_bin), 8) - len(hashes_bin)
reloc_offs = hash_offs + len(hashes_bin) + hash_pad
reloc_pad = round_up(len(reloc_bin), 8) - len(reloc_bin)
total_len = reloc_offs + len(reloc_bin) + reloc_pad
tee_embdata_bin = struct.pack('<IIIIII', total_len, num_entries,
hash_offs, len(hashes_bin),
reloc_offs, len(reloc_bin))
tee_embdata_bin += hashes_bin + bytearray(hash_pad)
tee_embdata_bin += reloc_bin + bytearray(reloc_pad)
# The embedded data region is designed to be easy to extend when
# needed, it's formatted as:
# +---------------------------------------------------------+
# | uint32_t: Length of entire area including this field |
# +---------------------------------------------------------+
# | uint32_t: Number of entries "2" |
# +---------------------------------------------------------+
# | uint32_t: Offset of hashes from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of hashes |
# +---------------------------------------------------------+
# | uint32_t: Offset of relocations from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of relocations |
# +---------------------------------------------------------+
# | Data of hashes + eventual padding |
# +---------------------------------------------------------+
# | Data of relocations + eventual padding |
# +---------------------------------------------------------+
return tee_embdata_bin
def output_pager_bin(elffile, outf):
outf.write(get_pager_bin(elffile))
def output_pageable_bin(elffile, outf):
outf.write(get_pageable_bin(elffile))
def get_init_load_addr(elffile):
init_load_addr = get_symbol(elffile, '_start')['st_value']
init_load_addr_hi = init_load_addr >> 32
init_load_addr_lo = init_load_addr & 0xffffffff
return init_load_addr_hi, init_load_addr_lo
def output_raw_bin(elffile, outf):
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
outf.write(pageable_bin[init_bin_size:])
def output_header_v1(elffile, outf):
arch_id = get_arch_id(elffile)
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(pager_bin)
paged_area_size = len(pageable_bin)
init_mem_usage = (get_symbol(elffile, '__get_tee_init_end')['st_value'] -
get_symbol(elffile, '__text_start')['st_value'] +
len(embdata_bin))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
len(embdata_bin))
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 1
flags = 0
outf.write(struct.pack('<IBBHIIIII', magic, version, arch_id, flags,
init_size, init_load_addr[0], init_load_addr[1],
init_mem_usage, paged_size))
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
outf.write(pageable_bin[init_bin_size:])
def output_header_v2(elffile, outf):
arch_id = get_arch_id(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(get_pager_bin(elffile))
paged_area_size = len(get_pageable_bin(elffile))
embdata_bin_size = len(get_embdata_bin(elffile))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
embdata_bin_size)
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 2
flags = 0
nb_images = 1 if paged_size == 0 else 2
outf.write(struct.pack('<IBBHI', magic, version, arch_id, flags,
nb_images))
outf.write(struct.pack('<IIII', init_load_addr[0], init_load_addr[1],
0, init_size))
if nb_images == 2:
outf.write(struct.pack('<IIII', 0xffffffff, 0xffffffff, 1, paged_size))
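# v2 header layout: '<IBBHI' magic/version/arch_id/flags/nb_images,
# followed by one '<IIII' entry per image:
# (load_addr_hi, load_addr_lo, image_id, size).
# Image id 0 is the init part, id 1 the paged part; the 0xffffffff load
# addresses presumably leave placement of the paged part to the loader.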
def output_pager_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
def output_pageable_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
outf.write(get_pageable_bin(elffile)[init_bin_size:])
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True, type=argparse.FileType('rb'),
help='The input tee.elf')
parser.add_argument('--out_tee_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee.bin')
parser.add_argument('--out_tee_raw_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_raw.bin')
parser.add_argument('--out_tee_pager_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager.bin')
parser.add_argument('--out_tee_pageable_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable.bin')
parser.add_argument('--out_header_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_header_v2.bin')
parser.add_argument('--out_pager_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager_v2.bin')
parser.add_argument('--out_pageable_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable_v2.bin')
return parser.parse_args()
def main():
args = get_args()
elffile = ELFFile(args.input)
if args.out_tee_raw_bin:
output_raw_bin(elffile, args.out_tee_raw_bin)
if args.out_tee_bin:
output_header_v1(elffile, args.out_tee_bin)
if args.out_tee_pager_bin:
output_pager_bin(elffile, args.out_tee_pager_bin)
if args.out_tee_pageable_bin:
output_pageable_bin(elffile, args.out_tee_pageable_bin)
if args.out_header_v2:
output_header_v2(elffile, args.out_header_v2)
if args.out_pager_v2:
output_pager_v2(elffile, args.out_pager_v2)
if args.out_pageable_v2:
output_pageable_v2(elffile, args.out_pageable_v2)
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/gen_tee_bin.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2017, Linaro Limited
#
import sys
import re
def usage():
print("Usage: {0} <section reg exp match> [<skip section>...]".format(
sys.argv[0]))
sys.exit(1)
def main():
if len(sys.argv) < 2:
usage()
in_shdr = False
section_headers = re.compile("Section Headers:")
key_to_flags = re.compile("Key to Flags:")
match_rule = re.compile(sys.argv[1])
skip_sections = sys.argv[2:]
for line in sys.stdin:
if section_headers.match(line):
in_shdr = True
continue
if key_to_flags.match(line):
in_shdr = False
continue
if not in_shdr:
continue
words = line.split()
if len(words) < 3:
continue
if words[0] == "[":
name_offs = 2
else:
name_offs = 1
sect_name = words[name_offs]
sect_type = words[name_offs + 1]
if sect_type != "PROGBITS":
continue
if not match_rule.match(sect_name):
continue
if sect_name in skip_sections:
continue
print('\t*({0})'.format(sect_name))
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/gen_ld_sects.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2017, Linaro Limited
#
import argparse
import errno
import glob
import os
import re
import subprocess
import sys
import termios
CALL_STACK_RE = re.compile('Call stack:')
TEE_LOAD_ADDR_RE = re.compile(r'TEE load address @ (?P<load_addr>0x[0-9a-f]+)')
# This gets the address from lines looking like this:
# E/TC:0 0x001044a8
STACK_ADDR_RE = re.compile(
r'[UEIDFM]/(TC|LD):(\?*|[0-9]*) [0-9]* +(?P<addr>0x[0-9a-f]+)')
ABORT_ADDR_RE = re.compile(r'-abort at address (?P<addr>0x[0-9a-f]+)')
TA_PANIC_RE = re.compile(r'TA panicked with code (?P<code>0x[0-9a-f]+)')
REGION_RE = re.compile(r'region +[0-9]+: va (?P<addr>0x[0-9a-f]+) '
r'pa 0x[0-9a-f]+ size (?P<size>0x[0-9a-f]+)'
r'( flags .{4} (\[(?P<elf_idx>[0-9]+)\])?)?')
ELF_LIST_RE = re.compile(r'\[(?P<idx>[0-9]+)\] (?P<uuid>[0-9a-f\-]+)'
r' @ (?P<load_addr>0x[0-9a-f\-]+)')
FUNC_GRAPH_RE = re.compile(r'Function graph')
GRAPH_ADDR_RE = re.compile(r'(?P<addr>0x[0-9a-f]+)')
GRAPH_RE = re.compile(r'}')
epilog = '''
This scripts reads an OP-TEE abort or panic message from stdin and adds debug
information to the output, such as '<function> at <file>:<line>' next to each
address in the call stack. Any message generated by OP-TEE and containing a
call stack can in principle be processed by this script. This currently
includes aborts and panics from the TEE core as well as from any TA.
The paths provided on the command line are used to locate the appropriate ELF
binary (tee.elf or Trusted Application). The GNU binutils (addr2line, objdump,
nm) are used to extract the debug info. If the CROSS_COMPILE environment
variable is set, it is used as a prefix to the binutils tools. That is, the
script will invoke $(CROSS_COMPILE)addr2line etc. If it is not set however,
the prefix will be determined automatically for each ELF file based on its
architecture (arm-linux-gnueabihf-, aarch64-linux-gnu-). The resulting command
is then expected to be found in the user's PATH.
OP-TEE abort and panic messages are sent to the secure console. They look like
the following:
E/TC:0 User TA data-abort at address 0xffffdecd (alignment fault)
...
E/TC:0 Call stack:
E/TC:0 0x4000549e
E/TC:0 0x40001f4b
E/TC:0 0x4000273f
E/TC:0 0x40005da7
Inspired by a script of the same name by the Chromium project.
Sample usage:
$ scripts/symbolize.py -d out/arm-plat-hikey/core -d ../optee_test/out/ta/*
<paste whole dump here>
^D
Also, this script reads function graph generated for OP-TEE user TA from
/tmp/ftrace-<ta_uuid>.out file and resolves function addresses to corresponding
symbols.
Sample usage:
$ cat /tmp/ftrace-<ta_uuid>.out | scripts/symbolize.py -d <ta_uuid>.elf
<paste function graph here>
^D
'''
tee_result_names = {
'0xf0100001': 'TEE_ERROR_CORRUPT_OBJECT',
'0xf0100002': 'TEE_ERROR_CORRUPT_OBJECT_2',
'0xf0100003': 'TEE_ERROR_STORAGE_NOT_AVAILABLE',
'0xf0100004': 'TEE_ERROR_STORAGE_NOT_AVAILABLE_2',
'0xf0100006': 'TEE_ERROR_CIPHERTEXT_INVALID',
'0xffff0000': 'TEE_ERROR_GENERIC',
'0xffff0001': 'TEE_ERROR_ACCESS_DENIED',
'0xffff0002': 'TEE_ERROR_CANCEL',
'0xffff0003': 'TEE_ERROR_ACCESS_CONFLICT',
'0xffff0004': 'TEE_ERROR_EXCESS_DATA',
'0xffff0005': 'TEE_ERROR_BAD_FORMAT',
'0xffff0006': 'TEE_ERROR_BAD_PARAMETERS',
'0xffff0007': 'TEE_ERROR_BAD_STATE',
'0xffff0008': 'TEE_ERROR_ITEM_NOT_FOUND',
'0xffff0009': 'TEE_ERROR_NOT_IMPLEMENTED',
'0xffff000a': 'TEE_ERROR_NOT_SUPPORTED',
'0xffff000b': 'TEE_ERROR_NO_DATA',
'0xffff000c': 'TEE_ERROR_OUT_OF_MEMORY',
'0xffff000d': 'TEE_ERROR_BUSY',
'0xffff000e': 'TEE_ERROR_COMMUNICATION',
'0xffff000f': 'TEE_ERROR_SECURITY',
'0xffff0010': 'TEE_ERROR_SHORT_BUFFER',
'0xffff0011': 'TEE_ERROR_EXTERNAL_CANCEL',
'0xffff300f': 'TEE_ERROR_OVERFLOW',
'0xffff3024': 'TEE_ERROR_TARGET_DEAD',
'0xffff3041': 'TEE_ERROR_STORAGE_NO_SPACE',
'0xffff3071': 'TEE_ERROR_MAC_INVALID',
'0xffff3072': 'TEE_ERROR_SIGNATURE_INVALID',
'0xffff5000': 'TEE_ERROR_TIME_NOT_SET',
'0xffff5001': 'TEE_ERROR_TIME_NEEDS_RESET',
}
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Symbolizes OP-TEE abort dumps or function graphs',
epilog=epilog)
parser.add_argument('-d', '--dir', action='append', nargs='+',
help='Search for ELF file in DIR. tee.elf is needed '
'to decode a TEE Core or pseudo-TA abort, while '
'<TA_uuid>.elf is required if a user-mode TA has '
'crashed. For convenience, ELF files may also be '
'given.')
parser.add_argument('-s', '--strip_path', nargs='?',
help='Strip STRIP_PATH from file paths (default: '
'current directory, use -s with no argument to show '
'full paths)', default=os.getcwd())
return parser.parse_args()
class Symbolizer(object):
def __init__(self, out, dirs, strip_path):
self._out = out
self._dirs = dirs
self._strip_path = strip_path
self._addr2line = None
self.reset()
def my_Popen(self, cmd):
try:
return subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
bufsize=1)
except OSError as e:
if e.errno == errno.ENOENT:
print("*** Error:{}: command not found".format(cmd[0]),
file=sys.stderr)
sys.exit(1)
def get_elf(self, elf_or_uuid):
if not elf_or_uuid.endswith('.elf'):
elf_or_uuid += '.elf'
for d in self._dirs:
if d.endswith(elf_or_uuid) and os.path.isfile(d):
return d
elf = glob.glob(d + '/' + elf_or_uuid)
if elf:
return elf[0]
def set_arch(self, elf):
self._arch = os.getenv('CROSS_COMPILE')
if self._arch:
return
p = subprocess.Popen(['file', '-L', elf], stdout=subprocess.PIPE)
output = p.stdout.readlines()
p.terminate()
if b'ARM aarch64,' in output[0]:
self._arch = 'aarch64-linux-gnu-'
elif b'ARM,' in output[0]:
self._arch = 'arm-linux-gnueabihf-'
def arch_prefix(self, cmd, elf):
self.set_arch(elf)
if self._arch is None:
return ''
return self._arch + cmd
def spawn_addr2line(self, elf_name):
if elf_name is None:
return
if self._addr2line_elf_name == elf_name:
return
if self._addr2line:
self._addr2line.terminate()
self._addr2line = None
elf = self.get_elf(elf_name)
if not elf:
return
cmd = self.arch_prefix('addr2line', elf)
if not cmd:
return
self._addr2line = self.my_Popen([cmd, '-f', '-p', '-e', elf])
self._addr2line_elf_name = elf_name
# If addr falls into a region that maps a TA ELF file, return the load
# address of that file.
def elf_load_addr(self, addr):
if self._regions:
for r in self._regions:
r_addr = int(r[0], 16)
r_size = int(r[1], 16)
i_addr = int(addr, 16)
if (i_addr >= r_addr and i_addr < (r_addr + r_size)):
# Found region
elf_idx = r[2]
if elf_idx is not None:
return self._elfs[int(elf_idx)][1]
# In case address is not found in TA ELF file, fallback to tee.elf
# especially to symbolize mixed (user-space and kernel) addresses
# which is true when syscall ftrace is enabled along with TA
# ftrace.
return self._tee_load_addr
else:
# tee.elf
return self._tee_load_addr
def elf_for_addr(self, addr):
l_addr = self.elf_load_addr(addr)
if l_addr == self._tee_load_addr:
return 'tee.elf'
for k in self._elfs:
e = self._elfs[k]
if int(e[1], 16) == int(l_addr, 16):
return e[0]
return None
def subtract_load_addr(self, addr):
l_addr = self.elf_load_addr(addr)
if l_addr is None:
return None
if int(l_addr, 16) > int(addr, 16):
return ''
return '0x{:x}'.format(int(addr, 16) - int(l_addr, 16))
def resolve(self, addr):
reladdr = self.subtract_load_addr(addr)
self.spawn_addr2line(self.elf_for_addr(addr))
if not reladdr or not self._addr2line:
return '???'
if self.elf_for_addr(addr) == 'tee.elf':
reladdr = '0x{:x}'.format(int(reladdr, 16) +
int(self.first_vma('tee.elf'), 16))
try:
print(reladdr, file=self._addr2line.stdin)
ret = self._addr2line.stdout.readline().rstrip('\n')
except IOError:
ret = '!!!'
return ret
# Armv8.5 with Memory Tagging Extension (MTE)
def strip_armv85_mte_tag(self, addr):
i_addr = int(addr, 16)
i_addr &= ~(0xf << 56)
return '0x{:x}'.format(i_addr)
def symbol_plus_offset(self, addr):
ret = ''
prevsize = 0
prevaddr = 0
prevname = None
addr = self.strip_armv85_mte_tag(addr)
reladdr = self.subtract_load_addr(addr)
elf_name = self.elf_for_addr(addr)
if elf_name is None:
return ''
elf = self.get_elf(elf_name)
cmd = self.arch_prefix('nm', elf)
if not reladdr or not elf or not cmd:
return ''
ireladdr = int(reladdr, 16)
nm = self.my_Popen([cmd, '--numeric-sort', '--print-size', elf])
for line in iter(nm.stdout.readline, ''):
try:
addr, size, _, name = line.split()
except ValueError:
# Size is missing
try:
addr, _, name = line.split()
size = '0'
except ValueError:
# E.g., undefined (external) symbols (line = "U symbol")
continue
iaddr = int(addr, 16)
isize = int(size, 16)
if iaddr == ireladdr:
ret = name
break
if iaddr < ireladdr and iaddr + isize >= ireladdr:
offs = ireladdr - iaddr
ret = name + '+' + str(offs)
break
if iaddr > ireladdr and prevsize == 0 and prevname:
# The previous symbol has no size information: attribute
# the address to it, offset from its start
offs = ireladdr - prevaddr
ret = prevname + '+' + str(offs)
break
prevsize = isize
prevaddr = iaddr
prevname = name
nm.terminate()
return ret
def section_plus_offset(self, addr):
ret = ''
reladdr = self.subtract_load_addr(addr)
elf_name = self.elf_for_addr(addr)
if elf_name is None:
return ''
elf = self.get_elf(elf_name)
cmd = self.arch_prefix('objdump', elf)
if not reladdr or not elf or not cmd:
return ''
iaddr = int(reladdr, 16)
objdump = self.my_Popen([cmd, '--section-headers', elf])
for line in iter(objdump.stdout.readline, ''):
try:
idx, name, size, vma, lma, offs, algn = line.split()
except ValueError:
continue
ivma = int(vma, 16)
isize = int(size, 16)
if ivma == iaddr:
ret = name
break
if ivma < iaddr and ivma + isize >= iaddr:
offs = iaddr - ivma
ret = name + '+' + str(offs)
break
objdump.terminate()
return ret
def process_abort(self, line):
ret = ''
match = re.search(ABORT_ADDR_RE, line)
addr = match.group('addr')
pre = match.start('addr')
post = match.end('addr')
sym = self.symbol_plus_offset(addr)
sec = self.section_plus_offset(addr)
if sym or sec:
ret += line[:pre]
ret += addr
if sym:
ret += ' ' + sym
if sec:
ret += ' ' + sec
ret += line[post:]
return ret
# Return all ELF sections with the ALLOC flag
def read_sections(self, elf_name):
if elf_name is None:
return
if elf_name in self._sections:
return
elf = self.get_elf(elf_name)
if not elf:
return
cmd = self.arch_prefix('objdump', elf)
if not elf or not cmd:
return
self._sections[elf_name] = []
objdump = self.my_Popen([cmd, '--section-headers', elf])
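# objdump prints each section header across two lines: the first unpacks
# into idx/name/size/vma/..., while the second (flags) line raises
# ValueError with name/size/vma still holding the previous line's values,
# so a section is recorded only when its flags line contains ALLOC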
for line in iter(objdump.stdout.readline, ''):
try:
_, name, size, vma, _, _, _ = line.split()
except ValueError:
if 'ALLOC' in line:
self._sections[elf_name].append([name, int(vma, 16),
int(size, 16)])
def first_vma(self, elf_name):
self.read_sections(elf_name)
return '0x{:x}'.format(self._sections[elf_name][0][1])
def overlaps(self, section, addr, size):
sec_addr = section[1]
sec_size = section[2]
if not size or not sec_size:
return False
return ((addr <= (sec_addr + sec_size - 1)) and
((addr + size - 1) >= sec_addr))
def sections_in_region(self, addr, size, elf_idx):
ret = ''
addr = self.subtract_load_addr(addr)
if not addr:
return ''
iaddr = int(addr, 16)
isize = int(size, 16)
elf = self._elfs[int(elf_idx)][0]
if elf is None:
return ''
self.read_sections(elf)
if elf not in self._sections:
return ''
for s in self._sections[elf]:
if self.overlaps(s, iaddr, isize):
ret += ' ' + s[0]
return ret
def reset(self):
self._call_stack_found = False
if self._addr2line:
self._addr2line.terminate()
self._addr2line = None
self._addr2line_elf_name = None
self._arch = None
self._saved_abort_line = ''
self._sections = {} # {elf_name: [[name, addr, size], ...], ...}
self._regions = [] # [[addr, size, elf_idx, saved line], ...]
self._elfs = {0: ["tee.elf", 0]} # {idx: [uuid, load_addr], ...}
self._tee_load_addr = '0x0'
self._func_graph_found = False
self._func_graph_skip_line = True
def pretty_print_path(self, path):
if self._strip_path:
return re.sub(re.escape(self._strip_path) + '/*', '', path)
return path
def write(self, line):
if self._call_stack_found:
match = re.search(STACK_ADDR_RE, line)
if match:
addr = match.group('addr')
pre = match.start('addr')
post = match.end('addr')
self._out.write(line[:pre])
self._out.write(addr)
# The call stack contains return addresses (LR/ELR values).
# Heuristic: subtract 2 to obtain the call site of the function
# or the location of the exception. This value works for A64,
# A32 as well as Thumb.
pc = 0
lr = int(addr, 16)
if lr:
pc = lr - 2
res = self.resolve('0x{:x}'.format(pc))
res = self.pretty_print_path(res)
self._out.write(' ' + res)
self._out.write(line[post:])
return
else:
self.reset()
if self._func_graph_found:
match = re.search(GRAPH_ADDR_RE, line)
match_re = re.search(GRAPH_RE, line)
if match:
addr = match.group('addr')
pre = match.start('addr')
post = match.end('addr')
self._out.write(line[:pre])
res = self.resolve(addr)
res_arr = re.split(' ', res)
self._out.write(res_arr[0])
self._out.write(line[post:])
self._func_graph_skip_line = False
return
elif match_re:
self._out.write(line)
return
elif self._func_graph_skip_line:
return
else:
self.reset()
match = re.search(REGION_RE, line)
if match:
# Region table: save info for later processing once
# we know which UUID corresponds to which ELF index
addr = match.group('addr')
size = match.group('size')
elf_idx = match.group('elf_idx')
self._regions.append([addr, size, elf_idx, line])
return
match = re.search(ELF_LIST_RE, line)
if match:
# ELF list: save info for later. Region table and ELF list
# will be displayed when the call stack is reached
i = int(match.group('idx'))
self._elfs[i] = [match.group('uuid'), match.group('load_addr'),
line]
return
match = re.search(TA_PANIC_RE, line)
if match:
code = match.group('code')
if code in tee_result_names:
line = line.strip() + ' (' + tee_result_names[code] + ')\n'
self._out.write(line)
return
match = re.search(TEE_LOAD_ADDR_RE, line)
if match:
self._tee_load_addr = match.group('load_addr')
match = re.search(CALL_STACK_RE, line)
if match:
self._call_stack_found = True
if self._regions:
for r in self._regions:
r_addr = r[0]
r_size = r[1]
elf_idx = r[2]
saved_line = r[3]
if elf_idx is None:
self._out.write(saved_line)
else:
self._out.write(saved_line.strip() +
self.sections_in_region(r_addr,
r_size,
elf_idx) +
'\n')
if self._elfs:
for k in self._elfs:
e = self._elfs[k]
if (len(e) >= 3):
# TA executable or library
self._out.write(e[2].strip())
elf = self.get_elf(e[0])
if elf:
rpath = os.path.realpath(elf)
path = self.pretty_print_path(rpath)
self._out.write(' (' + path + ')')
self._out.write('\n')
# Here is a good place to resolve the abort address because we
# have all the information we need
if self._saved_abort_line:
self._out.write(self.process_abort(self._saved_abort_line))
match = re.search(FUNC_GRAPH_RE, line)
if match:
self._func_graph_found = True
match = re.search(ABORT_ADDR_RE, line)
if match:
self.reset()
# At this point the arch and TA load address are unknown.
# Save the line so we can translate the abort address later.
self._saved_abort_line = line
self._out.write(line)
def flush(self):
self._out.flush()
def main():
args = get_args()
if args.dir:
# Flatten list in case -d is used several times *and* with multiple
# arguments
args.dirs = [item for sublist in args.dir for item in sublist]
else:
args.dirs = []
symbolizer = Symbolizer(sys.stdout, args.dirs, args.strip_path)
fd = sys.stdin.fileno()
isatty = os.isatty(fd)
if isatty:
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] = new[3] & ~termios.ECHO # lflags
try:
if isatty:
termios.tcsetattr(fd, termios.TCSADRAIN, new)
for line in sys.stdin:
symbolizer.write(line)
finally:
symbolizer.flush()
if isatty:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/symbolize.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright Amazon.com Inc. or its affiliates
#
import typing
import boto3
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
AsymmetricSignatureContext,
utils as asym_utils,
)
from cryptography.hazmat.primitives.asymmetric.padding import (
AsymmetricPadding,
PKCS1v15,
PSS,
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateKey,
RSAPrivateNumbers,
RSAPublicKey,
)
class _RSAPrivateKeyInKMS(RSAPrivateKey):
def __init__(self, arn):
self.arn = arn
self.client = boto3.client('kms')
        response = self.client.get_public_key(KeyId=self.arn)
        # Parse the public key; keep it under a private name so the
        # attribute does not shadow the public_key() accessor below
        self._public_key = serialization.load_der_public_key(
            response['PublicKey'])
    @property
    def key_size(self):
        return self._public_key.key_size
    def public_key(self) -> RSAPublicKey:
        return self._public_key
def sign(self, data: bytes, padding: AsymmetricPadding,
algorithm: typing.Union[asym_utils.Prehashed,
hashes.HashAlgorithm]
) -> bytes:
if isinstance(algorithm, asym_utils.Prehashed):
message_type = 'DIGEST'
else:
message_type = 'RAW'
if isinstance(padding, PSS):
signing_alg = 'RSASSA_PSS_'
elif isinstance(padding, PKCS1v15):
signing_alg = 'RSASSA_PKCS1_V1_5_'
else:
raise TypeError("Unsupported padding")
        # Resolve the underlying hash algorithm first: a Prehashed value
        # wraps it in _algorithm, while a plain HashAlgorithm is used as-is
        # (probing _algorithm unconditionally would raise AttributeError)
        if isinstance(algorithm, asym_utils.Prehashed):
            hash_algorithm = algorithm._algorithm
        else:
            hash_algorithm = algorithm
        if isinstance(hash_algorithm, hashes.SHA256):
            signing_alg += 'SHA_256'
        elif isinstance(hash_algorithm, hashes.SHA384):
            signing_alg += 'SHA_384'
        elif isinstance(hash_algorithm, hashes.SHA512):
            signing_alg += 'SHA_512'
        else:
            raise TypeError("Unsupported hashing algorithm")
response = self.client.sign(
KeyId=self.arn, Message=data,
MessageType=message_type,
SigningAlgorithm=signing_alg)
return response['Signature']
# No need to implement these functions so we raise an exception
def signer(
self, padding: AsymmetricPadding, algorithm: hashes.HashAlgorithm
) -> AsymmetricSignatureContext:
raise NotImplementedError
def decrypt(self, ciphertext: bytes, padding: AsymmetricPadding) -> bytes:
raise NotImplementedError
def private_numbers(self) -> RSAPrivateNumbers:
raise NotImplementedError
def private_bytes(
self,
encoding: serialization.Encoding,
format: serialization.PrivateFormat,
encryption_algorithm: serialization.KeySerializationEncryption
) -> bytes:
raise NotImplementedError
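# A minimal usage sketch (hypothetical key ARN; assumes AWS credentials with
# kms:GetPublicKey and kms:Sign permissions):
#   key = _RSAPrivateKeyInKMS('arn:aws:kms:us-east-1:123456789012:key/example')
#   signature = key.sign(b'payload', PKCS1v15(), hashes.SHA256())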
| optee_os-nvidia-rel-35 | scripts/sign_helper_kms.py |
#!/usr/bin/env python3
#
# Copyright (c) 2014-2017, Linaro Limited
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
import subprocess
import sys
def get_args():
parser = argparse.ArgumentParser(description='Shows the memory usage '
'of an OP-TEE based on ELF sections')
parser.add_argument('tee_elf', help='the OP-TEE ELF file (tee.elf)')
parser.add_argument('-a', '--all', action='store_true',
                        help='same as -i -p -u -U')
parser.add_argument('-n', '--no-map', action='store_true',
                        help='do not show the detailed section mappings and '
'RAM usage')
parser.add_argument('-i', '--init', action='store_true',
help='report the total size of the .*_init sections')
parser.add_argument('-p', '--paged', action='store_true',
help='report the total size of the .*_pageable '
'sections')
parser.add_argument('-u', '--unpaged', action='store_true',
help='report the total size of the unpaged sections, '
'that is, all sections but the ones in --init or '
'--paged')
parser.add_argument('-U', '--unpaged-no-heap', action='store_true',
help='report the size of all unpaged sections '
'excluding heap space. Reflects the size of unpaged '
'code and data (.text, .rodata, .data, .bss, .nozi '
'and possibly unwind tables)')
parser.add_argument('-r', '--raw', action='store_true',
help='when processing -i, -p, -u, or -U, show only '
'the size (in decimal) and no other text')
return parser.parse_args()
def printf(format, *args):
sys.stdout.write(format % args)
def print_sect(name, addr, size, round_up=False, print_num_pages=False):
if args.no_map:
return
if size == 0:
size_kib = 0
num_pages = 0
else:
if round_up:
            size_kib = (size - 1) // 1024 + 1
        else:
            size_kib = size // 1024
        num_pages = (size - 1) // 4096 + 1
printf('%-16s %.8X - %.8X size %.8X %3d KiB', name, addr, addr + size,
size, size_kib)
if print_num_pages:
printf(' %d pages', num_pages)
printf('\n')
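# For example, with mapping output enabled, print_sect('.text', 0x1000, 0x2300,
# round_up=True, print_num_pages=True) reports 9 KiB over 3 pages.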
def print_pager_stat(name, size):
    size_kib = size // 1024
if args.raw:
printf('%d ', size)
else:
printf('%-36s size %.8X %3d KiB\n', name, size, size_kib)
def readelf_cmd():
return os.getenv('CROSS_COMPILE', '') + 'readelf'
def main():
global args
in_shdr = False
sects = []
init_size = 0
paged_size = 0
unpaged_size = 0
unpaged_no_heap_size = 0
args = get_args()
env = os.environ.copy()
env['LC_ALL'] = 'C'
readelf = subprocess.Popen(str.split(readelf_cmd()) + ['-s',
args.tee_elf],
stdout=subprocess.PIPE, env=env,
universal_newlines=True)
for line in iter(readelf.stdout.readline, ''):
words = line.split()
if len(words) == 8 and words[7] == '_end_of_ram':
end_of_ram = int(words[1], 16)
break
readelf.terminate()
readelf = subprocess.Popen(str.split(readelf_cmd()) + ['-S', '-W',
args.tee_elf],
stdout=subprocess.PIPE, env=env,
universal_newlines=True)
for line in iter(readelf.stdout.readline, ''):
if 'Section Headers:' in line:
in_shdr = True
continue
if 'Key to Flags:' in line:
in_shdr = False
continue
if in_shdr:
words = line.split()
if words[0] == '[':
words.pop(0)
try:
(_, name, _, addr, offs, size, _,
flags) = words[:8]
except BaseException:
continue
if ('A' in flags):
sects.append({'name': name, 'addr': addr,
'offs': offs, 'size': size})
first_addr = None
for sect in sects:
        if int(sect['addr'], 16) != 0:
addr = sect['addr']
if not first_addr:
first_addr = addr
if int(addr, 16) >= end_of_ram:
break
last_addr = addr
last_size = sect['size']
ram_usage = int(last_addr, 16) + int(last_size, 16) - int(first_addr, 16)
print_sect('RAM Usage', int(first_addr, 16), ram_usage, True, True)
last_addr = 0
last_size = 0
for sect in sects:
name = sect['name']
addr = int(sect['addr'], 16)
size = int(sect['size'], 16)
if addr >= end_of_ram:
break
if last_addr != 0 and addr != last_addr + last_size:
print_sect('*hole*', last_addr + last_size,
addr - (last_addr + last_size))
print_sect(name, addr, size)
if name.endswith('_init'):
init_size += size
elif name.endswith('_pageable'):
paged_size += size
else:
if not name.startswith('.heap'):
unpaged_no_heap_size += size
unpaged_size += size
last_addr = addr
last_size = size
if args.all or args.init:
print_pager_stat('Init sections (.*_init)', init_size)
if args.all or args.paged:
print_pager_stat('Paged sections (.*_pageable)', paged_size)
if args.all or args.unpaged:
print_pager_stat('Unpaged sections ', unpaged_size)
if args.all or args.unpaged_no_heap:
print_pager_stat('Unpaged sections (heap excluded)',
unpaged_no_heap_size)
if (args.raw and (args.all or args.init or args.paged or
args.unpaged or args.unpaged_no_heap)):
printf('\n')
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/mem_usage.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2018, Linaro Limited
#
import argparse
import sys
import re
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def my_err(line_number, msg):
eprint('Error: line:' + repr(line_number) + ' ' + msg)
sys.exit(1)
def gen_read64_macro(reg_name, opc1, crm, descr):
print('')
if len(descr):
print('\t# ' + descr)
print('\t.macro read_' + reg_name.lower() + ' reg0, reg1')
print('\tmrrc\tp15, ' + opc1 + ', \\reg0, \\reg1, ' + crm)
print('\t.endm')
def gen_write64_macro(reg_name, opc1, crm, descr):
print('')
if len(descr):
print('\t# ' + descr)
print('\t.macro write_' + reg_name.lower() + ' reg0, reg1')
print('\tmcrr\tp15, ' + opc1 + ', \\reg0, \\reg1, ' + crm)
print('\t.endm')
def gen_read32_macro(reg_name, crn, opc1, crm, opc2, descr):
print('')
if len(descr):
print('\t# ' + descr)
print('\t.macro read_' + reg_name.lower() + ' reg')
print('\tmrc p15, ' + opc1 + ', \\reg, ' + crn + ', ' + crm + ', ' + opc2)
print('\t.endm')
def gen_write32_macro(reg_name, crn, opc1, crm, opc2, descr):
print('')
if len(descr):
print('\t# ' + descr)
print('\t.macro write_' + reg_name.lower() + ' reg')
print('\tmcr p15, ' + opc1 + ', \\reg, ' + crn + ', ' + crm + ', ' + opc2)
print('\t.endm')
def gen_write32_dummy_macro(reg_name, crn, opc1, crm, opc2, descr):
print('')
if len(descr):
print('\t# ' + descr)
print('\t.macro write_' + reg_name.lower())
print('\t# Register ignored')
print('\tmcr p15, ' + opc1 + ', r0, ' + crn + ', ' + crm + ', ' + opc2)
print('\t.endm')
def gen_read64_func(reg_name, opc1, crm, descr):
print('')
if len(descr):
print('/* ' + descr + ' */')
print('static inline __noprof uint64_t read_' + reg_name.lower() +
'(void)')
print('{')
print('\tuint64_t v;')
print('')
print('\tasm volatile ("mrrc p15, ' + opc1 + ', %Q0, %R0, ' +
crm + '"' + ' : "=r" (v));')
print('')
print('\treturn v;')
print('}')
def gen_write64_func(reg_name, opc1, crm, descr):
print('')
if len(descr):
print('/* ' + descr + ' */')
print('static inline __noprof void write_' + reg_name.lower() +
'(uint64_t v)')
print('{')
print('\tasm volatile ("mcrr p15, ' + opc1 + ', %Q0, %R0, ' +
crm + '"' + ' : : "r" (v));')
print('}')
def gen_read32_func(reg_name, crn, opc1, crm, opc2, descr):
print('')
if len(descr):
print('/* ' + descr + ' */')
print('static inline __noprof uint32_t read_' + reg_name.lower() +
'(void)')
print('{')
print('\tuint32_t v;')
print('')
print('\tasm volatile ("mrc p15, ' + opc1 + ', %0, ' + crn + ', ' +
crm + ', ' + opc2 + '"' + ' : "=r" (v));')
print('')
print('\treturn v;')
print('}')
def gen_write32_func(reg_name, crn, opc1, crm, opc2, descr):
print('')
if len(descr):
print('/* ' + descr + ' */')
print('static inline __noprof void write_' + reg_name.lower() +
'(uint32_t v)')
print('{')
print('\tasm volatile ("mcr p15, ' + opc1 + ', %0, ' + crn + ', ' +
crm + ', ' + opc2 + '"' + ' : : "r" (v));')
print('}')
def gen_write32_dummy_func(reg_name, crn, opc1, crm, opc2, descr):
print('')
if len(descr):
print('/* ' + descr + ' */')
print('static inline __noprof void write_' + reg_name.lower() + '(void)')
print('{')
print('\t/* Register ignored */')
print('\tasm volatile ("mcr p15, ' + opc1 + ', r0, ' + crn + ', ' +
crm + ', ' + opc2 + '");')
print('}')
def gen_file(line, line_number, s_file):
words = line.split()
if len(words) == 0:
return
if len(re.findall('^ *#', line)):
return
if len(re.findall('^ *@', line)):
comment = re.sub('^ *@', '', line)
comment = re.sub('^ *', '', comment)
comment = re.sub('[ \n]*$', '', comment)
if len(comment) == 0:
print('')
return
if s_file:
print('# ' + comment)
else:
print('/* ' + comment + ' */')
return
reg_name = words[0]
crn = words[1]
opc1 = words[2]
crm = words[3]
opc2 = words[4]
access_type = words[5]
descr = " ".join(words[6:])
read_access = access_type == 'RO' or access_type == 'RW'
write_access = (access_type == 'WO' or access_type == 'RW' or
access_type == 'WOD')
dummy_access = access_type == 'WOD'
if not read_access and not write_access:
my_err(line_number, 'bad Access Type "' + access_type + '"')
if crn == '-':
if opc2 != '-':
my_err(line_number, 'bad opc2, expected -')
if read_access:
if s_file:
gen_read64_macro(reg_name, opc1, crm, descr)
else:
gen_read64_func(reg_name, opc1, crm, descr)
if s_file:
gen_write64_macro(reg_name, opc1, crm, descr)
else:
gen_write64_func(reg_name, opc1, crm, descr)
else:
if read_access:
if s_file:
gen_read32_macro(reg_name, crn, opc1, crm, opc2, descr)
else:
gen_read32_func(reg_name, crn, opc1, crm, opc2, descr)
if write_access:
if dummy_access:
if s_file:
gen_write32_dummy_macro(reg_name, crn, opc1, crm, opc2,
descr)
else:
gen_write32_dummy_func(reg_name, crn, opc1, crm, opc2,
descr)
else:
if s_file:
gen_write32_macro(reg_name, crn, opc1, crm, opc2, descr)
else:
gen_write32_func(reg_name, crn, opc1, crm, opc2, descr)
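# Example input line (fields: name crn opc1 crm opc2 access description):
#   SCTLR c1 0 c0 0 RW System Control Register
# With --s_file this emits read_sctlr/write_sctlr assembly macros; otherwise
# it emits the equivalent C inline functions wrapping mrc/mcr instructions.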
def get_args():
parser = argparse.ArgumentParser(description='Generates instructions to '
'access ARM32 system registers.')
parser.add_argument('--s_file', action='store_true',
help='Generate an Assembly instead of a C file')
parser.add_argument('--guard',
help='Provide #ifdef <guard_argument> in C file')
return parser.parse_args()
def main():
args = get_args()
cmnt = 'Automatically generated, do not edit'
if args.s_file:
print('# ' + cmnt)
else:
print('/* ' + cmnt + ' */')
if args.guard is not None:
print('#ifndef ' + args.guard.upper().replace('.', '_'))
print('#define ' + args.guard.upper().replace('.', '_'))
print('#include <compiler.h>')
line_number = 0
for line in sys.stdin:
line_number = line_number + 1
gen_file(line, line_number, args.s_file)
if not args.s_file and args.guard is not None:
print('#endif /*' + args.guard.upper().replace('.', '_') + '*/')
if __name__ == '__main__':
main()
| optee_os-nvidia-rel-35 | scripts/arm32_sysreg.py |
#!/usr/bin/env python3
#
# Copyright (c) 2019, Linaro Limited
#
# SPDX-License-Identifier: BSD-2-Clause
from pathlib import PurePath
from urllib.request import urlopen
import argparse
import glob
import os
import re
import tempfile
DIFF_GIT_RE = re.compile(r'^diff --git a/(?P<path>.*) ')
REVIEWED_RE = re.compile(r'^Reviewed-by: (?P<approver>.*>)')
ACKED_RE = re.compile(r'^Acked-by: (?P<approver>.*>)')
PATCH_START = re.compile(r'^From [0-9a-f]{40}')
def get_args():
parser = argparse.ArgumentParser(description='Print the maintainers for '
'the given source files or directories; '
'or for the files modified by a patch or '
'a pull request. '
'(With -m) Check if a patch or pull '
'request is properly Acked/Reviewed for '
'merging.')
parser.add_argument('-m', '--merge-check', action='store_true',
help='use Reviewed-by: and Acked-by: tags found in '
'patches to prevent display of information for all '
'the approved paths.')
parser.add_argument('-p', '--show-paths', action='store_true',
help='show all paths that are not approved.')
parser.add_argument('-s', '--strict', action='store_true',
help='stricter conditions for patch approval check: '
'subsystem "THE REST" is ignored for paths that '
'match some other subsystem.')
parser.add_argument('arg', nargs='*', help='file or patch')
parser.add_argument('-f', '--file', action='append',
help='treat following argument as a file path, not '
'a patch.')
parser.add_argument('-g', '--github-pr', action='append', type=int,
help='Github pull request ID. The script will '
'download the patchset from Github to a temporary '
'file and process it.')
parser.add_argument('-r', '--release-to', action='store_true',
help='show all the recipients to be used in release '
'announcement emails (i.e., maintainers, reviewers '
'and OP-TEE mailing list(s)) and exit.')
return parser.parse_args()
def check_cwd():
cwd = os.getcwd()
parent = os.path.dirname(os.path.realpath(__file__)) + "/../"
if (os.path.realpath(cwd) != os.path.realpath(parent)):
print("Error: this script must be run from the top-level of the "
"optee_os tree")
exit(1)
# Parse MAINTAINERS and return a dictionary of subsystems such as:
# {'Subsystem name': {'R': ['foo', 'bar'], 'S': ['Maintained'],
# 'F': [ 'path1', 'path2' ]}, ...}
def parse_maintainers():
subsystems = {}
check_cwd()
with open("MAINTAINERS", "r") as f:
start_found = False
ss = {}
name = ''
for line in f:
line = line.strip()
if not line:
continue
if not start_found:
if line.startswith("----------"):
start_found = True
continue
if line[1] == ':':
letter = line[0]
if (not ss.get(letter)):
ss[letter] = []
ss[letter].append(line[3:])
else:
if name:
subsystems[name] = ss
name = line
ss = {}
if name:
subsystems[name] = ss
return subsystems
# If @patchset is a patchset files and contains 2 patches or more, write
# individual patches to temporary files and return the paths.
# Otherwise return [].
def split_patchset(patchset):
psname = os.path.basename(patchset).replace('.', '_')
patchnum = 0
of = None
ret = []
f = None
try:
f = open(patchset, "r")
except OSError:
return []
for line in f:
match = re.search(PATCH_START, line)
if match:
# New patch found: create new file
patchnum += 1
prefix = "{}_{}_".format(patchnum, psname)
of = tempfile.NamedTemporaryFile(mode="w", prefix=prefix,
suffix=".patch",
delete=False)
ret.append(of.name)
if of:
of.write(line)
if len(ret) >= 2:
return ret
if len(ret) == 1:
os.remove(ret[0])
return []
# If @path is a patch file, returns the paths touched by the patch as well
# as the content of the review/ack tags
def get_paths_from_patch(patch):
paths = []
approvers = []
try:
with open(patch, "r") as f:
for line in f:
match = re.search(DIFF_GIT_RE, line)
if match:
p = match.group('path')
if p not in paths:
paths.append(p)
continue
match = re.search(REVIEWED_RE, line)
if match:
a = match.group('approver')
if a not in approvers:
approvers.append(a)
continue
match = re.search(ACKED_RE, line)
if match:
a = match.group('approver')
if a not in approvers:
approvers.append(a)
continue
except Exception:
pass
return (paths, approvers)
# Does @path match @pattern?
# @pattern has the syntax defined in the Linux MAINTAINERS file -- mostly a
# shell glob pattern, except that a trailing slash means a directory and
# everything below. Matching can easily be done by converting to a regexp.
def match_pattern(path, pattern):
# Append a trailing slash if path is an existing directory, so that it
# matches F: entries such as 'foo/bar/'
if not path.endswith('/') and os.path.isdir(path):
path += '/'
rep = "^" + pattern
rep = rep.replace('*', '[^/]+')
rep = rep.replace('?', '[^/]')
if rep.endswith('/'):
rep += '.*'
rep += '$'
    return bool(re.match(rep, path))
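# For example, 'core/arch/arm/' becomes '^core/arch/arm/.*$' (a directory and
# everything below it), while 'scripts/*.py' becomes '^scripts/[^/]+.py$'.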
def get_subsystems_for_path(subsystems, path, strict):
found = {}
for key in subsystems:
def inner():
excluded = subsystems[key].get('X')
if excluded:
for pattern in excluded:
if match_pattern(path, pattern):
return # next key
included = subsystems[key].get('F')
if not included:
return # next key
for pattern in included:
if match_pattern(path, pattern):
found[key] = subsystems[key]
inner()
if strict and len(found) > 1:
found.pop('THE REST', None)
return found
def get_ss_maintainers(subsys):
return subsys.get('M') or []
def get_ss_reviewers(subsys):
return subsys.get('R') or []
def get_ss_approvers(ss):
return get_ss_maintainers(ss) + get_ss_reviewers(ss)
def get_ss_lists(subsys):
return subsys.get('L') or []
def approvers_have_approved(approved_by, approvers):
for n in approvers:
# Ignore anything after the email (Github ID...)
n = n.split('>', 1)[0]
for m in approved_by:
m = m.split('>', 1)[0]
if n == m:
return True
return False
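# For example, approver 'Jane Doe <jane@example.com> (github: jdoe)' matches an
# approved_by entry of 'Jane Doe <jane@example.com>', since anything after the
# closing '>' of the email address is ignored on both sides.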
def download(pr):
url = "https://github.com/OP-TEE/optee_os/pull/{}.patch".format(pr)
f = tempfile.NamedTemporaryFile(mode="wb", prefix="pr{}_".format(pr),
suffix=".patch", delete=False)
print("Downloading {}...".format(url), end='', flush=True)
f.write(urlopen(url).read())
print(" Done.")
return f.name
def show_release_to(subsystems):
check_cwd()
with open("MAINTAINERS", "r") as f:
emails = sorted(set(re.findall(r'[RM]:\t(.*[\w]*<[\w\.-]+@[\w\.-]+>)',
f.read())))
emails += get_ss_lists(subsystems["THE REST"])
print(*emails, sep=', ')
def main():
global args
args = get_args()
all_subsystems = parse_maintainers()
if args.release_to:
show_release_to(all_subsystems)
return
paths = []
arglist = []
downloads = []
split_patches = []
for pr in args.github_pr or []:
downloads += [download(pr)]
for arg in args.arg + downloads:
if os.path.exists(arg):
patches = split_patchset(arg)
if patches:
split_patches += patches
continue
arglist.append(arg)
for arg in arglist + split_patches:
patch_paths = []
approved_by = []
if os.path.exists(arg):
# Try to parse as a patch
(patch_paths, approved_by) = get_paths_from_patch(arg)
if not patch_paths:
# Not a patch, consider the path itself
# as_posix() cleans the path a little bit (suppress leading ./ and
# duplicate slashes...)
patch_paths = [PurePath(arg).as_posix()]
for path in patch_paths:
approved = False
if args.merge_check:
ss_for_path = get_subsystems_for_path(all_subsystems, path,
args.strict)
for key in ss_for_path:
ss_approvers = get_ss_approvers(ss_for_path[key])
if approvers_have_approved(approved_by, ss_approvers):
approved = True
if not approved:
paths += [path]
for f in downloads + split_patches:
os.remove(f)
if args.file:
paths += args.file
if (args.show_paths):
print(paths)
ss = {}
for path in paths:
ss.update(get_subsystems_for_path(all_subsystems, path, args.strict))
for key in ss:
ss_name = key[:50] + (key[50:] and '...')
for name in ss[key].get('M') or []:
print("{} (maintainer:{})".format(name, ss_name))
for name in ss[key].get('R') or []:
print("{} (reviewer:{})".format(name, ss_name))
if __name__ == "__main__":
main()
| optee_os-nvidia-rel-35 | scripts/get_maintainer.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clara.imaging import algos as cta
inputfile = "../data/case8/coronacases_008.mhd"
outputfile = "/home/shekhar/temp/output1.mhd"
params = cta.map_params()
params['inpath'] = inputfile
params['outpath'] = outputfile
params['logs'] = '0'
params['iterations'] = '12'
status = cta.ct_bone(params)
if status == 1:
print("Bone segmentation unsuccessful")
| clara-ia-main | examples/algorithms/ct_bone_segmentation/python/ct_bone_seg_operator.py |
#!/usr/bin/env python3
# Copyright 2021, NVIDIA Corporation
# SPDX-License-Identifier: MIT
"""
Sample parser for redistrib JSON manifests
1. Downloads each archive
2. Validates SHA256 checksums
3. Extracts archives
4. Flattens into a collapsed directory structure
"""
import argparse
import os.path
import hashlib
import json
import re
import shutil
import tarfile
import zipfile
import sys
import requests
__version__ = "0.3.0"
ARCHIVES = {}
DOMAIN = "https://developer.download.nvidia.com"
OUTPUT = "flat"
PRODUCT = None
LABEL = None
URL = None
OS = None
ARCH = None
PLATFORM = None
COMPONENT = None
# Default actions
RETRIEVE = True
VALIDATE = True
UNROLLED = True
COLLAPSE = True
def err(msg):
"""Print error message and exit"""
print("ERROR: " + msg)
sys.exit(1)
def fetch_file(full_path, filename):
"""Download file to disk"""
download = requests.get(full_path)
if download.status_code != 200:
print(" -> Failed: " + filename)
else:
print(":: Fetching: " + full_path)
with open(filename, "wb") as file:
file.write(download.content)
print(" -> Wrote: " + filename)
def get_hash(filename):
"""Calculate SHA256 checksum for file"""
buffer_size = 65536
sha256 = hashlib.sha256()
with open(filename, "rb") as file:
while True:
chunk = file.read(buffer_size)
if not chunk:
break
sha256.update(chunk)
return sha256.hexdigest()
def check_hash(filename, checksum):
"""Compare checksum with expected"""
sha256 = get_hash(filename)
if checksum == sha256:
print(" Verified sha256sum: " + sha256)
else:
print(" => Mismatch sha256sum:")
print(" -> Calculation: " + sha256)
print(" -> Expectation: " + checksum)
def flatten_tree(src, dest, tag=None):
    """Merge hierarchy from multiple directories"""
    if tag:
        dest += "/" + tag
    try:
        shutil.copytree(src, dest, symlinks=1, dirs_exist_ok=1, ignore_dangling_symlinks=1)
    except FileExistsError:
        pass
    shutil.rmtree(src)
def parse_artifact(parent, MANIFEST, component, platform, variant=None):
if variant:
full_path = parent + MANIFEST[component][platform][variant]['relative_path']
else:
full_path = parent + MANIFEST[component][platform]['relative_path']
filename = os.path.basename(full_path)
if RETRIEVE and not os.path.exists(filename) and not os.path.exists(parent + filename):
# Download archive
fetch_file(full_path, filename)
ARCHIVES[platform].append(filename)
elif os.path.exists(filename):
print(" -> Found: " + filename)
ARCHIVES[platform].append(filename)
elif os.path.exists(parent + filename):
print(" -> Found: " + parent + filename)
ARCHIVES[platform].append(parent + filename)
else:
print(" -> Artifact: " + filename)
if VALIDATE and os.path.exists(filename):
if variant:
checksum = MANIFEST[component][platform][variant]['sha256']
else:
checksum = MANIFEST[component][platform]['sha256']
# Compare checksum
check_hash(filename, checksum)
def fetch_action(parent):
"""Do actions while parsing JSON"""
for component in MANIFEST.keys():
        if 'name' not in MANIFEST[component]:
continue
if COMPONENT is not None and component != COMPONENT:
continue
print("\n" + MANIFEST[component]['name'] + ": " + MANIFEST[component]['version'])
for platform in MANIFEST[component].keys():
if "variant" in platform:
continue
            if platform not in ARCHIVES:
ARCHIVES[platform] = []
if not isinstance(MANIFEST[component][platform], str):
if PLATFORM is not None and platform != PLATFORM:
print(" -> Skipping platform: " + platform)
continue
if not "relative_path" in MANIFEST[component][platform]:
for variant in MANIFEST[component][platform].keys():
parse_artifact(parent, MANIFEST, component, platform, variant)
else:
parse_artifact(parent, MANIFEST, component, platform)
def post_action():
"""Extract archives and merge directories"""
if len(ARCHIVES) == 0:
return
print("\nArchives:")
if not os.path.exists(OUTPUT):
os.makedirs(OUTPUT)
for platform in ARCHIVES:
for archive in ARCHIVES[platform]:
try:
binTag = archive.split("-")[3].split("_")[1]
print(platform, binTag)
            except IndexError:
binTag = None
# Tar files
if UNROLLED and re.search(r"\.tar\.", archive):
print(":: tar: " + archive)
tarball = tarfile.open(archive)
topdir = os.path.commonprefix(tarball.getnames())
tarball.extractall()
tarball.close()
print(" -> Extracted: " + topdir + "/")
if COLLAPSE:
flatten_tree(topdir, OUTPUT + "/" + platform, binTag)
# Zip files
elif UNROLLED and re.search(r"\.zip", archive):
print(":: zip: " + archive)
                with zipfile.ZipFile(archive) as zippy:
                    topdir = os.path.commonprefix(zippy.namelist())
                    zippy.extractall()
print(" -> Extracted: " + topdir)
if COLLAPSE:
flatten_tree(topdir, OUTPUT + "/" + platform, binTag)
print("\nOutput: " + OUTPUT + "/")
for item in sorted(os.listdir(OUTPUT)):
if os.path.isdir(OUTPUT + "/" + item):
print(" - " + item + "/")
elif os.path.isfile(OUTPUT + "/" + item):
print(" - " + item)
# If running standalone
if __name__ == '__main__':
# Parse CLI arguments
PARSER = argparse.ArgumentParser()
# Input options
PARSER_GROUP = PARSER.add_mutually_exclusive_group(required=True)
PARSER_GROUP.add_argument('-u', '--url', dest='url', help='URL to manifest')
PARSER_GROUP.add_argument('-l', '--label', dest='label', help='Release label version')
PARSER.add_argument('-p', '--product', dest='product', help='Product name')
PARSER.add_argument('-o', '--output', dest='output', help='Output directory')
# Filter options
PARSER.add_argument('--component', dest='component', help='Component name')
PARSER.add_argument('--os', dest='os', help='Operating System')
PARSER.add_argument('--arch', dest='arch', help='Architecture')
# Toggle actions
PARSER.add_argument('-w', '--download', dest='retrieve', action='store_true', \
help='Download archives', default=True)
PARSER.add_argument('-W', '--no-download', dest='retrieve', action='store_false', \
help='Parse manifest without downloads')
PARSER.add_argument('-s', '--checksum', dest='validate', action='store_true', \
help='Verify SHA256 checksum', default=True)
PARSER.add_argument('-S', '--no-checksum', dest='validate', action='store_false', \
help='Skip SHA256 checksum validation')
PARSER.add_argument('-x', '--extract', dest='unrolled', action='store_true', \
help='Extract archives', default=True)
PARSER.add_argument('-X', '--no-extract', dest='unrolled', action='store_false', \
help='Do not extract archives')
PARSER.add_argument('-f', '--flatten', dest='collapse', action='store_true', \
help='Collapse directories', default=True)
PARSER.add_argument('-F', '--no-flatten', dest='collapse', action='store_false', \
help='Do not collapse directories')
ARGS = PARSER.parse_args()
#print(ARGS)
RETRIEVE = ARGS.retrieve
VALIDATE = ARGS.validate
UNROLLED = ARGS.unrolled
COLLAPSE = ARGS.collapse
# Define variables
if ARGS.label is not None:
LABEL = ARGS.label
if ARGS.product is not None:
PRODUCT = ARGS.product
if ARGS.url is not None:
URL = ARGS.url
if ARGS.output is not None:
OUTPUT = ARGS.output
if ARGS.component is not None:
COMPONENT = ARGS.component
if ARGS.os is not None:
OS = ARGS.os
if ARGS.arch is not None:
ARCH = ARGS.arch
#
# Setup
#
# Sanity check
if not UNROLLED:
COLLAPSE = False
# Short-hand
if LABEL:
if PRODUCT:
URL = f"{DOMAIN}/compute/{PRODUCT}/redist/redistrib_{LABEL}.json"
else:
err("Must pass --product argument")
    # Concatenate
if ARCH is not None and OS is not None:
PLATFORM = f"{OS}-{ARCH}"
elif ARCH is not None and OS is None:
err("Must pass --os argument")
elif OS is not None and ARCH is None:
err("Must pass --arch argument")
#
# Run
#
# Parse JSON
if os.path.isfile(URL):
with open(URL, "rb") as f:
MANIFEST = json.load(f)
else:
try:
MANIFEST = requests.get(URL).json()
except json.decoder.JSONDecodeError:
err("redistrib JSON manifest file not found")
print(":: Parsing JSON: " + URL)
# Do stuff
fetch_action(os.path.dirname(URL) + "/")
if UNROLLED:
post_action()
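# Example invocation (hypothetical product/label; downloads from the network):
#   python3 parse_redist.py --product cuda --label 11.4.2 --os linux --arch x86_64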
### END ###
| build-system-archive-import-examples-main | parse_redist.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
""" Expose air_sdk package """
from .air_sdk import *
| air_sdk-main | __init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Simulation module
"""
from . import util
from .air_model import AirModel
class Simulation(AirModel):
"""
Manage a Simulation
### json
Returns a JSON string representation of the simulation
### refresh
Syncs the simulation with all values returned by the API
### update
Update the simulation with the provided data
Arguments:
kwargs (dict, optional): All optional keyword arguments are applied as key/value
pairs in the request's JSON payload
"""
_deletable = False
def __repr__(self):
if self._deleted or not self.title:
return super().__repr__()
return f'<Simulation \'{self.title}\' {self.id}>'
def create_service(self, name, interface, dest_port, **kwargs):
"""
Create a new service for this simulation
Arguments:
name (str): Name of the service
interface (str | `SimulationInterface`): Interface that the service should be created
for. This can be provided in one of the following formats:
- [`SimulationInterface`](/docs/simulation-interface) object
- ID of a [`SimulationInterface`](/docs/simulation-interface)
- String in the format of 'node_name:interface_name'
dest_port (int): Service port number
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Service`](/docs/service)
Example:
```
>>> simulation.create_service('myservice', 'oob-mgmt-server:eth0', 22, service_type='ssh')
<Service myservice cc18d746-4cf0-4dd3-80c0-e7df68bbb782>
>>> simulation.create_service('myservice', simulation_interface, 22, service_type='ssh')
<Service myservice 9603d0d5-5526-4a0f-91b8-a600010d0091>
```
"""
service = self._api.client.services.create(simulation=self.id, name=name,
interface=interface, dest_port=dest_port,
**kwargs)
self.refresh()
return service
def add_permission(self, email, **kwargs):
"""
Adds permission for a given user to this simulation.
Arguments:
email (str): Email address of the user being given permission
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Permission`](/docs/permission)
Example:
```
>>> simulation.add_permission('[email protected]', write_ok=True)
<Permission 217bea68-7048-4262-9bbc-b98ab16c603e>
```
"""
return self._api.client.permissions.create(email=email, simulation=self.id, **kwargs)
@util.required_kwargs(['action'])
def control(self, **kwargs):
"""
Sends a control command to the simulation.
Arguments:
action (str): Control command
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
dict: Response JSON
Example:
```
>>> simulation.control(action='destroy')
{'result': 'success'}
```
"""
url = f'{self._api.url}{self.id}/control/'
res = self._api.client.post(url, json=kwargs)
util.raise_if_invalid_response(res)
return res.json()
def load(self):
""" Alias for `start()` """
self.start()
def start(self):
""" Start/load the simulation """
self.control(action='load')
self.refresh()
def stop(self):
""" Alias for `store()` """
self.store()
def store(self):
""" Store and power off the simulation """
self.control(action='store')
self.refresh()
def delete(self):
""" Delete the simulation """
self.control(action='destroy')
self._deleted = True
class SimulationApi:
""" High-level interface for the Simulation API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/simulation/'
@util.deprecated('SimulationApi.list()')
def get_simulations(self): #pylint: disable=missing-function-docstring
return self.list()
@util.deprecated('SimulationApi.get()')
def get_simulation(self, simulation_id): #pylint: disable=missing-function-docstring
return self.get(simulation_id)
@util.deprecated('SimulationApi.create()')
def create_simulation(self, **kwargs): #pylint: disable=missing-function-docstring
return self.create(**kwargs)
@util.deprecated('Simulation.update()')
def update_simulation(self, simulation_id, data): #pylint: disable=missing-function-docstring
sim = self.get(simulation_id)
sim.update(**data)
def duplicate(self, simulation, **kwargs):
"""
Duplicate/clone an existing simulation
Arguments:
simulation (str | `Simulation`): Simulation or ID of the snapshot to be duplicated
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
([`Simulation`](/docs/simulation), dict): Newly created simulation and response JSON
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.simulations.duplicate(simulation=simulation)
<Simulation my_sim 5ff3f0dc-7db8-4938-8257-765c8e48623a>
```
"""
sim = simulation
if isinstance(sim, str):
sim = self.get(simulation)
kwargs['action'] = 'duplicate'
response = sim.control(**kwargs)
return Simulation(self, **response['simulation']), response
@util.deprecated('Simulation.control()')
def control(self, simulation_id, action, **kwargs): #pylint: disable=missing-function-docstring
sim = self.get(simulation_id)
return sim.control(action=action, **kwargs)
def get_citc_simulation(self):
"""
Get the active CITC reference simulation
Returns:
[`Simulation`](/docs/simulation)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.simulations.get_citc_simulation()
<Simulation my_sim b9125419-7c6e-41db-bba9-7d647d63943e>
```
"""
url = self.url + 'citc/'
res = self.client.get(url)
util.raise_if_invalid_response(res)
return Simulation(self, **res.json())
def get(self, simulation_id, **kwargs):
"""
Get an existing simulation
Arguments:
simulation_id (str): Simulation ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
[`Simulation`](/docs/simulation)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.simulations.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
<Simulation my_sim 3dadd54d-583c-432e-9383-a2b0b1d7f551>
```
"""
url = f'{self.url}{simulation_id}/'
res = self.client.get(url, params=kwargs)
util.raise_if_invalid_response(res)
return Simulation(self, **res.json())
def list(self, **kwargs):
#pylint: disable=line-too-long
"""
List existing simulations
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.simulations.list()
[<Simulation sim1 c51b49b6-94a7-4c93-950c-e7fa4883591>, <Simulation sim2 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
""" #pylint: enable=line-too-long
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [Simulation(self, **simulation) for simulation in res.json()]
@util.required_kwargs(['topology'])
def create(self, **kwargs):
"""
Create a new simulation
Arguments:
topology (str | `Topology`): `Topology` or ID
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Simulation`](/docs/simulation)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.simulations.create(topology=topology, title='my_sim')
<Simulation my_sim 01298e0c-4ef1-43ec-9675-93160eb29d9f>
```
"""
util.validate_timestamps('Simulation created', expires_at=kwargs.get('expires_at'),
sleep_at=kwargs.get('sleep_at'))
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return Simulation(self, **res.json())
| air_sdk-main | air_sdk/simulation.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Link module
"""
from . import util
from .air_model import AirModel
class Link(AirModel):
"""
Manage a Link
### delete
Delete the link. Once successful, the object should no longer be used and will raise
[`AirDeletedObject`](/docs/exceptions) when referenced.
Raises:
    [`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
### json
Returns a JSON string representation of the link
### refresh
Syncs the link with all values returned by the API
### update
Update the link with the provided data
Arguments:
kwargs (dict, optional): All optional keyword arguments are applied as key/value
pairs in the request's JSON payload
"""
def __repr__(self):
if self._deleted:
return super().__repr__()
return f'<Link {self.id}>'
class LinkApi:
""" High-level interface for the Link API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/link/'
def get(self, link_id, **kwargs):
"""
Get an existing link
Arguments:
link_id (str): Link ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
[`Link`](/docs/link)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.links.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
<Link 3dadd54d-583c-432e-9383-a2b0b1d7f551>
```
"""
url = f'{self.url}{link_id}/'
res = self.client.get(url, params=kwargs)
util.raise_if_invalid_response(res)
return Link(self, **res.json())
def list(self, **kwargs):
"""
List existing links
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.links.list()
[<Link c51b49b6-94a7-4c93-950c-e7fa4883591>, <Link 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
"""
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [Link(self, **link) for link in res.json()]
@util.required_kwargs(['topology', 'interfaces'])
def create(self, **kwargs):
#pylint: disable=line-too-long
"""
Create a new link
Arguments:
topology (str | `Topology`): `Topology` or ID
interfaces (list): List of `Interface` objects or IDs
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Link`](/docs/link)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.links.create(topology=topology, interfaces=[intf1, 'fd61e3d8-af2f-4735-8b1d-356ee6bf4abe'])
<Link 01298e0c-4ef1-43ec-9675-93160eb29d9f>
```
""" #pylint: enable=line-too-long
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return Link(self, **res.json())
| air_sdk-main | air_sdk/link.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
SSH Key module
"""
from . import util
from .air_model import AirModel
class SSHKey(AirModel):
"""
Manage a SSH Key
### delete
Delete the key. Once successful, the object should no longer be used and will raise
[`AirDeletedObject`](/docs/exceptions) when referenced.
Raises:
    [`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
### json
Returns a JSON string representation of the key
### refresh
Syncs the key with all values returned by the API
"""
_updatable = False
def __repr__(self):
if self._deleted or not self.name:
return super().__repr__()
return f'<SSHKey {self.name} {self.id}>'
class SSHKeyApi:
""" High-level interface for the SSHKey API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/sshkey/'
def list(self, **kwargs):
#pylint: disable=line-too-long
"""
List existing keys
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.ssh_keys.list()
[<SSHKey mykey c51b49b6-94a7-4c93-950c-e7fa4883591>, <SSHKey test_key 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
""" #pylint: enable=line-too-long
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [SSHKey(self, **key) for key in res.json()]
@util.required_kwargs(['public_key', 'name'])
def create(self, **kwargs):
"""
Add a new public key to your account
Arguments:
name (str): Descriptive name for the public key
public_key (str): Public key
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`SSHKey`](/docs/sshkey)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.ssh_keys.create(name='my_pub_key', public_key='<key_string>')
<SSHKey my_pub_key 01298e0c-4ef1-43ec-9675-93160eb29d9f>
```
"""
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return SSHKey(self, **res.json())
| air_sdk-main | air_sdk/ssh_key.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Worker module
"""
from . import util
from .air_model import AirModel
class Worker(AirModel):
"""
Manage a Worker
### json
Returns a JSON string representation of the worker
### refresh
Syncs the worker with all values returned by the API
### update
Update the worker with the provided data
Arguments:
kwargs (dict, optional): All optional keyword arguments are applied as key/value
pairs in the request's JSON payload
"""
_deletable = False
def __repr__(self):
if self._deleted or not self.fqdn:
return super().__repr__()
return f'<Worker {self.fqdn} {self.id}>'
@util.deprecated('<worker_instance>.available')
def set_available(self, available):
"""
Sets a worker's `available` value in AIR
Arguments:
available (bool)
"""
self.available = available #pylint: disable=attribute-defined-outside-init
class WorkerApi:
""" High-level interface for the Worker API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/worker/'
@util.deprecated('WorkerApi.list()')
def get_workers(self, **kwargs): #pylint: disable=missing-function-docstring
return self.list(**kwargs)
@util.deprecated('Worker.update()')
def update_worker(self, worker_id, **kwargs): #pylint: disable=missing-function-docstring
worker = self.get(worker_id)
return worker.update(**kwargs)
def get(self, worker_id, **kwargs):
"""
Get an existing worker
Arguments:
worker_id (str): Worker ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
[`Worker`](/docs/worker)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.workers.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
<Worker worker01 3dadd54d-583c-432e-9383-a2b0b1d7f551>
```
"""
url = f'{self.url}{worker_id}/'
res = self.client.get(url, params=kwargs)
util.raise_if_invalid_response(res)
return Worker(self, **res.json())
def list(self, **kwargs):
#pylint: disable=line-too-long
"""
List existing workers
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.workers.list()
[<Worker worker01 c51b49b6-94a7-4c93-950c-e7fa4883591>, <Worker worker02 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
""" #pylint: enable=line-too-long
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [Worker(self, **worker) for worker in res.json()]
@util.required_kwargs(['cpu', 'memory', 'storage', 'ip_address', 'port_range', 'username',
'password'])
def create(self, **kwargs):
#pylint: disable=line-too-long
"""
Create a new worker
Arguments:
cpu (int): Number of vCPUs the worker can support
memory (int): Amount of memory (in MB) a worker can support
storage (int): Amount of storage (in GB) a worker can support
ip_address (str): Internal IP address
port_range (str): Range of ports available on the worker
username (str): Worker username for API access
password (str): Worker password for API access
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Worker`](/docs/worker)
Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.workers.create(cpu=100, memory=200000, storage=1000, ip_address='10.1.1.1', port_range='10000-30000', username='worker01', password='secret')
<Worker my_sim 01298e0c-4ef1-43ec-9675-93160eb29d9f>
```
""" #pylint: enable=line-too-long
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return Worker(self, **res.json())
| air_sdk-main | air_sdk/worker.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
NVIDIA Air API module
"""
#pylint: disable=too-many-public-methods
from datetime import date, datetime
import logging
from json import JSONDecodeError
import requests
from requests.compat import urlparse
from . import util
from .account import AccountApi
from .air_model import AirModel, LazyLoaded
from .capacity import CapacityApi
from .demo import DemoApi
from .exceptions import AirAuthorizationError, AirForbiddenError, AirUnexpectedResponse
from .image import ImageApi
from .interface import InterfaceApi
from .job import JobApi
from .link import LinkApi
from .login import LoginApi
from .marketplace import MarketplaceApi
from .node import NodeApi
from .organization import OrganizationApi
from .permission import PermissionApi
from .resource_budget import ResourceBudgetApi
from .service import ServiceApi
from .ssh_key import SSHKeyApi
from .simulation import SimulationApi
from .simulation_interface import SimulationInterfaceApi
from .simulation_node import SimulationNodeApi
from .token import TokenApi
from .topology import TopologyApi
from .worker import WorkerApi
ALLOWED_HOSTS = ['air.nvidia.com', 'staging.air.nvidia.com', 'air.cumulusnetworks.com',
'staging.air.cumulusnetworks.com']
class AirSession(requests.Session):
""" Wrapper around requests.Session """
def rebuild_auth(self, prepared_request, response):
""" Allow credential sharing between nvidia.com and cumulusnetworks.com only """
if urlparse(prepared_request.url).hostname in ALLOWED_HOSTS:
return
super().rebuild_auth(prepared_request, response)
class AirApi:
"""
Main interface for an API client instance
"""
def __init__(self, api_url='https://air.nvidia.com/api/', api_version='v1', **kwargs):
"""
Create a new API client instance. The caller MUST provide either `username` and `password`
or a `bearer_token`. The `password` argument may either be an API token or a service account
password.
Arguments:
username (str, optional): Username
password (str, optional): Password or API token
bearer_token (str, optional): Pre-generated bearer token
api_url (str, optional): Default = https://air.nvidia.com/api/
api_version (str): Default = v1
"""
self.client = AirSession()
self.client.headers.update({'content-type': 'application/json'})
self.api_url = _normalize_api_url(api_url) + _normalize_api_version(api_version)
self.token = None
self.username = None
self.authorize(**kwargs)
#pylint: disable=missing-function-docstring
@property
def accounts(self):
return AccountApi(self)
@property
def api_tokens(self):
return TokenApi(self)
@property
def capacity(self):
return CapacityApi(self)
@property
def demos(self):
return DemoApi(self)
@property
def images(self):
return ImageApi(self)
@property
def interfaces(self):
return InterfaceApi(self)
@property
def jobs(self):
return JobApi(self)
@property
def links(self):
return LinkApi(self)
@property
def login(self):
return LoginApi(self)
@property
def marketplace(self):
return MarketplaceApi(self)
@property
@util.deprecated('AirApi.nodes')
def node(self):
return self.nodes
@property
def nodes(self):
return NodeApi(self)
@property
def organizations(self):
return OrganizationApi(self)
@property
@util.deprecated('AirApi.permissions')
def permission(self):
return self.permissions
@property
def permissions(self):
return PermissionApi(self)
@property
def resource_budgets(self):
return ResourceBudgetApi(self)
@property
@util.deprecated('AirApi.services')
def service(self):
return self.services
@property
def services(self):
return ServiceApi(self)
@property
@util.deprecated('AirApi.simulations')
def simulation(self):
return self.simulations
@property
def simulations(self):
return SimulationApi(self)
@property
@util.deprecated('AirApi.simulation_interfaces')
def simulation_interface(self):
return self.simulation_interfaces
@property
def simulation_interfaces(self):
return SimulationInterfaceApi(self)
@property
@util.deprecated('AirApi.simulation_nodes')
def simulation_node(self):
return self.simulation_nodes
@property
def simulation_nodes(self):
return SimulationNodeApi(self)
@property
def ssh_keys(self):
return SSHKeyApi(self)
@property
@util.deprecated('AirApi.topologies')
def topology(self):
return self.topologies
@property
def topologies(self):
return TopologyApi(self)
@property
@util.deprecated('AirApi.workers')
def worker(self):
return self.workers
@property
def workers(self):
return WorkerApi(self)
#pylint: enable=missing-function-docstring
def authorize(self, **kwargs):
"""
Authorizes the API client using either a pre-generated API token, a service account
username/password, or a pre-generated bearer token.
Callers MUST pass either a valid `bearer_token` or a `username` and `password`.
The `password` argument may either be an API token or a service account
password. After successfully authorizing, all subsequent API calls will include the
authorization token provided by the AIR API. **Note:** This is called once automatically
when an AirApi object is instantiated.
Arguments:
bearer_token (str, optional): Pre-generated bearer token
username (str, optional): Username
password (str, optional): Password or API token
Raises:
ValueError - Caller did not pass either a token or a username/password
"""
token = None
if kwargs.get('bearer_token'):
token = kwargs['bearer_token']
elif kwargs.get('username', None) and kwargs.get('password', None):
token = self.get_token(kwargs['username'], kwargs['password'])
else:
raise ValueError('Must include either `bearer_token` or ' + \
'`username` and `password` arguments')
self.token = token
self.client.headers.update({'authorization': 'Bearer ' + token})
login = self.login.list()
self.username = getattr(login, 'username', None)
def get_token(self, username, password):
"""
Gets a new bearer token for a given username and password
Arguments:
username (str): Username
password (str): Password
Returns:
str: Bearer token
Raises:
- [`AirAuthorizationError`](/docs/exceptions) - API did not return a token
- `JSONDecodeError` - API's response is not a valid JSON object
"""
route = '/login/'
data = {'username': username, 'password': password}
res = self.post(self.api_url + route, json=data)
try:
if res.json().get('token', None):
return res.json()['token']
logging.debug('AirApi.get_token :: Response JSON')
logging.debug(res.json())
raise AirAuthorizationError('API did not provide a token for ' + username)
        except JSONDecodeError as err:
            raise AirAuthorizationError('API did not return a valid JSON response') from err
def _request(self, method, url, *args, **kwargs):
if kwargs.get('json'):
logging.debug(f'unserialized json: {kwargs["json"]}')
if isinstance(kwargs['json'], list):
kwargs['json'] = [_serialize_dict(obj) for obj in kwargs['json']]
else:
kwargs['json'] = _serialize_dict(kwargs['json'])
if kwargs.get('params'):
kwargs['params'] = _serialize_dict(kwargs['params'])
logging.debug(f'request args: {args}')
logging.debug(f'request kwargs: {kwargs}')
res = self.client.request(method, url, allow_redirects=False, *args, **kwargs)
if (res.status_code == 301
and urlparse(res.headers.get('Location')).hostname in ALLOWED_HOSTS):
res = self.client.request(method, res.headers['Location'], *args, **kwargs)
if getattr(res, 'status_code') == 403:
raise AirForbiddenError
try:
res.raise_for_status()
        except requests.exceptions.HTTPError as err:
            raise AirUnexpectedResponse(err.response.text, err.response.status_code) from err
return res
def get(self, url, *args, **kwargs):
""" Wrapper method for GET requests """
return self._request('GET', url, *args, **kwargs)
def post(self, url, *args, **kwargs):
""" Wrapper method for POST requests """
return self._request('POST', url, *args, **kwargs)
def put(self, url, *args, **kwargs):
""" Wrapper method for PUT requests """
return self._request('PUT', url, *args, **kwargs)
def patch(self, url, *args, **kwargs):
""" Wrapper method for PATCH requests """
return self._request('PATCH', url, *args, **kwargs)
def delete(self, url, *args, **kwargs):
""" Wrapper method for DELETE requests """
return self._request('DELETE', url, *args, **kwargs)
def _normalize_api_version(version):
try:
version = int(version)
version = f'v{version}'
except Exception:
pass
return version
def _normalize_api_url(url):
if url[-1] != '/':
url += '/'
if not url.endswith('api/'):
url += 'api/'
return url
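# For example, _normalize_api_url('https://air.nvidia.com') returns
# 'https://air.nvidia.com/api/' and _normalize_api_version(1) returns 'v1'.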
def _serialize_dict(raw_dict):
clone = {}
for key, value in raw_dict.items():
if isinstance(value, (AirModel, LazyLoaded)):
clone[key] = value.id
elif isinstance(value, dict):
clone[key] = _serialize_dict(value)
elif isinstance(value, list):
clone[key] = _serialize_list(value)
elif isinstance(value, (datetime, date)):
clone[key] = value.isoformat()
elif not key.startswith('_'):
clone[key] = value
return clone
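# For example, _serialize_dict({'simulation': <Simulation>, 'state': 'NEW'})
# replaces the model instance with its ID: {'simulation': '<id>', 'state': 'NEW'}.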
def _serialize_list(raw_list):
clone = []
for item in raw_list:
if isinstance(item, (AirModel, LazyLoaded)):
clone.append(item.id)
elif isinstance(item, dict):
clone.append(_serialize_dict(item))
elif isinstance(item, list):
clone.append(_serialize_list(item))
elif not str(item).startswith('_'):
clone.append(item)
return clone
| air_sdk-main | air_sdk/air_api.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Service module
"""
from . import util
from .air_model import AirModel
class Service(AirModel):
"""
Manage a Service
### delete
Delete the service. Once successful, the object should no longer be used and will raise
[`AirObjectDeleted`](/docs/exceptions) when referenced.
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
### json
Returns a JSON string representation of the service
### refresh
Syncs the service with all values returned by the API
### update
Update the service with the provided data
Arguments:
kwargs (dict, optional): All optional keyword arguments are applied as key/value
pairs in the request's JSON payload
"""
def __repr__(self):
if self._deleted or not self.name:
return super().__repr__()
return f'<Service {self.name} {self.id}>'
class ServiceApi:
""" High-level interface for the Service API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/service/'
@util.deprecated('ServiceApi.list()')
def get_services(self): #pylint: disable=missing-function-docstring
return self.list()
@util.deprecated('ServiceApi.get()')
def get_service(self, service_id): #pylint: disable=missing-function-docstring
return self.get(service_id)
@util.deprecated('ServiceApi.create()')
def create_service(self, simulation_id, name, interface, dest_port, **kwargs): #pylint: disable=missing-function-docstring
return self.create(simulation=simulation_id, name=name, interface=interface,
dest_port=dest_port, **kwargs)
def get(self, service_id, **kwargs):
"""
Get an existing service
Arguments:
service_id (str): Service ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
[`Service`](/docs/service)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.services.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
<Service SSH 3dadd54d-583c-432e-9383-a2b0b1d7f551>
```
"""
url = f'{self.url}{service_id}/'
res = self.client.get(url, params=kwargs)
util.raise_if_invalid_response(res)
return Service(self, **res.json())
def list(self, **kwargs):
#pylint: disable=line-too-long
"""
List existing services
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.services.list()
[<Service SSH c51b49b6-94a7-4c93-950c-e7fa4883591>, <Service HTTP 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
""" #pylint: enable=line-too-long
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [Service(self, **service) for service in res.json()]
@util.required_kwargs(['name', 'simulation', 'interface'])
def create(self, **kwargs):
"""
Create a new service
Arguments:
name (str): Service name
interface (str | `SimulationInterface`): Interface that the service should be created
for. This can be provided in one of the following formats:
- [`SimulationInterface`](/docs/simulationinterface) object
- ID of a [`SimulationInterface`](/docs/simulationinterface)
- String in the format of 'node_name:interface_name'
simulation (str | `Simulation`): `Simulation` or ID
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Service`](/docs/service)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.services.create(name='myservice', interface='oob-mgmt-server:eth0', dest_port=22)
<Service myservice cc18d746-4cf0-4dd3-80c0-e7df68bbb782>
>>> air.services.create(name='myservice', interface=simulation_interface, dest_port=22)
<Service myservice 9603d0d5-5526-4a0f-91b8-a600010d0091>
```
"""
if isinstance(kwargs['interface'], str) and ':' in kwargs['interface']:
kwargs['interface'] = self._resolve_interface(kwargs['interface'], kwargs['simulation'])
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return Service(self, **res.json())
def _resolve_interface(self, interface, simulation):
try:
node_name = interface.split(':')[0]
interface_name = interface.split(':')[1]
except (AttributeError, IndexError):
raise ValueError('`interface` must be an Interface object or in the format of ' + \
'"node_name:interface_name"')
resolved = None
for node in self.client.nodes.list(simulation=simulation):
if node.name == node_name:
for intf in node.interfaces:
if intf.name == interface_name:
resolved = intf
break
if not resolved:
raise ValueError('Interface ' + interface + ' does not exist')
return self.client.simulation_interfaces.list(original=resolved, simulation=simulation)[0]
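# Illustrative resolution flow (assumed names): given interface='leaf01:swp1',
# _resolve_interface() finds the simulation node named 'leaf01', matches its
# interface 'swp1', then returns the corresponding SimulationInterface record
# from simulation_interfaces.list(original=..., simulation=...).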
| air_sdk-main | air_sdk/service.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Capacity module
"""
from . import util
from .air_model import AirModel
class Capacity(AirModel):
"""
View platform capacity
### json
Returns a JSON string representation of the capacity
### refresh
Syncs the capacity with all values returned by the API
"""
_deletable = False
_updatable = False
def __repr__(self):
if self._deleted or not self.copies:
return super().__repr__()
return f'<Capacity {self.copies}>'
class CapacityApi:
""" High-level interface for the Simulation API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/capacity/'
@util.deprecated('CapacityApi.get()')
def get_capacity(self, simulation=None, simulation_id=None): #pylint: disable=missing-function-docstring
if not simulation and not simulation_id:
raise ValueError('Must pass a simulation or simulation_id argument')
sim_id = simulation_id or simulation.id
return self.get(simulation_id=sim_id)
def get(self, simulation_id, **kwargs):
"""
Get current platform capacity for a [`Simulation`](/docs/simulation)
Arguments:
simulation_id (str | `Simulation`): Simulation or ID
Returns:
[`Capacity`](/docs/capacity)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.capacity.get(simulation)
<Capacity 30>
```
"""
if isinstance(simulation_id, AirModel):
simulation_id = simulation_id.id
url = f'{self.url}{simulation_id}/'
res = self.client.get(url, params=kwargs)
util.raise_if_invalid_response(res)
return Capacity(self, **res.json())
| air_sdk-main | air_sdk/capacity.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Token module
"""
from . import util
from .air_model import AirModel
class Token(AirModel):
"""
View an API Token
### json
Returns a JSON string representation of the token
### refresh
Syncs the token with all values returned by the API
"""
#_deletable = False
_updatable = False
def __repr__(self):
if self._deleted or not self.name:
return super().__repr__()
if hasattr(self, 'id'):
return f'<Token {self.name} {self.id}>'
if hasattr(self, 'token'):
return f'<Token {self.name} {self.token}>'
return f'<Token {self.name}>'
class TokenApi:
""" High-level interface for the Token API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/api-token/'
def delete(self, token_id, **kwargs):
"""
Deletes an api token
Arguments:
token_id (str): Token ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 204 No Content
or valid response JSON
Example:
```
>>> air.api_tokens.delete('3dadd54d-583c-432e-9383-a2b0b1d7f551')
```
"""
url = f'{self.url}{token_id}/'
res = self.client.delete(url, params=kwargs)
util.raise_if_invalid_response(res, status_code=204, data_type=None)
def list(self, **kwargs):
#pylint: disable=line-too-long
"""
List existing tokens
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.tokens.list()
[<Token my_token c51b49b6-94a7-4c93-950c-e7fa4883591>, <Token ci_token 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
"""
#pylint: enable=line-too-long
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [Token(self, **token) for token in res.json()]
@util.required_kwargs(['name'])
def create(self, **kwargs):
"""
Add a new api token to your account
Arguments:
name (str): Descriptive name for the api token
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Token`](/docs/token)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.api_tokens.create(name='my_api_token')
<Token my_api_token 01298e0c-4ef1-43ec-9675-93160eb29d9f>
```
"""
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return Token(self, **res.json())
| air_sdk-main | air_sdk/token.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Permission module
"""
from . import util
from .air_model import AirModel
class Permission(AirModel):
"""
Manage a Permission
### delete
Delete the permission. Once successful, the object should no longer be used and will raise
[`AirObjectDeleted`](/docs/exceptions) when referenced.
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
### json
Returns a JSON string representation of the permission
### refresh
Syncs the permission with all values returned by the API
### update
Update the permission with the provided data
Arguments:
kwargs (dict, optional): All optional keyword arguments are applied as key/value
pairs in the request's JSON payload
"""
_updatable = False
def __repr__(self):
if self._deleted:
return super().__repr__()
return f'<Permission {self.id}>'
class PermissionApi:
""" High-level interface for the Permission API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/permission/'
@util.deprecated('PermissionApi.create()')
def create_permission(self, email, **kwargs): #pylint: disable=missing-function-docstring
kwargs['email'] = email
return self.create(**kwargs)
@util.required_kwargs([('topology', 'simulation', 'subject_id'), 'email'])
def create(self, **kwargs):
"""
Create a new permission. The caller MUST provide `simulation`, `topology`, or `subject_id`
Arguments:
email (str): Email address for the user being granted permission
simulation (str | `Simulation`, optional): `Simulation` or ID
topology (str | `Topology`, optional): `Topology` or ID
subject_id (str | `AirModel`, optional): `AirModel` instance or ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
[`Permission`](/docs/permission)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.permissions.create(email='[email protected]', topology=topology, write_ok=True)
<Permission 01298e0c-4ef1-43ec-9675-93160eb29d9f>
>>> air.permissions.create(email='[email protected]',
... subject_id='80cf922a-7b80-4795-8cc5-550833ab1cec', subject_model='simulation.image')
<Permission 8a09ea66-51f9-4ddd-8416-62c266cd959e>
```
"""
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return Permission(self, **res.json())
def get(self, permission_id, **kwargs):
"""
Get an existing permission
Arguments:
permission_id (str): Permission ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
[`Permission`](/docs/permission)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.permissions.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
<Permission 3dadd54d-583c-432e-9383-a2b0b1d7f551>
```
"""
url = f'{self.url}{permission_id}/'
res = self.client.get(url, params=kwargs)
util.raise_if_invalid_response(res)
return Permission(self, **res.json())
def list(self, **kwargs):
#pylint: disable=line-too-long
"""
List existing permissions
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.permissions.list()
[<Permission c51b49b6-94a7-4c93-950c-e7fa4883591>, <Permission 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
""" #pylint: enable=line-too-long
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [Permission(self, **permission) for permission in res.json()]
| air_sdk-main | air_sdk/permission.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Helper utils
"""
import datetime
import logging
from json import JSONDecodeError
from dateutil import parser as dateparser
from .exceptions import AirUnexpectedResponse
def raise_if_invalid_response(res, status_code=200, data_type=dict):
"""
Validates that a given API response has the expected status code and JSON payload
Arguments:
res (requests.Response) - API response object
status_code [int] - Expected status code (default: 200)
data_type [type] - Expected type of the decoded JSON payload (default: dict);
    pass None to skip payload validation
Raises:
AirUnexpectedResponse - Raised if an unexpected response is received from the API
"""
json = None
if res.status_code != status_code:
logging.debug(res.text)
raise AirUnexpectedResponse(message=res.text, status_code=res.status_code)
if not data_type:
return
try:
json = res.json()
except JSONDecodeError:
raise AirUnexpectedResponse(message=res.text, status_code=res.status_code)
if not isinstance(json, data_type):
raise AirUnexpectedResponse(message=f'Expected API response to be of type {data_type}, ' + \
f'got {type(json)}',
status_code=res.status_code)
def required_kwargs(required):
""" Decorator to enforce required kwargs for a function """
if not isinstance(required, list):
required = [required]
def wrapper(method):
def wrapped(*args, **kwargs):
for arg in required:
if isinstance(arg, tuple):
present = False
for option in arg:
if option in kwargs:
present = True
break
if not present:
raise AttributeError(f'{method} requires one of the following: {arg}')
else:
if arg not in kwargs:
raise AttributeError(f'{method} requires {arg}')
return method(*args, **kwargs)
return wrapped
return wrapper
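# Illustrative usage (hypothetical function, doctest-style): a tuple entry means
# "at least one of these kwargs is required":
#   @required_kwargs([('topology', 'simulation'), 'email'])
#   def create(**kwargs): ...
#   create(email='user@example.com')                  # raises AttributeError
#   create(email='user@example.com', topology='abc')  # OK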
def deprecated(new=None):
""" Decorator to log a warning when calling a deprecated function """
def wrapper(method):
def wrapped(*args, **kwargs):
msg = f'{method} has been deprecated and will be removed in a future release.'
if new:
msg += f' Use {new} instead.'
logging.warning(msg)
return method(*args, **kwargs)
return wrapped
return wrapper
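# Illustrative usage (hypothetical method): calling a wrapped function logs a
# deprecation warning before delegating to the original implementation:
#   @deprecated('ServiceApi.list()')
#   def get_services(self): ...
#   # get_services() warns '... has been deprecated ... Use ServiceApi.list() instead.'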
def validate_timestamps(log_prefix, **kwargs):
"""
Logs a warning if any provided timestamps are in the past
Arguments:
log_prefix (str): Prefix to be prepended to the logged warning(s)
kwargs (dict): Timestamps to verify
"""
now = datetime.datetime.now()
for key, value in kwargs.items():
if value and dateparser.parse(str(value)) <= now:
logging.warning(f'{log_prefix} with `{key}` in the past: {value} (now: {now})')
def is_datetime_str(value):
"""
Checks whether the string is a valid ISO 8601 datetime; returns the parsed
datetime on success, otherwise False
Arguments:
value (str): String to test
"""
if isinstance(value, str):
try:
return datetime.datetime.fromisoformat(value.replace('Z', '+00:00'))
except ValueError:
pass
return False
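# Illustrative behavior (assumed values, doctest-style):
#   is_datetime_str('2023-01-01T00:00:00Z')  -> datetime with tzinfo=UTC
#   is_datetime_str('not-a-date')            -> False
#   validate_timestamps('Simulation created', sleep_at='2020-01-01')
#   # logs a warning because the timestamp is in the past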
| air_sdk-main | air_sdk/util.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Organization module
"""
from . import util
from .air_model import AirModel
class Organization(AirModel):
"""
Manage an Organization
### delete
Delete the organization. Once successful, the object should no longer be used and will raise
[`AirObjectDeleted`](/docs/exceptions) when referenced.
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
### json
Returns a JSON string representation of the organization
### refresh
Syncs the organization with all values returned by the API
### update
Update the organization with the provided data
Arguments:
kwargs (dict, optional): All optional keyword arguments are applied as key/value
pairs in the request's JSON payload
"""
ORG_MEMBER_ROLE = 'Organization Member'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._members_api_url = f'{self._api.url}{self.id}/members/'
def __repr__(self):
if self._deleted or not self.name:
return super().__repr__()
return f'<Organization {self.name} {self.id}>'
def add_member(self, username: str, roles: list = None):
"""
Add a new member to the organization
Arguments:
username (str): The email address of the user to add
roles (list, optional): A list of roles to assign the user. Valid values are
'Organization Admin' or 'Organization Member'. If no roles list is provided,
'Organization Member' is used as the default role.
Example:
```
>>> organization.add_member('[email protected]')
>>> organization.add_member('[email protected]', roles=['Organization Admin'])
```
"""
_roles = roles
if not _roles:
_roles = [self.ORG_MEMBER_ROLE]
self._api.client.post(self._members_api_url, json={'username': username, 'roles': _roles})
self.refresh()
def add_members(self, members: list):
#pylint: disable=line-too-long
"""
Add new members to the organization
Arguments:
members (list): List of organization membership dicts in the format of
{'username': <email_address>, 'roles': [<role>]}.
'roles' is optional and defaults to ['Organization Member']
<role> can be a value of 'Organization Admin' or 'Organization Member'.
Example:
```
>>> organization.add_members([{'username': '[email protected]', 'roles': ['Organization Admin']}, {'username': '[email protected]'}])
```
""" #pylint: enable=line-too-long
for member in members:
if not member.get('roles', []):
member['roles'] = [self.ORG_MEMBER_ROLE]
self._api.client.post(self._members_api_url, json=members)
self.refresh()
def remove_member(self, username: str, **kwargs):
"""
Remove a member from the organization
Arguments:
username (str): The email address of the user to remove
Example:
```
>>> organization.remove_member('[email protected]')
"""
self._api.client.delete(self._members_api_url, json={'username': username})
if kwargs.get('_refresh_when_done', True):
self.refresh()
def remove_members(self, members: list):
"""
Remove multiple members from the organization
Arguments:
members (list): Email addresses of the users to remove
Example:
```
>>> organization.remove_members(['[email protected]', '[email protected]'])
"""
for member in members:
self.remove_member(member, _refresh_when_done=False)
self.refresh()
class OrganizationApi:
""" High-level interface for the Organization API """
def __init__(self, client):
self.client = client
self.url = self.client.api_url + '/organization/'
def get(self, organization_id, **kwargs):
"""
Get an existing organization
Arguments:
organization_id (str): Organization ID
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
[`Organization`](/docs/organization)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.organizations.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
<Organization NVIDIA 3dadd54d-583c-432e-9383-a2b0b1d7f551>
```
"""
url = f'{self.url}{organization_id}/'
res = self.client.get(url, params=kwargs)
util.raise_if_invalid_response(res)
return Organization(self, **res.json())
def list(self, **kwargs):
#pylint: disable=line-too-long
"""
List existing organizations
Arguments:
kwargs (dict, optional): All other optional keyword arguments are applied as query
parameters/filters
Returns:
list
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.organizations.list()
[<Organization NVIDIA c51b49b6-94a7-4c93-950c-e7fa4883591>, <Organization Customer 3134711d-015e-49fb-a6ca-68248a8d4aff>]
```
""" #pylint: enable=line-too-long
res = self.client.get(f'{self.url}', params=kwargs)
util.raise_if_invalid_response(res, data_type=list)
return [Organization(self, **organization) for organization in res.json()]
@util.required_kwargs(['name'])
def create(self, **kwargs):
#pylint: disable=line-too-long
"""
Create a new organization
Arguments:
name (str): Organization name
members (list, optional): List of organization membership dicts in the format of
{'username': <email_address>, 'roles': [<role>]}.
'roles' is optional and defaults to ['Organization Member']
<role> can be a value of 'Organization Admin' or 'Organization Member'.
If no member list is provided, the calling user's account will be set as the
organization admin by default.
kwargs (dict, optional): All other optional keyword arguments are applied as key/value
pairs in the request's JSON payload
Returns:
[`Organization`](/docs/organization)
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
or valid response JSON
Example:
```
>>> air.organizations.create(name='NVIDIA', members=[{'username': '[email protected]', 'roles': ['Organization Admin']}, {'username': '[email protected]'}])
<Organization NVIDIA 01298e0c-4ef1-43ec-9675-93160eb29d9f>
```
""" #pylint: enable=line-too-long
res = self.client.post(self.url, json=kwargs)
util.raise_if_invalid_response(res, status_code=201)
return Organization(self, **res.json())
| air_sdk-main | air_sdk/organization.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Base classes for AIR object models
"""
from datetime import date, datetime
import json
from . import util
from .exceptions import AirObjectDeleted
class AirModel:
""" Base class for AIR object models """
model_keys = {'account': 'accounts', 'base_simulation': 'simulations', 'bios': 'images',
'connection': 'links', 'demo': 'demos', 'interface': 'simulation_interfaces',
'interfaces': {'Node': 'interfaces', 'SimulationNode': 'simulation_interfaces',
'Link': 'interfaces'},
'job': 'jobs', 'last_worker': 'worker',
'node': {'Interface': 'nodes', 'NodeInstruction': 'simulation_nodes',
'SimulationInterface': 'simulation_nodes',
'TopologyInstruction': 'nodes'},
'nodes': 'simulation_nodes',
'original': {'SimulationInterface': 'interfaces',
'SimulationNode': 'nodes'},
'organization': 'organizations', 'os': 'images', 'preferred_worker': 'workers',
'services': 'services', 'simulation': 'simulations', 'topology': 'topologies',
'worker': 'workers'}
def __init__(self, api, **kwargs):
self._deleted = False
super().__setattr__('_updatable', getattr(self, '_updatable', True))
super().__setattr__('_deletable', getattr(self, '_deletable', True))
self._api = api
self._load(**kwargs)
def _load(self, **kwargs):
for key, value in kwargs.items():
_value = value
datetime_obj = util.is_datetime_str(value)
if datetime_obj:
_value = datetime_obj
if key in self.model_keys and value:
if isinstance(value, list) and not isinstance(value, LazyLoadedList):
_value = LazyLoadedList([LazyLoaded(id=_get_item_id(item),
model=self._get_model_key(key))
for item in value], self._api)
elif isinstance(value, (LazyLoaded, LazyLoadedList)):
_value = value
elif value.startswith('http'):
_value = LazyLoaded(id=_value.split('/')[6], model=self._get_model_key(key))
else:
_value = LazyLoaded(id=_value, model=self._get_model_key(key))
super().__setattr__(key, _value)
def __repr__(self):
repr_str = super().__repr__()
if self._deleted:
repr_str = f'<Deleted Object ({repr_str})>'
return repr_str
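    # Attribute access transparently resolves LazyLoaded placeholders via the
    # API client and caches the fetched object on the instance.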
def __getattribute__(self, name):
value = super().__getattribute__(name)
if name == '_deleted':
return value
if self._deleted:
raise AirObjectDeleted(type(self))
if isinstance(value, LazyLoaded):
value = getattr(self._api.client, value.model).get(value.id)
super().__setattr__(name, value)
return value
def __setattr__(self, name, value):
if name == '_deleted' or not self._updatable:
return super().__setattr__(name, value)
try:
original = super().__getattribute__(name)
api = super().__getattribute__('_api')
id = super().__getattribute__('id') #pylint: disable=redefined-builtin
except AttributeError:
original = None
api = None
id = None
if not name.startswith('_') and api and id and original != value:
self._patch(name, value)
return super().__setattr__(name, value)
def _get_model_key(self, key):
value = self.model_keys[key]
if isinstance(value, dict):
value = self.model_keys[key][self.__class__.__name__]
return value
def _patch(self, key, value):
url = f'{self._api.url}{self.id}/'
res = self._api.client.patch(url, json={key: value})
util.raise_if_invalid_response(res)
def update(self, **kwargs):
"""
Update the object with the provided data
Arguments:
kwargs (dict, optional): All optional keyword arguments are applied as key/value
pairs in the request's JSON payload
"""
if not self._updatable:
raise NotImplementedError(f'{self.__class__.__name__} does not support updates')
url = f'{self._api.url}{self.id}/'
self.refresh()
self.__dict__.update(kwargs)
payload = self.__dict__
ignored_fields = getattr(self, '_ignored_update_fields', None)
if ignored_fields:
allowed_payload = {}
for key, value in payload.items():
if key not in ignored_fields:
allowed_payload[key] = value
payload = allowed_payload
res = self._api.client.put(url, json=payload)
util.raise_if_invalid_response(res)
def delete(self):
"""
Delete the object. Once successful, the object should no longer be used and will raise
[`AirObjectDeleted`](/docs/exceptions) when referenced.
Raises:
[`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
"""
if not self._deletable:
raise NotImplementedError(f'{self.__class__.__name__} does not support deletes')
url = f'{self._api.url}{self.id}/'
res = self._api.client.delete(url)
util.raise_if_invalid_response(res, status_code=204, data_type=None)
self._deleted = True
def refresh(self):
""" Syncs the object with all values returned by the API """
self._load(**self._api.get(self.id).__dict__)
def json(self):
""" Returns a JSON string representation of the object """
payload = {}
for key, value in self.__dict__.items():
if isinstance(value, (datetime, date)):
value = value.isoformat()
if key.startswith('_'):
continue
if isinstance(value, (AirModel, LazyLoaded)):
payload[key] = value.id
elif isinstance(value, LazyLoadedList):
payload[key] = [obj.id for obj in value.__iter__(skip_load=True)]
else:
payload[key] = value
return json.dumps(payload)
class LazyLoaded:
""" A lazy object whose data will be loaded later """
def __init__(self, id, model): #pylint: disable=redefined-builtin
self.id = id
self.model = model
def __repr__(self):
model_str = self.model.capitalize()
if model_str == 'Topologies':
model_str = 'Topology'
elif model_str.endswith('s'):
model_str = model_str[:-1]
return f'<air_sdk.air_model.LazyLoaded {model_str} {self.id}>'
class LazyLoadedList(list):
""" A list whose items are LazyLoaded """
def __init__(self, items, api):
self._api = api
super().__init__(items)
def __getitem__(self, index):
value = super().__getitem__(index)
if isinstance(value, LazyLoaded):
value = getattr(self._api.client, value.model).get(value.id)
self[index] = value
return value
def __iter__(self, skip_load=False):
items = super().__iter__()
for item in items:
if isinstance(item, LazyLoaded) and not skip_load:
yield getattr(self._api.client, item.model).get(item.id)
else:
yield item
def _get_item_id(item):
if isinstance(item, dict):
return item['id']
try:
return item.split('/')[6]
except (AttributeError, IndexError):
return item
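# Illustrative lazy loading (assumed values): an API payload such as
#   {'id': 's1', 'topology': 'https://air.nvidia.com/api/v1/topology/abc/'}
# stores topology as LazyLoaded(id='abc', model='topologies'); the first attribute
# access fetches it via client.topologies.get('abc') and caches the result.
#   _get_item_id('https://air.nvidia.com/api/v1/node/xyz/')  -> 'xyz'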
| air_sdk-main | air_sdk/air_model.py |